From 5018a3557cb4bff707966bddf8c086a6ed10f53b Mon Sep 17 00:00:00 2001
From: Sebastian Spaeth <Sebastian@SSpaeth.de>
Date: Wed, 19 Dec 2012 14:18:03 +0100
Subject: [PATCH] Don't read full image media into RAM on copying (#419)

We copy uploaded media from the queue store to the local workbench and
then to its final destination. The latter was done by simply:
dst.write(src.read()) which is of course evil as it reads the whole
file content into RAM. Which *might* arguably still be OK for images,
but you never know.

Make use of the provided storage() methods that offer chunked copying
rather than opening and fudging with files ourselves.

Signed-off-by: Sebastian Spaeth <Sebastian@SSpaeth.de>
---
 mediagoblin/media_types/image/processing.py | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index bdb2290f..bf464069 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -120,17 +120,10 @@ def process_image(entry):
     else:
         medium_filepath = None
 
-    # we have to re-read because unlike PIL, not everything reads
-    # things in string representation :)
-    queued_file = file(queued_filename, 'rb')
-
-    with queued_file:
-        original_filepath = create_pub_filepath(
+    # Copy our queued local workbench to its final destination
+    original_filepath = create_pub_filepath(
             entry, name_builder.fill('{basename}{ext}'))
-
-        with mgg.public_store.get_file(original_filepath, 'wb') \
-                as original_file:
-            original_file.write(queued_file.read())
+    mgg.public_store.copy_local_to_storage(queued_filename, original_filepath)
 
     # Remove queued media file from storage and database
     mgg.queue_store.delete_file(queued_filepath)
-- 
2.25.1