Convert media processing backends to delete the queue directory (#254)
[mediagoblin.git] / mediagoblin / media_types / video / processing.py
index 703c4681d22422200310d1b919700aee264094eb..68d14148af97aa281fd5f6ed4796cda2548e960d 100644 (file)
@@ -18,6 +18,7 @@ from tempfile import NamedTemporaryFile
 import logging
 
 from mediagoblin import mg_globals as mgg
+from mediagoblin.decorators import get_workbench
 from mediagoblin.processing import \
     create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
 from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
@@ -51,16 +52,17 @@ def sniff_handler(media_file, **kw):
 
     return False
 
-
-def process_video(entry):
+@get_workbench
+def process_video(entry, workbench=None):
     """
     Process a video entry, transcode the queued media files (originals) and
     create a thumbnail for the entry.
+
+    A Workbench() represents a local temporary dir. It is automatically
+    cleaned up when this function exits.
     """
     video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
 
-    workbench = mgg.workbench_manager.create_workbench()
-
     queued_filepath = entry.queued_media_file
     queued_filename = workbench.localized_file(
         mgg.queue_store, queued_filepath,
@@ -73,9 +75,8 @@ def process_video(entry):
     thumbnail_filepath = create_pub_filepath(
         entry, name_builder.fill('{basename}.thumbnail.jpg'))
 
-    # Create a temporary file for the video destination
-    tmp_dst = NamedTemporaryFile(dir=workbench.dir)
-
+    # Create a temporary file for the video destination (cleaned up with workbench)
+    tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)
     with tmp_dst:
         # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
         progress_callback = ProgressCallback(entry)
@@ -86,22 +87,20 @@ def process_video(entry):
                 vorbis_quality=video_config['vorbis_quality'],
                 progress_callback=progress_callback)
 
-        # Push transcoded video to public storage
-        _log.debug('Saving medium...')
-        # TODO (#419, we read everything in RAM here!)
-        mgg.public_store.get_file(medium_filepath, 'wb').write(
-            tmp_dst.read())
-        _log.debug('Saved medium')
+    # Push transcoded video to public storage
+    _log.debug('Saving medium...')
+    mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)
+    _log.debug('Saved medium')
 
-        entry.media_files['webm_640'] = medium_filepath
+    entry.media_files['webm_640'] = medium_filepath
 
-        # Save the width and height of the transcoded video
-        entry.media_data_init(
-            width=transcoder.dst_data.videowidth,
-            height=transcoder.dst_data.videoheight)
+    # Save the width and height of the transcoded video
+    entry.media_data_init(
+        width=transcoder.dst_data.videowidth,
+        height=transcoder.dst_data.videoheight)
 
-    # Create a temporary file for the video thumbnail
-    tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg')
+    # Temporary file for the video thumbnail (cleaned up with workbench)
+    tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)
 
     with tmp_thumb:
         # Create a thumbnail.jpg that fits in a 180x180 square
@@ -110,33 +109,22 @@ def process_video(entry):
                 tmp_thumb.name,
                 180)
 
-        # Push the thumbnail to public storage
-        _log.debug('Saving thumbnail...')
-        mgg.public_store.get_file(thumbnail_filepath, 'wb').write(
-            tmp_thumb.read())
-        _log.debug('Saved thumbnail')
-
-        entry.media_files['thumb'] = thumbnail_filepath
+    # Push the thumbnail to public storage
+    _log.debug('Saving thumbnail...')
+    mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)
+    entry.media_files['thumb'] = thumbnail_filepath
 
     if video_config['keep_original']:
         # Push original file to public storage
-        queued_file = file(queued_filename, 'rb')
-
-        with queued_file:
-            original_filepath = create_pub_filepath(
-                entry,
-                queued_filepath[-1])
-
-            with mgg.public_store.get_file(original_filepath, 'wb') as \
-                    original_file:
-                _log.debug('Saving original...')
-                # TODO (#419, we read everything in RAM here!)
-                original_file.write(queued_file.read())
-                _log.debug('Saved original')
-
-                entry.media_files['original'] = original_filepath
-
-    mgg.queue_store.delete_file(queued_filepath)
-
-    # clean up workbench
-    workbench.destroy_self()
+        _log.debug('Saving original...')
+        original_filepath = create_pub_filepath(entry, queued_filepath[-1])
+        mgg.public_store.copy_local_to_storage(queued_filename, original_filepath)
+        entry.media_files['original'] = original_filepath
+
+    # Remove queued media file from storage and database.
+    # queued_filepath is in the task_id directory which should
+    # be removed too, but fail if the directory is not empty to be on
+    # the super-safe side.
+    mgg.queue_store.delete_file(queued_filepath)      # rm file
+    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
+    entry.queued_media_file = []