Feature #571 - Closing storage objects - Removed closing(), renamed
[mediagoblin.git] / mediagoblin / process_media / __init__.py
index d6cdd74761eae236a46fec1f9660f61b5a841155..2b9eed6e111a051afe1001262caa85c17305a9c7 100644 (file)
@@ -1,5 +1,5 @@
 # GNU MediaGoblin -- federated, autonomous media hosting
-# Copyright (C) 2011 Free Software Foundation, Inc
+# Copyright (C) 2011 MediaGoblin contributors.  See AUTHORS.
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by
@@ -16,8 +16,7 @@
 
 import Image
 
-from contextlib import contextmanager
-from celery.task import task, Task
+from celery.task import Task
 from celery import registry
 
 from mediagoblin.db.util import ObjectId
@@ -36,14 +35,6 @@ def create_pub_filepath(entry, filename):
              filename])
 
 
-@contextmanager
-def closing(callback):
-    try:
-        yield callback
-    finally:
-        pass
-
-
 ################################
 # Media processing initial steps
 ################################
@@ -59,7 +50,14 @@ class ProcessMedia(Task):
         """
         entry = mgg.database.MediaEntry.one(
             {'_id': ObjectId(media_id)})
-        process_image(entry)
+
+        # Try to process, and handle expected errors.
+        try:
+            process_image(entry)
+        except BaseProcessingFail as exc:
+            mark_entry_failed(entry[u'_id'], exc)
+            return
+
         entry['state'] = u'processed'
         entry.save()
 
@@ -71,31 +69,46 @@ class ProcessMedia(Task):
         we can use that to get more information about the failure and store that
         for conveying information to users about the failure, etc.
         """
-        media_id = args[0]
-        entry = mgg.database.MediaEntry.one(
-            {'_id': ObjectId(media_id)})
+        entry_id = args[0]
+        mark_entry_failed(entry_id, exc)
 
-        entry[u'state'] = u'failed'
-
-        # Was this a BaseProcessingFail?  In other words, was this a
-        # type of error that we know how to handle?
-        if isinstance(exc, BaseProcessingFail):
-            # Looks like yes, so record information about that failure and any
-            # metadata the user might have supplied.
-            entry[u'fail_error'] = exc.exception_path
-            entry[u'fail_metadata'] = exc.metadata
-        else:
-            # Looks like no, so just mark it as failed and don't record a
-            # failure_error (we'll assume it wasn't handled) and don't record
-            # metadata (in fact overwrite it if somehow it had previous info
-            # here)
-            entry[u'fail_error'] = None
-            entry[u'fail_metadata'] = {}
 
-        entry.save()
+process_media = registry.tasks[ProcessMedia.name]
 
 
-process_media = registry.tasks[ProcessMedia.name]
+def mark_entry_failed(entry_id, exc):
+    """
+    Mark a media entry as having failed in its conversion.
+
+    Uses the exception that was raised to mark more information.  If the
+    exception is a derivative of BaseProcessingFail then we can store extra
+    information that can be useful for telling users why their media failed
+    to process.
+
+    Args:
+     - entry_id: The id of the media entry
+     - exc: The exception that was raised; used to record failure details
+
+    """
+    # Was this a BaseProcessingFail?  In other words, was this a
+    # type of error that we know how to handle?
+    if isinstance(exc, BaseProcessingFail):
+        # Looks like yes, so record information about that failure and any
+        # metadata the user might have supplied.
+        mgg.database['media_entries'].update(
+            {'_id': entry_id},
+            {'$set': {u'state': u'failed',
+                      u'fail_error': exc.exception_path,
+                      u'fail_metadata': exc.metadata}})
+    else:
+        # Looks like no, so just mark it as failed and don't record a
+        # failure_error (we'll assume it wasn't handled) and don't record
+        # metadata (in fact overwrite it if somehow it had previous info
+        # here)
+        mgg.database['media_entries'].update(
+            {'_id': entry_id},
+            {'$set': {u'state': u'failed',
+                      u'fail_error': None,
+                      u'fail_metadata': {}}})
 
 
 def process_image(entry):
@@ -120,16 +133,14 @@ def process_image(entry):
         thumb = thumb.convert("RGB")
 
     thumb_filepath = create_pub_filepath(entry, 'thumbnail.jpg')
-
     thumb_file = mgg.public_store.get_file(thumb_filepath, 'w')
-    with closing(thumb_file):
+
+    with thumb_file:
         thumb.save(thumb_file, "JPEG", quality=90)
 
-    """
-    If the size of the original file exceeds the specified size of a `medium`
-    file, a `medium.jpg` files is created and later associated with the media
-    entry.
-    """
+    # If the size of the original file exceeds the specified size of a `medium`
+    # file, a `medium.jpg` file is created and later associated with the media
+    # entry.
     medium = Image.open(queued_filename)
     medium_processed = False
 
@@ -140,9 +151,9 @@ def process_image(entry):
             medium = medium.convert("RGB")
 
         medium_filepath = create_pub_filepath(entry, 'medium.jpg')
-
         medium_file = mgg.public_store.get_file(medium_filepath, 'w')
-        with closing(medium_file):
+
+        with medium_file:
             medium.save(medium_file, "JPEG", quality=90)
             medium_processed = True
 
@@ -152,8 +163,8 @@ def process_image(entry):
 
     with queued_file:
         original_filepath = create_pub_filepath(entry, queued_filepath[-1])
-        
-        with closing(mgg.public_store.get_file(original_filepath, 'wb')) as original_file:
+
+        with mgg.public_store.get_file(original_filepath, 'wb') as original_file:
             original_file.write(queued_file.read())
 
     mgg.queue_store.delete_file(queued_filepath)