Added comments and did a little refactoring. Not sure if it is actually any clearer...
author Rodney Ewing <ewing.rj@gmail.com>
Fri, 2 Aug 2013 22:12:07 +0000 (15:12 -0700)
committer Rodney Ewing <ewing.rj@gmail.com>
Fri, 16 Aug 2013 22:30:14 +0000 (15:30 -0700)
mediagoblin/gmg_commands/reprocess.py
mediagoblin/media_types/image/__init__.py
mediagoblin/media_types/image/processing.py
mediagoblin/processing/task.py

index 9d8ede2415a5b342baf8c167fbf07cef1e271002..4df0d5817607b24e8fa085b38d38d3160c00f949 100644 (file)
@@ -49,16 +49,18 @@ def reprocess_parser_setup(subparser):
 
 
 def _set_media_type(args):
+    """
+    This will verify that all media id's are of the same media_type. If the
+    --type flag is set, it will be replaced by the given media id's type.
+
+    If they are trying to process different media types, an Exception will be
+    raised.
+    """
     if args[0].media_id:
         if len(args[0].media_id) == 1:
-            media_type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
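+            # media_type is stored as a dotted path; keep only the last
+            # component (e.g. 'image') so it matches the form of --type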
+            args[0].type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
                 .first().media_type.split('.')[-1]
 
-            if not args[0].type:
-                args[0].type = media_type
-            elif args[0].type != media_type:
-                raise Exception(_('The --type that you set does not match the'
-                                  'type of the given media_id.'))
         elif len(args[0].media_id) > 1:
             media_types = []
 
@@ -70,15 +72,17 @@ def _set_media_type(args):
                     raise Exception((u'You cannot reprocess different'
                                      ' media_types at the same time.'))
 
-            if not args[0].type:
-                args[0].type = media_types[0]
-            elif args[0].type != media_types[0]:
-                raise Exception(_('The --type that you set does not match the'
-                                  ' type of the given media_ids.'))
+            args[0].type = media_types[0]
 
 
 def _reprocess_all(args):
+    """
+    This handles reprocessing when no media_ids are given.
+    """
     if not args[0].type:
+        # If no media type is given, we can either regenerate all thumbnails,
+        # or try to reprocess all failed media
+
         if args[0].thumbnails:
             if args[0].available:
                 print _('Available options for regenerating all processed'
@@ -89,6 +93,7 @@ def _reprocess_all(args):
                 #TODO regenerate all thumbnails
                 pass
 
+        # Reprocess all failed media
         elif args[0].state == 'failed':
             if args[0].available:
                 print _('\n Available reprocess actions for all failed'
@@ -97,6 +102,8 @@ def _reprocess_all(args):
                 #TODO reprocess all failed entries
                 pass
 
+        # If we get here, they didn't set the --type flag and were trying to
+        # do something other than regenerating thumbnails or reprocessing
+        # failed entries
         else:
             raise Exception(_('You must set --type when trying to reprocess'
                               ' all media_entries, unless you set --state'
@@ -107,6 +114,8 @@ def _reprocess_all(args):
 
 
 def _run_reprocessing(args):
+    # Are they just asking for the available reprocessing options for the given
+    # media?
     if args[0].available:
         if args[0].state == 'failed':
             print _('\n Available reprocess actions for all failed'
@@ -118,11 +127,20 @@ def _run_reprocessing(args):
                         ' entries in the {} state'.format(args[0].type,
                                                           args[0].state))
     else:
+        # Run media reprocessing
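+        # hook_handle() dispatches to the media_reprocess hook registered by
+        # the plugin for this media type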
         return hook_handle(('media_reprocess', args[0].type), args)
 
 
 def _set_media_state(args):
+    """
+    This will verify that all media id's are in the same state. If the
+    --state flag is set, it will be replaced by the given media id's state.
+
+    If they are trying to process different media states, an Exception will be
+    raised.
+    """
     if args[0].media_id:
+        # Only check if we are given media_ids
         if len(args[0].media_id) == 1:
             args[0].state = MediaEntry.query.filter_by(id=args[0].media_id[0])\
                 .first().state
@@ -133,6 +151,8 @@ def _set_media_state(args):
             for id in args[0].media_id:
                 media_states.append(MediaEntry.query.filter_by(id=id).first()
                                     .state)
+
+            # Make sure that all media are in the same state
             for state in media_states:
                 if state != media_states[0]:
                     raise Exception(_('You can only reprocess media that is in'
@@ -140,11 +160,13 @@ def _set_media_state(args):
 
             args[0].state = media_states[0]
 
+    # If no state was set, then we will default to the processed state
     if not args[0].state:
         args[0].state = 'processed'
 
 
 def reprocess(args):
+    # Run eagerly unless explicitly set not to
     if not args[0].celery:
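+        # CELERY_ALWAYS_EAGER makes the task run synchronously in this
+        # process instead of being sent to a worker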
         os.environ['CELERY_ALWAYS_EAGER'] = 'true'
     commands_util.setup_app(args[0])
@@ -152,6 +174,7 @@ def reprocess(args):
     _set_media_state(args)
     _set_media_type(args)
 
+    # If no media_ids were given, then try to reprocess all entries
     if not args[0].media_id:
         return _reprocess_all(args)
 
index 1aff21d4ebceecb77a49ea19d156206e5fb69cba..de7de3ace5db9594e778a96481284ae355d95565 100644 (file)
@@ -72,6 +72,9 @@ def get_media_type_and_manager(ext):
 
 
 def reprocess_action(args):
+    """
+    List the available actions for media in a given state
+    """
     if args[0].state == 'processed':
         print _('\n Available reprocessing actions for processed images:'
                 '\n \t --resize: thumb or medium'
@@ -81,9 +84,13 @@ def reprocess_action(args):
 
 
 def _parser(args):
+    """
+    Parses the unknown args from the gmg parser
+    """
     parser = argparse.ArgumentParser()
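+    # Restrict --resize to the sizes we actually generate (thumb or medium)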
     parser.add_argument(
-        '--resize')
+        '--resize',
+        choices=['thumb', 'medium'])
     parser.add_argument(
         '--size',
         nargs=2,
@@ -96,6 +103,10 @@ def _parser(args):
 
 
 def _check_eligible(entry_args, reprocess_args):
+    """
+    Check to see if we can actually process the given media as requested
+    """
+
     if entry_args.state == 'processed':
         if reprocess_args.initial_processing:
             raise Exception(_('You can not run --initial_processing on media'
@@ -118,36 +129,37 @@ def media_reprocess(args):
     reprocess_args = _parser(args)
     entry_args = args[0]
 
+    # Can we actually process the given media as requested?
     _check_eligible(entry_args, reprocess_args)
+
+    # Do we want to re-try initial processing?
     if reprocess_args.initial_processing:
         for id in entry_args.media_id:
             entry = MediaEntry.query.filter_by(id=id).first()
-            # Should we get the feed_url?
             run_process_media(entry)
 
+    # Do we want to resize the thumbnail or the medium image?
     elif reprocess_args.resize:
-        if reprocess_args.resize == 'medium' or reprocess_args.resize == \
-           'thumb':
-            for id in entry_args.media_id:
-                entry = MediaEntry.query.filter_by(id=id).first()
 
-                # For now we can only reprocess with the original file
-                if not entry.media_files.get('original'):
-                    raise Exception(_('The original file for this media entry'
-                                      ' does not exist.'))
+        # Reprocess all of the given media entries
+        for id in entry_args.media_id:
+            entry = MediaEntry.query.filter_by(id=id).first()
+
+            # For now we can only reprocess with the original file
+            if not entry.media_files.get('original'):
+                raise Exception(_('The original file for this media entry'
+                                  ' does not exist.'))
 
-                reprocess_info = {'resize': reprocess_args.resize}
+            reprocess_info = {'resize': reprocess_args.resize}
 
-                if reprocess_args.size:
-                    reprocess_info['max_width'] = reprocess_args.size[0]
-                    reprocess_info['max_height'] = reprocess_args.size[1]
+            if reprocess_args.size:
+                reprocess_info['max_width'] = reprocess_args.size[0]
+                reprocess_info['max_height'] = reprocess_args.size[1]
 
-                run_process_media(entry, reprocess_info=reprocess_info)
+            run_process_media(entry, reprocess_info=reprocess_info)
 
-        else:
-            raise Exception(_('The --resize flag must set either "thumb"'
-                              ' or "medium".'))
 
+    # If we are here, they forgot to tell us how to reprocess
     else:
         _log.warn('You must set either --resize or --initial_processing flag'
                   ' to reprocess an image.')
index 18b8bd4e9c7f9ef376e6e9f75b519f2e265a903c..078ab0d86f912086f6d9b89dbb3be4bb10d4d550 100644 (file)
@@ -73,12 +73,17 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
     proc_state.store_public(keyname, tmp_resized_filename, target_name)
 
 
-def resize_tool(proc_state, force, keyname, filename, target_name,
+def resize_tool(proc_state, force, keyname, target_name,
                 conversions_subdir, exif_tags, new_size=None):
+    # Get the filename of the original file
+    filename = proc_state.get_orig_filename()
+
+    # Use the default size if new_size was not given
     if not new_size:
         max_width = mgg.global_config['media:' + keyname]['max_width']
         max_height = mgg.global_config['media:' + keyname]['max_height']
         new_size = (max_width, max_height)
+
     # If the size of the original file exceeds the specified size for the resized
     # file, a target_name file is created and later associated with the media
     # entry.
@@ -125,74 +130,67 @@ def process_image(proc_state, reprocess_info=None):
     A Workbench() represents a local temporary dir. It is automatically
     cleaned up when this function exits.
     """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
+    def __init__(self, proc_state):
+        self.proc_state = proc_state
+        self.entry = proc_state.entry
+        self.workbench = proc_state.workbench
 
-    # Conversions subdirectory to avoid collisions
-    conversions_subdir = os.path.join(
-        workbench.dir, 'conversions')
-    os.mkdir(conversions_subdir)
+        # Conversions subdirectory to avoid collisions
+        self.conversions_subdir = os.path.join(
+            self.workbench.dir, 'conversions')
 
-    if reprocess_info:
-        _reprocess_image(proc_state, reprocess_info, conversions_subdir)
+        self.orig_filename = proc_state.get_orig_filename()
+        self.name_builder = FilenameBuilder(self.orig_filename)
 
-    else:
-        queued_filename = proc_state.get_queued_filename()
-        name_builder = FilenameBuilder(queued_filename)
+        # Exif extraction
+        self.exif_tags = extract_exif(self.orig_filename)
+
+        os.mkdir(self.conversions_subdir)
 
-        # EXIF extraction
-        exif_tags = extract_exif(queued_filename)
-        gps_data = get_gps_data(exif_tags)
+    def initial_processing(self):
+        # Is there any GPS data
+        gps_data = get_gps_data(self.exif_tags)
 
-        # Always create a small thumbnail
-        resize_tool(proc_state, True, 'thumb', queued_filename,
-                    name_builder.fill('{basename}.thumbnail{ext}'),
-                    conversions_subdir, exif_tags)
+        # Always create a small thumbnail
+        resize_tool(self.proc_state, True, 'thumb', self.orig_filename,
+                    self.name_builder.fill('{basename}.thumbnail{ext}'),
+                    self.conversions_subdir, self.exif_tags)
 
         # Possibly create a medium
-        resize_tool(proc_state, False, 'medium', queued_filename,
-                    name_builder.fill('{basename}.medium{ext}'),
-                    conversions_subdir, exif_tags)
+        resize_tool(self.proc_state, False, 'medium', self.orig_filename,
+                    self.name_builder.fill('{basename}.medium{ext}'),
+                    self.conversions_subdir, self.exif_tags)
 
         # Copy our queued local workbench to its final destination
-        proc_state.copy_original(name_builder.fill('{basename}{ext}'))
+        self.proc_state.copy_original(self.name_builder.fill('{basename}{ext}'))
 
         # Remove queued media file from storage and database
-        proc_state.delete_queue_file()
+        self.proc_state.delete_queue_file()
 
         # Insert exif data into database
-        exif_all = clean_exif(exif_tags)
+        exif_all = clean_exif(self.exif_tags)
 
         if len(exif_all):
-            entry.media_data_init(exif_all=exif_all)
+            self.entry.media_data_init(exif_all=exif_all)
 
         if len(gps_data):
             for key in list(gps_data.keys()):
                 gps_data['gps_' + key] = gps_data.pop(key)
-            entry.media_data_init(**gps_data)
+            self.entry.media_data_init(**gps_data)
 
+    def reprocess(self, reprocess_info):
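+        # Leave new_size as None unless a size was given; resize_tool will
+        # then fall back to the configured max_width/max_height for this size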
+        new_size = None
 
-def _reprocess_image(proc_state, reprocess_info, conversions_subdir):
-    reprocess_filename = proc_state.get_reprocess_filename()
-    name_builder = FilenameBuilder(reprocess_filename)
-
-    exif_tags = extract_exif(reprocess_filename)
-
-    if reprocess_info.get('max_width'):
-        max_width = reprocess_info['max_width']
-        max_height = reprocess_info['max_height']
-    else:
-        max_width = mgg.global_config \
-            ['media:' + reprocess_info['resize']]['max_width']
-        max_height = mgg.global_config \
-            ['media:' + reprocess_info['resize']]['max_height']
-
-    new_size = (max_width, max_height)
+        # Did they specify a size?
+        if reprocess_info.get('max_width'):
+            max_width = reprocess_info['max_width']
+            max_height = reprocess_info['max_height']
 
-    resize_tool(proc_state, False, reprocess_info['resize'], reprocess_filename,
-                name_builder.fill('{basename}.thumbnail{ext}'),
-                conversions_subdir, exif_tags, new_size)
+            new_size = (max_width, max_height)
 
+        resize_tool(self.proc_state, False, reprocess_info['resize'],
+                    self.name_builder.fill('{basename}.medium{ext}'),
+                    self.conversions_subdir, self.exif_tags, new_size)
 
 if __name__ == '__main__':
     import sys
index c0dfb9b4540e2d2b74f6022ddb5c1d84bd700d96..36ee31fd217132b61c0371fd1bf36fe9a56f438a 100644 (file)
@@ -89,9 +89,17 @@ class ProcessMedia(task.Task):
 
             proc_state = ProcessingState(entry)
             with mgg.workbench_manager.create() as workbench:
+
                 proc_state.set_workbench(workbench)
-                # run the processing code
-                entry.media_manager.processor(proc_state, reprocess_info)
+                processor = entry.media_manager.processor(proc_state)
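+                # The processor wraps the processing state and exposes
+                # reprocess() and initial_processing() entry points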
+
+                # If we have reprocess_info, let's reprocess
+                if reprocess_info:
+                    processor.reprocess(reprocess_info)
+
+                # Run initial processing
+                else:
+                    processor.initial_processing()
 
             # We set the state to processed and save the entry here so there's
             # no need to save at the end of the processing stage, probably ;)