Merge remote-tracking branch 'refs/remotes/rodney757/reprocessing'
[mediagoblin.git] / mediagoblin / media_types / image / processing.py
index 99be848f521074be3f31e657016f8122da24895e..a0ad2ce81d36e0781d9885f2940d825b332adf79 100644 (file)
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import Image
+try:
+    from PIL import Image
+except ImportError:
+    import Image
 import os
 import logging
+import argparse
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.decorators import get_workbench
-from mediagoblin.processing import BadMediaFail, \
-    create_pub_filepath, FilenameBuilder
+from mediagoblin.processing import (
+    BadMediaFail, FilenameBuilder,
+    MediaProcessor, ProcessingManager,
+    request_from_args, get_process_filename,
+    store_public, copy_original)
 from mediagoblin.tools.exif import exif_fix_image_orientation, \
     extract_exif, clean_exif, get_gps_data, get_useful, \
     exif_image_needs_rotation
@@ -34,57 +40,124 @@ PIL_FILTERS = {
     'BICUBIC': Image.BICUBIC,
     'ANTIALIAS': Image.ANTIALIAS}
 
+MEDIA_TYPE = 'mediagoblin.media_types.image'
 
-def resize_image(entry, filename, new_path, exif_tags, workdir, new_size,
-                 size_limits=(0, 0)):
+
+def resize_image(entry, resized, keyname, target_name, new_size,
+                 exif_tags, workdir, quality, filter):
     """
     Store a resized version of an image and return its pathname.
 
     Arguments:
-    entry -- the entry for the image to resize
-    filename -- the filename of the original image being resized
-    new_path -- public file path for the new resized image
+    entry -- the media entry whose resized file is being stored
+    resized -- an image from Image.open() of the original image being resized
+    keyname -- Under what key to save in the db.
+    target_name -- public file path for the new resized image
     exif_tags -- EXIF data for the original image
     workdir -- directory path for storing converted image files
     new_size -- 2-tuple size for the resized image
+    quality -- level of compression used when resizing images
+    filter -- One of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
     """
-    try:
-        resized = Image.open(filename)
-    except IOError:
-        raise BadMediaFail()
     resized = exif_fix_image_orientation(resized, exif_tags)  # Fix orientation
 
-    filter_config = \
-            mgg.global_config['media_type:mediagoblin.media_types.image']\
-                ['resize_filter']
-
     try:
-        resize_filter = PIL_FILTERS[filter_config.upper()]
+        resize_filter = PIL_FILTERS[filter.upper()]
     except KeyError:
         raise Exception('Filter "{0}" not found, choose one of {1}'.format(
-            unicode(filter_config),
+            unicode(filter),
             u', '.join(PIL_FILTERS.keys())))
 
     resized.thumbnail(new_size, resize_filter)
 
     # Copy the new file to the conversion subdir, then remotely.
-    tmp_resized_filename = os.path.join(workdir, new_path[-1])
+    tmp_resized_filename = os.path.join(workdir, target_name)
     with file(tmp_resized_filename, 'w') as resized_file:
-        resized.save(resized_file)
-    mgg.public_store.copy_local_to_storage(tmp_resized_filename, new_path)
+        resized.save(resized_file, quality=quality)
+    store_public(entry, keyname, tmp_resized_filename, target_name)
+
+    # store the thumb/medium info
+    image_info = {'width': new_size[0],
+                  'height': new_size[1],
+                  'quality': quality,
+                  'filter': filter}
+
+    entry.set_file_metadata(keyname, **image_info)
+
+
+def resize_tool(entry,
+                force, keyname, orig_file, target_name,
+                conversions_subdir, exif_tags, quality, filter, new_size=None):
+    # Use the default size if new_size was not given
+    if not new_size:
+        max_width = mgg.global_config['media:' + keyname]['max_width']
+        max_height = mgg.global_config['media:' + keyname]['max_height']
+        new_size = (max_width, max_height)
+
+    # If thumb or medium is already the same quality and size, then don't
+    # reprocess
+    if _skip_resizing(entry, keyname, new_size, quality, filter):
+        _log.info('{0} of same size and quality already in use, skipping '
+                  'resizing of media {1}.'.format(keyname, entry.id))
+        return
+
+    # If the size of the original file exceeds the specified size for the desired
+    # file, a target_name file is created and later associated with the media
+    # entry.
+    # Also created if the file needs rotation, or if forced.
+    try:
+        im = Image.open(orig_file)
+    except IOError:
+        raise BadMediaFail()
+    if force \
+        or im.size[0] > new_size[0]\
+        or im.size[1] > new_size[1]\
+        or exif_image_needs_rotation(exif_tags):
+        resize_image(
+            entry, im, unicode(keyname), target_name,
+            tuple(new_size),
+            exif_tags, conversions_subdir,
+            quality, filter)
+
+
+def _skip_resizing(entry, keyname, size, quality, filter):
+    """
+    Determines whether the saved thumb or medium is of the same quality and size
+    """
+    image_info = entry.get_file_metadata(keyname)
+
+    if not image_info:
+        return False
 
+    skip = True
 
-SUPPORTED_FILETYPES = ['png', 'gif', 'jpg', 'jpeg']
+    if image_info.get('width') != size[0]:
+        skip = False
+
+    elif image_info.get('height') != size[1]:
+        skip = False
+
+    elif image_info.get('filter') != filter:
+        skip = False
+
+    elif image_info.get('quality') != quality:
+        skip = False
+
+    return skip
+
+
+SUPPORTED_FILETYPES = ['png', 'gif', 'jpg', 'jpeg', 'tiff']
 
 
 def sniff_handler(media_file, **kw):
+    _log.info('Sniffing {0}'.format(MEDIA_TYPE))
     if kw.get('media') is not None:  # That's a double negative!
         name, ext = os.path.splitext(kw['media'].filename)
         clean_ext = ext[1:].lower()  # Strip the . from ext and make lowercase
 
         if clean_ext in SUPPORTED_FILETYPES:
             _log.info('Found file extension in supported filetypes')
-            return True
+            return MEDIA_TYPE
         else:
             _log.debug('Media present, extension not found in {0}'.format(
                     SUPPORTED_FILETYPES))
@@ -92,81 +165,213 @@ def sniff_handler(media_file, **kw):
         _log.warning('Need additional information (keyword argument \'media\')'
                      ' to be able to handle sniffing')
 
-    return False
-
+    return None
 
-@get_workbench
-def process_image(entry, workbench=None):
-    """Code to process an image. Will be run by celery.
 
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
+class CommonImageProcessor(MediaProcessor):
     """
-    # Conversions subdirectory to avoid collisions
-    conversions_subdir = os.path.join(
-        workbench.dir, 'conversions')
-    os.mkdir(conversions_subdir)
-    queued_filepath = entry.queued_media_file
-    queued_filename = workbench.localized_file(
-        mgg.queue_store, queued_filepath,
-        'source')
-    name_builder = FilenameBuilder(queued_filename)
-
-    # EXIF extraction
-    exif_tags = extract_exif(queued_filename)
-    gps_data = get_gps_data(exif_tags)
-
-    # Always create a small thumbnail
-    thumb_filepath = create_pub_filepath(
-        entry, name_builder.fill('{basename}.thumbnail{ext}'))
-    resize_image(entry, queued_filename, thumb_filepath,
-                exif_tags, conversions_subdir,
-                (mgg.global_config['media:thumb']['max_width'],
-                 mgg.global_config['media:thumb']['max_height']))
-
-    # If the size of the original file exceeds the specified size of a `medium`
-    # file, a `.medium.jpg` files is created and later associated with the media
-    # entry.
-    medium = Image.open(queued_filename)
-    if medium.size[0] > mgg.global_config['media:medium']['max_width'] \
-        or medium.size[1] > mgg.global_config['media:medium']['max_height'] \
-        or exif_image_needs_rotation(exif_tags):
-        medium_filepath = create_pub_filepath(
-            entry, name_builder.fill('{basename}.medium{ext}'))
-        resize_image(
-            entry, queued_filename, medium_filepath,
-            exif_tags, conversions_subdir,
-            (mgg.global_config['media:medium']['max_width'],
-             mgg.global_config['media:medium']['max_height']))
-    else:
-        medium_filepath = None
-
-    # Copy our queued local workbench to its final destination
-    original_filepath = create_pub_filepath(
-            entry, name_builder.fill('{basename}{ext}'))
-    mgg.public_store.copy_local_to_storage(queued_filename, original_filepath)
-
-    # Remove queued media file from storage and database
-    mgg.queue_store.delete_file(queued_filepath)
-    entry.queued_media_file = []
-
-    # Insert media file information into database
-    media_files_dict = entry.setdefault('media_files', {})
-    media_files_dict[u'thumb'] = thumb_filepath
-    media_files_dict[u'original'] = original_filepath
-    if medium_filepath:
-        media_files_dict[u'medium'] = medium_filepath
-
-    # Insert exif data into database
-    exif_all = clean_exif(exif_tags)
-
-    if len(exif_all):
-        entry.media_data_init(exif_all=exif_all)
-
-    if len(gps_data):
-        for key in list(gps_data.keys()):
-            gps_data['gps_' + key] = gps_data.pop(key)
-        entry.media_data_init(**gps_data)
+    Provides a base for various media processing steps
+    """
+    # list of acceptable file keys in order of preference for reprocessing
+    acceptable_files = ['original', 'medium']
+
+    def common_setup(self):
+        """
+        Set up the workbench directory and pull down the original file
+        """
+        self.image_config = mgg.global_config['plugins'][
+            'mediagoblin.media_types.image']
+
+        ## @@: Should this be two functions?
+        # Conversions subdirectory to avoid collisions
+        self.conversions_subdir = os.path.join(
+            self.workbench.dir, 'conversions')
+        os.mkdir(self.conversions_subdir)
+
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        # Exif extraction
+        self.exif_tags = extract_exif(self.process_filename)
+
+    def generate_medium_if_applicable(self, size=None, quality=None,
+                                      filter=None):
+        if not quality:
+            quality = self.image_config['quality']
+        if not filter:
+            filter = self.image_config['resize_filter']
+
+        resize_tool(self.entry, False, 'medium', self.process_filename,
+                    self.name_builder.fill('{basename}.medium{ext}'),
+                    self.conversions_subdir, self.exif_tags, quality,
+                    filter, size)
+
+    def generate_thumb(self, size=None, quality=None, filter=None):
+        if not quality:
+            quality = self.image_config['quality']
+        if not filter:
+            filter = self.image_config['resize_filter']
+
+        resize_tool(self.entry, True, 'thumb', self.process_filename,
+                    self.name_builder.fill('{basename}.thumbnail{ext}'),
+                    self.conversions_subdir, self.exif_tags, quality,
+                    filter, size)
+
+    def copy_original(self):
+        copy_original(
+            self.entry, self.process_filename,
+            self.name_builder.fill('{basename}{ext}'))
+
+    def extract_metadata(self):
+        # Is there any GPS data
+        gps_data = get_gps_data(self.exif_tags)
+
+        # Insert exif data into database
+        exif_all = clean_exif(self.exif_tags)
+
+        if len(exif_all):
+            self.entry.media_data_init(exif_all=exif_all)
+
+        if len(gps_data):
+            for key in list(gps_data.keys()):
+                gps_data['gps_' + key] = gps_data.pop(key)
+            self.entry.media_data_init(**gps_data)
+
+
+class InitialProcessor(CommonImageProcessor):
+    """
+    Initial processing step for new images
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    ###############################
+    # Command line interface things
+    ###############################
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--thumb-size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--filter',
+            choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+        parser.add_argument(
+            '--quality',
+            type=int,
+            help='level of compression used when resizing images')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'thumb_size', 'filter', 'quality'])
+
+    def process(self, size=None, thumb_size=None, quality=None, filter=None):
+        self.common_setup()
+        self.generate_medium_if_applicable(size=size, filter=filter,
+                                           quality=quality)
+        self.generate_thumb(size=thumb_size, filter=filter, quality=quality)
+        self.copy_original()
+        self.extract_metadata()
+        self.delete_queue_file()
+
+
+class Resizer(CommonImageProcessor):
+    """
+    Resizing process steps for processed media
+    """
+    name = 'resize'
+    description = 'Resize image'
+    thumb_size = 'size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    ###############################
+    # Command line interface things
+    ###############################
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--filter',
+            choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+        parser.add_argument(
+            '--quality',
+            type=int,
+            help='level of compression used when resizing images')
+
+        parser.add_argument(
+            'file',
+            choices=['medium', 'thumb'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'file', 'quality', 'filter'])
+
+    def process(self, file, size=None, filter=None, quality=None):
+        self.common_setup()
+        if file == 'medium':
+            self.generate_medium_if_applicable(size=size, filter=filter,
+                                              quality=quality)
+        elif file == 'thumb':
+            self.generate_thumb(size=size, filter=filter, quality=quality)
+
+
+class ImageProcessingManager(ProcessingManager):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.add_processor(InitialProcessor)
+        self.add_processor(Resizer)
 
 
 if __name__ == '__main__':