# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import Image
+try:
+ from PIL import Image
+except ImportError:
+ import Image
import os
+import logging
+import argparse
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import BadMediaFail, \
- create_pub_filepath, THUMB_SIZE, MEDIUM_SIZE
+from mediagoblin.processing import (
+ BadMediaFail, FilenameBuilder,
+ MediaProcessor, ProcessingManager,
+ request_from_args, get_process_filename,
+ store_public, copy_original)
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
exif_image_needs_rotation
-def process_image(entry):
+_log = logging.getLogger(__name__)
+
+PIL_FILTERS = {
+ 'NEAREST': Image.NEAREST,
+ 'BILINEAR': Image.BILINEAR,
+ 'BICUBIC': Image.BICUBIC,
+ 'ANTIALIAS': Image.ANTIALIAS}
+
+MEDIA_TYPE = 'mediagoblin.media_types.image'
+
+
+def resize_image(entry, resized, keyname, target_name, new_size,
+ exif_tags, workdir, quality, filter):
"""
- Code to process an image
+ Store a resized version of an image and record its size/quality metadata.
+
+ Arguments:
+ entry -- the media entry whose resized file is being stored
+ resized -- an image from Image.open() of the original image being resized
+ keyname -- Under what key to save in the db.
+ target_name -- public file path for the new resized image
+ exif_tags -- EXIF data for the original image
+ workdir -- directory path for storing converted image files
+ new_size -- 2-tuple size for the resized image
+ quality -- level of compression used when resizing images
+ filter -- One of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
"""
- workbench = mgg.workbench_manager.create_workbench()
- # Conversions subdirectory to avoid collisions
- conversions_subdir = os.path.join(
- workbench.dir, 'conversions')
- os.mkdir(conversions_subdir)
- queued_filepath = entry.queued_media_file
- queued_filename = workbench.localized_file(
- mgg.queue_store, queued_filepath,
- 'source')
-
- filename_bits = os.path.splitext(queued_filename)
- basename = os.path.split(filename_bits[0])[1]
- extension = filename_bits[1].lower()
-
- # EXIF extraction
- exif_tags = extract_exif(queued_filename)
- gps_data = get_gps_data(exif_tags)
+ resized = exif_fix_image_orientation(resized, exif_tags) # Fix orientation
try:
- thumb = Image.open(queued_filename)
- except IOError:
- raise BadMediaFail()
+ resize_filter = PIL_FILTERS[filter.upper()]
+ except KeyError:
+ raise Exception('Filter "{0}" not found, choose one of {1}'.format(
+ unicode(filter),
+ u', '.join(PIL_FILTERS.keys())))
- thumb = exif_fix_image_orientation(thumb, exif_tags)
+ resized.thumbnail(new_size, resize_filter)
- thumb.thumbnail(THUMB_SIZE, Image.ANTIALIAS)
+ # Copy the new file to the conversion subdir, then remotely.
+ tmp_resized_filename = os.path.join(workdir, target_name)
+ with file(tmp_resized_filename, 'w') as resized_file:
+ resized.save(resized_file, quality=quality)
+ store_public(entry, keyname, tmp_resized_filename, target_name)
- # Copy the thumb to the conversion subdir, then remotely.
- thumb_filename = 'thumbnail' + extension
- thumb_filepath = create_pub_filepath(entry, thumb_filename)
+ # store the thumb/medium info
+ image_info = {'width': new_size[0],
+ 'height': new_size[1],
+ 'quality': quality,
+ 'filter': filter}
- tmp_thumb_filename = os.path.join(
- conversions_subdir, thumb_filename)
+ entry.set_file_metadata(keyname, **image_info)
- with file(tmp_thumb_filename, 'w') as thumb_file:
- thumb.save(thumb_file)
- mgg.public_store.copy_local_to_storage(
- tmp_thumb_filename, thumb_filepath)
+def resize_tool(entry,
+ force, keyname, orig_file, target_name,
+ conversions_subdir, exif_tags, quality, filter, new_size=None):
+ # Use the default size if new_size was not given
+ if not new_size:
+ max_width = mgg.global_config['media:' + keyname]['max_width']
+ max_height = mgg.global_config['media:' + keyname]['max_height']
+ new_size = (max_width, max_height)
- # If the size of the original file exceeds the specified size of a `medium`
- # file, a `medium.jpg` files is created and later associated with the media
+ # If thumb or medium is already the same quality and size, then don't
+ # reprocess
+ if _skip_resizing(entry, keyname, new_size, quality, filter):
+ _log.info('{0} of same size and quality already in use, skipping '
+ 'resizing of media {1}.'.format(keyname, entry.id))
+ return
+
+ # If the size of the original file exceeds the specified size for the desired
+ # file, a target_name file is created and later associated with the media
# entry.
- medium = Image.open(queued_filename)
- if medium.size[0] > MEDIUM_SIZE[0] or medium.size[1] > MEDIUM_SIZE[1] \
+ # Also created if the file needs rotation, or if forced.
+ try:
+ im = Image.open(orig_file)
+ except IOError:
+ raise BadMediaFail()
+ if force \
+ or im.size[0] > new_size[0]\
+ or im.size[1] > new_size[1]\
or exif_image_needs_rotation(exif_tags):
-
- medium.thumbnail(MEDIUM_SIZE, Image.ANTIALIAS)
-
-
+ resize_image(
+ entry, im, unicode(keyname), target_name,
+ tuple(new_size),
+ exif_tags, conversions_subdir,
+ quality, filter)
+
+
+def _skip_resizing(entry, keyname, size, quality, filter):
+ """
+ Determines whether the saved thumb or medium is of the same quality and size
+ """
+ image_info = entry.get_file_metadata(keyname)
+
+ if not image_info:
+ return False
+
+ skip = True
+
+ if image_info.get('width') != size[0]:
+ skip = False
- # Fix orientation
- medium = exif_fix_image_orientation(medium, exif_tags)
+ elif image_info.get('height') != size[1]:
+ skip = False
-
+ elif image_info.get('filter') != filter:
+ skip = False
- medium_filename = 'medium' + extension
- medium_filepath = create_pub_filepath(entry, medium_filename)
+ elif image_info.get('quality') != quality:
+ skip = False
- tmp_medium_filename = os.path.join(
- conversions_subdir, medium_filename)
+ return skip
- with file(tmp_medium_filename, 'w') as medium_file:
- medium.save(medium_file)
- mgg.public_store.copy_local_to_storage(
- tmp_medium_filename, medium_filepath)
+SUPPORTED_FILETYPES = ['png', 'gif', 'jpg', 'jpeg', 'tiff']
+
+
+def sniff_handler(media_file, **kw):
+ _log.info('Sniffing {0}'.format(MEDIA_TYPE))
+ if kw.get('media') is not None: # That's a double negative!
+ name, ext = os.path.splitext(kw['media'].filename)
+ clean_ext = ext[1:].lower() # Strip the . from ext and make lowercase
+
+ if clean_ext in SUPPORTED_FILETYPES:
+ _log.info('Found file extension in supported filetypes')
+ return MEDIA_TYPE
+ else:
+ _log.debug('Media present, extension not found in {0}'.format(
+ SUPPORTED_FILETYPES))
else:
- medium_filepath = None
-
- # we have to re-read because unlike PIL, not everything reads
- # things in string representation :)
- queued_file = file(queued_filename, 'rb')
-
- with queued_file:
- #create_pub_filepath(entry, queued_filepath[-1])
- original_filepath = create_pub_filepath(entry, basename + extension)
-
- with mgg.public_store.get_file(original_filepath, 'wb') \
- as original_file:
- original_file.write(queued_file.read())
-
- # Remove queued media file from storage and database
- mgg.queue_store.delete_file(queued_filepath)
- entry.queued_media_file = []
-
- # Insert media file information into database
- media_files_dict = entry.setdefault('media_files', {})
- media_files_dict['thumb'] = thumb_filepath
- media_files_dict['original'] = original_filepath
- if medium_filepath:
- media_files_dict['medium'] = medium_filepath
-
- # Insert exif data into database
- media_data = entry.setdefault('media_data', {})
- media_data['exif'] = {
- 'clean': clean_exif(exif_tags)}
- media_data['exif']['useful'] = get_useful(
- media_data['exif']['clean'])
- media_data['gps'] = gps_data
-
- # clean up workbench
- workbench.destroy_self()
+ _log.warning('Need additional information (keyword argument \'media\')'
+ ' to be able to handle sniffing')
+
+ return None
+
+
+class CommonImageProcessor(MediaProcessor):
+ """
+ Provides a base for various media processing steps
+ """
+ # list of acceptable file keys in order of preference for reprocessing
+ acceptable_files = ['original', 'medium']
+
+ def common_setup(self):
+ """
+ Set up the workbench directory and pull down the original file
+ """
+ self.image_config = mgg.global_config['plugins'][
+ 'mediagoblin.media_types.image']
+
+ ## @@: Should this be two functions?
+ # Conversions subdirectory to avoid collisions
+ self.conversions_subdir = os.path.join(
+ self.workbench.dir, 'conversions')
+ os.mkdir(self.conversions_subdir)
+
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
+
+ # Exif extraction
+ self.exif_tags = extract_exif(self.process_filename)
+
+ def generate_medium_if_applicable(self, size=None, quality=None,
+ filter=None):
+ if not quality:
+ quality = self.image_config['quality']
+ if not filter:
+ filter = self.image_config['resize_filter']
+
+ resize_tool(self.entry, False, 'medium', self.process_filename,
+ self.name_builder.fill('{basename}.medium{ext}'),
+ self.conversions_subdir, self.exif_tags, quality,
+ filter, size)
+
+ def generate_thumb(self, size=None, quality=None, filter=None):
+ if not quality:
+ quality = self.image_config['quality']
+ if not filter:
+ filter = self.image_config['resize_filter']
+
+ resize_tool(self.entry, True, 'thumb', self.process_filename,
+ self.name_builder.fill('{basename}.thumbnail{ext}'),
+ self.conversions_subdir, self.exif_tags, quality,
+ filter, size)
+
+ def copy_original(self):
+ copy_original(
+ self.entry, self.process_filename,
+ self.name_builder.fill('{basename}{ext}'))
+
+ def extract_metadata(self):
+ # Is there any GPS data
+ gps_data = get_gps_data(self.exif_tags)
+
+ # Insert exif data into database
+ exif_all = clean_exif(self.exif_tags)
+
+ if len(exif_all):
+ self.entry.media_data_init(exif_all=exif_all)
+
+ if len(gps_data):
+ for key in list(gps_data.keys()):
+ gps_data['gps_' + key] = gps_data.pop(key)
+ self.entry.media_data_init(**gps_data)
+
+
+class InitialProcessor(CommonImageProcessor):
+ """
+ Initial processing step for new images
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ ###############################
+ # Command line interface things
+ ###############################
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--thumb-size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--filter',
+ choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+ parser.add_argument(
+ '--quality',
+ type=int,
+ help='level of compression used when resizing images')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'thumb_size', 'filter', 'quality'])
+
+ def process(self, size=None, thumb_size=None, quality=None, filter=None):
+ self.common_setup()
+ self.generate_medium_if_applicable(size=size, filter=filter,
+ quality=quality)
+ self.generate_thumb(size=thumb_size, filter=filter, quality=quality)
+ self.copy_original()
+ self.extract_metadata()
+ self.delete_queue_file()
+
+
+class Resizer(CommonImageProcessor):
+ """
+ Resizing process steps for processed media
+ """
+ name = 'resize'
+ description = 'Resize image'
+ thumb_size = 'size'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ ###############################
+ # Command line interface things
+ ###############################
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--filter',
+ choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+ parser.add_argument(
+ '--quality',
+ type=int,
+ help='level of compression used when resizing images')
+
+ parser.add_argument(
+ 'file',
+ choices=['medium', 'thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'file', 'quality', 'filter'])
+
+ def process(self, file, size=None, filter=None, quality=None):
+ self.common_setup()
+ if file == 'medium':
+ self.generate_medium_if_applicable(size=size, filter=filter,
+ quality=quality)
+ elif file == 'thumb':
+ self.generate_thumb(size=size, filter=filter, quality=quality)
+
+
+class ImageProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
+
if __name__ == '__main__':
import sys