# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import tempfile
+import argparse
+import os.path
import logging
-import os
+import datetime
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import mark_entry_failed, \
- THUMB_SIZE, MEDIUM_SIZE, create_pub_filepath
-from . import transcoders
+from mediagoblin.processing import (
+ FilenameBuilder, BaseProcessingFail,
+ ProgressCallback, MediaProcessor,
+ ProcessingManager, request_from_args,
+ get_process_filename, store_public,
+ copy_original)
+from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
-logging.basicConfig()
+from . import transcoders
+from .util import skip_transcode
_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)
+MEDIA_TYPE = 'mediagoblin.media_types.video'
+
+
+class VideoTranscodingFail(BaseProcessingFail):
+ '''
+ Error raised if video transcoding fails
+ '''
+ general_message = _(u'Video transcoding failed')
+
+
+def sniff_handler(media_file, **kw):
+ transcoder = transcoders.VideoTranscoder()
+ data = transcoder.discover(media_file.name)
+
+ _log.info('Sniffing {0}'.format(MEDIA_TYPE))
+ _log.debug('Discovered: {0}'.format(data))
+
+ if not data:
+ _log.error('Could not discover {0}'.format(
+ kw.get('media')))
+ return None
-def process_video(entry):
+ if data['is_video'] is True:
+ return MEDIA_TYPE
+
+ return None
+
+
+def store_metadata(media_entry, metadata):
+ """
+ Store metadata from this video for this media entry.
"""
- Code to process a video
+    # First pull out the simple keys that don't need any conversion
+ stored_metadata = dict(
+ [(key, metadata[key])
+ for key in [
+ "videoheight", "videolength", "videowidth",
+ "audiorate", "audiolength", "audiochannels", "audiowidth",
+ "mimetype"]
+ if key in metadata])
+
+    # We have to convert videorate into a plain sequence because it is
+    # normally a special fraction-like type (it exposes .num and .denom).
+
+ if "videorate" in metadata:
+ videorate = metadata["videorate"]
+ stored_metadata["videorate"] = [videorate.num, videorate.denom]
+
+ # Also make a whitelist conversion of the tags.
+ if "tags" in metadata:
+ tags_metadata = metadata['tags']
+
+ # we don't use *all* of these, but we know these ones are
+ # safe...
+ tags = dict(
+ [(key, tags_metadata[key])
+ for key in [
+ "application-name", "artist", "audio-codec", "bitrate",
+ "container-format", "copyright", "encoder",
+ "encoder-version", "license", "nominal-bitrate", "title",
+ "video-codec"]
+ if key in tags_metadata])
+ if 'date' in tags_metadata:
+ date = tags_metadata['date']
+ tags['date'] = "%s-%s-%s" % (
+ date.year, date.month, date.day)
- Much of this code is derived from the arista-transcoder script in
- the arista PyPI package and changed to match the needs of
- MediaGoblin
+ # TODO: handle timezone info; gst.get_time_zone_offset +
+ # python's tzinfo should help
+ if 'datetime' in tags_metadata:
+ dt = tags_metadata['datetime']
+ tags['datetime'] = datetime.datetime(
+ dt.get_year(), dt.get_month(), dt.get_day(), dt.get_hour(),
+ dt.get_minute(), dt.get_second(),
+ dt.get_microsecond()).isoformat()
- This function sets up the arista video encoder in some kind of new thread
- and attaches callbacks to that child process, hopefully, the
- entry-complete callback will be called when the video is done.
+    # NOTE(review): this writes the whitelisted tags back into `metadata`,
+    # but it is `stored_metadata` that gets persisted below, so the tags
+    # appear to be silently dropped -- should this be
+    # stored_metadata['tags'] = tags? Confirm before merge.
+    metadata['tags'] = tags
+
+ # Only save this field if there's something to save
+ if len(stored_metadata):
+ media_entry.media_data_init(
+ orig_metadata=stored_metadata)
+
+
+class CommonVideoProcessor(MediaProcessor):
+ """
+ Provides a base for various video processing steps
"""
- video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
+ acceptable_files = ['original', 'best_quality', 'webm_video']
+
+ def common_setup(self):
+ self.video_config = mgg \
+ .global_config['media_type:mediagoblin.media_types.video']
+
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
+
+ self.transcoder = transcoders.VideoTranscoder()
+ self.did_transcode = False
+
+ def copy_original(self):
+ # If we didn't transcode, then we need to keep the original
+ if not self.did_transcode or \
+ (self.video_config['keep_original'] and self.did_transcode):
+ copy_original(
+ self.entry, self.process_filename,
+ self.name_builder.fill('{basename}{ext}'))
- workbench = mgg.workbench_manager.create_workbench()
+ def _keep_best(self):
+ """
+ If there is no original, keep the best file that we have
+ """
+ if not self.entry.media_files.get('best_quality'):
+            # No original kept: promote the webm transcode to best_quality
+ if not self.entry.media_files.get('original') and \
+ self.entry.media_files.get('webm_video'):
+ self.entry.media_files['best_quality'] = self.entry \
+ .media_files['webm_video']
- queued_filepath = entry.queued_media_file
- queued_filename = workbench.localized_file(
- mgg.queue_store, queued_filepath,
- 'source')
- medium_filepath = create_pub_filepath(
- entry,
- '{original}-640p.webm'.format(
- original=os.path.splitext(
- queued_filepath[-1])[0] # Select the
- ))
+ def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
+ vorbis_quality=None):
+ progress_callback = ProgressCallback(self.entry)
+ tmp_dst = os.path.join(self.workbench.dir,
+ self.name_builder.fill('{basename}.medium.webm'))
- thumbnail_filepath = create_pub_filepath(
- entry, 'thumbnail.jpg')
+ if not medium_size:
+ medium_size = (
+ mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
+ if not vp8_quality:
+ vp8_quality = self.video_config['vp8_quality']
+ if not vp8_threads:
+ vp8_threads = self.video_config['vp8_threads']
+ if not vorbis_quality:
+ vorbis_quality = self.video_config['vorbis_quality']
+ # Extract metadata and keep a record of it
+ metadata = self.transcoder.discover(self.process_filename)
+ store_metadata(self.entry, metadata)
- # Create a temporary file for the video destination
- tmp_dst = tempfile.NamedTemporaryFile()
+ # Figure out whether or not we need to transcode this video or
+ # if we can skip it
+ if skip_transcode(metadata, medium_size):
+ _log.debug('Skipping transcoding')
- with tmp_dst:
- # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
- transcoder = transcoders.VideoTranscoder(queued_filename, tmp_dst.name)
+ dst_dimensions = metadata['videowidth'], metadata['videoheight']
- # Push transcoded video to public storage
- _log.debug('Saving medium...')
- mgg.public_store.get_file(medium_filepath, 'wb').write(
- tmp_dst.read())
- _log.debug('Saved medium')
+ # If there is an original and transcoded, delete the transcoded
+        # since it must be of lower quality than the original
+ if self.entry.media_files.get('original') and \
+ self.entry.media_files.get('webm_video'):
+ self.entry.media_files['webm_video'].delete()
- entry.media_files['webm_640'] = medium_filepath
+ else:
+ self.transcoder.transcode(self.process_filename, tmp_dst,
+ vp8_quality=vp8_quality,
+ vp8_threads=vp8_threads,
+ vorbis_quality=vorbis_quality,
+ progress_callback=progress_callback,
+ dimensions=tuple(medium_size))
+
+ dst_dimensions = self.transcoder.dst_data.videowidth,\
+ self.transcoder.dst_data.videoheight
+
+ self._keep_best()
+
+ # Push transcoded video to public storage
+ _log.debug('Saving medium...')
+ store_public(self.entry, 'webm_video', tmp_dst,
+ self.name_builder.fill('{basename}.medium.webm'))
+ _log.debug('Saved medium')
+
+ self.did_transcode = True
# Save the width and height of the transcoded video
- entry.media_data_init(
- width=transcoder.dst_data.videowidth,
- height=transcoder.dst_data.videoheight)
+ self.entry.media_data_init(
+ width=dst_dimensions[0],
+ height=dst_dimensions[1])
- # Create a temporary file for the video thumbnail
- tmp_thumb = tempfile.NamedTemporaryFile()
+ def generate_thumb(self, thumb_size=None):
+ # Temporary file for the video thumbnail (cleaned up with workbench)
+ tmp_thumb = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}.thumbnail.jpg'))
- with tmp_thumb:
- # Create a thumbnail.jpg that fits in a 180x180 square
- transcoders.VideoThumbnailer(queued_filename, tmp_thumb.name)
+        if not thumb_size:
+            # NOTE(review): this assigns a bare int (the parentheses do not
+            # make a tuple), yet thumb_size[0] is indexed just below --
+            # looks like it should be (max_width, max_height). Confirm.
+            thumb_size = (mgg.global_config['media:thumb']['max_width'])
+
+ # We will only use the width so that the correct scale is kept
+ transcoders.VideoThumbnailerMarkII(
+ self.process_filename,
+ tmp_thumb,
+ thumb_size[0])
# Push the thumbnail to public storage
_log.debug('Saving thumbnail...')
- mgg.public_store.get_file(thumbnail_filepath, 'wb').write(
- tmp_thumb.read())
- _log.debug('Saved thumbnail')
+ store_public(self.entry, 'thumb', tmp_thumb,
+ self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonVideoProcessor):
+ """
+ Initial processing steps for new video
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--medium_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--vp8_quality',
+ type=int,
+ help='Range 0..10')
+
+ parser.add_argument(
+ '--vp8_threads',
+ type=int,
+ help='0 means number_of_CPUs - 1')
+
+ parser.add_argument(
+ '--vorbis_quality',
+ type=float,
+ help='Range -0.1..1')
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['medium_size', 'vp8_quality', 'vp8_threads',
+ 'vorbis_quality', 'thumb_size'])
+
+ def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
+ vorbis_quality=None, thumb_size=None):
+ self.common_setup()
+
+ self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
+ vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)
+
+ self.copy_original()
+ self.generate_thumb(thumb_size=thumb_size)
+ self.delete_queue_file()
+
+
+class Resizer(CommonVideoProcessor):
+ """
+ Video thumbnail resizing process steps for processed media
+ """
+ name = 'resize'
+ description = 'Resize thumbnail'
+ thumb_size = 'thumb_size'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ # Needed for gmg reprocess thumbs to work
+ parser.add_argument(
+ 'file',
+ nargs='?',
+ default='thumb',
+ choices=['thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['thumb_size', 'file'])
+
+ def process(self, thumb_size=None, file=None):
+ self.common_setup()
+ self.generate_thumb(thumb_size=thumb_size)
+
+
+class Transcoder(CommonVideoProcessor):
+ """
+ Transcoding processing steps for processed video
+ """
+ name = 'transcode'
+ description = 'Re-transcode video'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--medium_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--vp8_quality',
+ type=int,
+ help='Range 0..10')
- entry.media_files['thumb'] = thumbnail_filepath
+ parser.add_argument(
+ '--vp8_threads',
+ type=int,
+ help='0 means number_of_CPUs - 1')
- if video_config['keep_original']:
- # Push original file to public storage
- queued_file = file(queued_filename, 'rb')
+ parser.add_argument(
+ '--vorbis_quality',
+ type=float,
+ help='Range -0.1..1')
- with queued_file:
- original_filepath = create_pub_filepath(
- entry,
- queued_filepath[-1])
+ return parser
- with mgg.public_store.get_file(original_filepath, 'wb') as \
- original_file:
- _log.debug('Saving original...')
- original_file.write(queued_file.read())
- _log.debug('Saved original')
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['medium_size', 'vp8_threads', 'vp8_quality',
+ 'vorbis_quality'])
- entry.media_files['original'] = original_filepath
+ def process(self, medium_size=None, vp8_quality=None, vp8_threads=None,
+ vorbis_quality=None):
+ self.common_setup()
+ self.transcode(medium_size=medium_size, vp8_threads=vp8_threads,
+ vp8_quality=vp8_quality, vorbis_quality=vorbis_quality)
- mgg.queue_store.delete_file(queued_filepath)
- # Save the MediaEntry
- entry.save()
+class VideoProcessingManager(ProcessingManager):
+    def __init__(self):
+        # NOTE(review): super(self.__class__, self) recurses infinitely if
+        # this class is ever subclassed; prefer naming the class explicitly:
+        # super(VideoProcessingManager, self).__init__()
+        super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
+ self.add_processor(Transcoder)