X-Git-Url: https://vcs.fsf.org/?a=blobdiff_plain;f=mediagoblin%2Fmedia_types%2Fvideo%2Fprocessing.py;h=68d14148af97aa281fd5f6ed4796cda2548e960d;hb=36ae6bcbbb0fc3ab0dbbc8dcba0664b3d7c5096f;hp=4e05a71c111b28d0bdbe8b90b8e19a14039805f3;hpb=a63b640f12896a873ebf96f9fe0ef62d0794bfe7;p=mediagoblin.git

diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index 4e05a71c..68d14148 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -1,5 +1,5 @@
 # GNU MediaGoblin -- federated, autonomous media hosting
-# Copyright (C) 2011 MediaGoblin contributors.  See AUTHORS.
+# Copyright (C) 2011, 2012 MediaGoblin contributors.  See AUTHORS.
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by
@@ -14,152 +14,117 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.

-import tempfile
+from tempfile import NamedTemporaryFile
 import logging
-import os
-from celery.task import Task
-from celery import registry
-
-from mediagoblin.db.util import ObjectId
 from mediagoblin import mg_globals as mgg
-from mediagoblin.process_media import BaseProcessingFail
-from mediagoblin.process_media import mark_entry_failed
+from mediagoblin.decorators import get_workbench
+from mediagoblin.processing import \
+    create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
+from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
+
 from . import transcoders

-THUMB_SIZE = 180, 180
-MEDIUM_SIZE = 640, 640
+_log = logging.getLogger(__name__)
+_log.setLevel(logging.DEBUG)

-logger = logging.getLogger(__name__)
-logging.basicConfig()
-logger.setLevel(logging.DEBUG)
+
+class VideoTranscodingFail(BaseProcessingFail):
+    '''
+    Error raised if video transcoding fails
+    '''
+    general_message = _(u'Video transcoding failed')

-def process_video(entry):
-    """
-    Code to process a video
-
-    Much of this code is derived from the arista-transcoder script in
-    the arista PyPI package and changed to match the needs of
-    MediaGoblin
+def sniff_handler(media_file, **kw):
+    transcoder = transcoders.VideoTranscoder()
+    data = transcoder.discover(media_file.name)
+
+    _log.debug('Discovered: {0}'.format(data))
+
+    if not data:
+        _log.error('Could not discover {0}'.format(
+                kw.get('media')))
+        return False
+
+    if data['is_video'] == True:
+        return True
+
+    return False

-    This function sets up the arista video encoder in some kind of new thread
-    and attaches callbacks to that child process, hopefully, the
-    entry-complete callback will be called when the video is done.
+
+@get_workbench
+def process_video(entry, workbench=None):
     """
-    workbench = mgg.workbench_manager.create_workbench()
+    Process a video entry, transcode the queued media files (originals) and
+    create a thumbnail for the entry.

-    queued_filepath = entry['queued_media_file']
+    A Workbench() represents a local temporary dir. It is automatically
+    cleaned up when this function exits.
+    """
+    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
+
+    queued_filepath = entry.queued_media_file
     queued_filename = workbench.localized_file(
         mgg.queue_store, queued_filepath,
         'source')
+    name_builder = FilenameBuilder(queued_filename)

     medium_filepath = create_pub_filepath(
-        entry,
-        '{original}-640p.webm'.format(
-            original=os.path.splitext(
-                queued_filepath[-1])[0]  # Select the
-            ))
+        entry, name_builder.fill('{basename}-640p.webm'))

     thumbnail_filepath = create_pub_filepath(
-        entry, 'thumbnail.jpg')
-
-
-    # Create a temporary file for the video destination
-    tmp_dst = tempfile.NamedTemporaryFile()
+        entry, name_builder.fill('{basename}.thumbnail.jpg'))

+    # Create a temporary file for the video destination (cleaned up with workbench)
+    tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)
     with tmp_dst:
         # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
-        transcoder = transcoders.VideoTranscoder(queued_filename, tmp_dst.name)
+        progress_callback = ProgressCallback(entry)
+        transcoder = transcoders.VideoTranscoder()
+        transcoder.transcode(queued_filename, tmp_dst.name,
+                vp8_quality=video_config['vp8_quality'],
+                vp8_threads=video_config['vp8_threads'],
+                vorbis_quality=video_config['vorbis_quality'],
+                progress_callback=progress_callback)

-        # Push transcoded video to public storage
-        mgg.public_store.get_file(medium_filepath, 'wb').write(
-            tmp_dst.read())
+    # Push transcoded video to public storage
+    _log.debug('Saving medium...')
+    mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)
+    _log.debug('Saved medium')

-        entry['media_files']['webm_640'] = medium_filepath
+    entry.media_files['webm_640'] = medium_filepath

-        # Save the width and height of the transcoded video
-        entry['media_data']['video'] = {
-            u'width': transcoder.dst_data.videowidth,
-            u'height': transcoder.dst_data.videoheight}
+    # Save the width and height of the transcoded video
+    entry.media_data_init(
+        width=transcoder.dst_data.videowidth,
+        height=transcoder.dst_data.videoheight)

-    # Create a temporary file for the video thumbnail
-    tmp_thumb = tempfile.NamedTemporaryFile()
+    # Temporary file for the video thumbnail (cleaned up with workbench)
+    tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)

     with tmp_thumb:
         # Create a thumbnail.jpg that fits in a 180x180 square
-        transcoders.VideoThumbnailer(queued_filename, tmp_thumb.name)
-
-        # Push the thumbnail to public storage
-        mgg.public_store.get_file(thumbnail_filepath, 'wb').write(
-            tmp_thumb.read())
-
-        entry['media_files']['thumb'] = thumbnail_filepath
-
-
-    # Push original file to public storage
-    queued_file = file(queued_filename, 'rb')
-
-    with queued_file:
-        original_filepath = create_pub_filepath(
-            entry,
-            queued_filepath[-1])
-
-        with mgg.public_store.get_file(original_filepath, 'wb') as \
-                original_file:
-            original_file.write(queued_file.read())
-
-        entry['media_files']['original'] = original_filepath
-
-    mgg.queue_store.delete_file(queued_filepath)
-
-
-    # Save the MediaEntry
-    entry.save()
-
-def create_pub_filepath(entry, filename):
-    return mgg.public_store.get_unique_filepath(
-            ['media_entries',
-             unicode(entry['_id']),
-             filename])
-
-
-################################
-# Media processing initial steps
-################################
-
-class ProcessMedia(Task):
-    """
-    Pass this entry off for processing.
-    """
-    def run(self, media_id):
-        """
-        Pass the media entry off to the appropriate processing function
-        (for now just process_image...)
-        """
-        entry = mgg.database.MediaEntry.one(
-            {'_id': ObjectId(media_id)})
-
-        # Try to process, and handle expected errors.
-        try:
-            process_video(entry)
-        except BaseProcessingFail, exc:
-            mark_entry_failed(entry[u'_id'], exc)
-            return
-
-        entry['state'] = u'processed'
-        entry.save()
-
-    def on_failure(self, exc, task_id, args, kwargs, einfo):
-        """
-        If the processing failed we should mark that in the database.
-
-        Assuming that the exception raised is a subclass of BaseProcessingFail,
-        we can use that to get more information about the failure and store that
-        for conveying information to users about the failure, etc.
-        """
-        entry_id = args[0]
-        mark_entry_failed(entry_id, exc)
-
-
-process_media = registry.tasks[ProcessMedia.name]
+            transcoders.VideoThumbnailerMarkII(
+                queued_filename,
+                tmp_thumb.name,
+                180)
+
+        # Push the thumbnail to public storage
+        _log.debug('Saving thumbnail...')
+        mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)
+        entry.media_files['thumb'] = thumbnail_filepath
+
+    if video_config['keep_original']:
+        # Push original file to public storage
+        _log.debug('Saving original...')
+        original_filepath = create_pub_filepath(entry, queued_filepath[-1])
+        mgg.public_store.copy_local_to_storage(queued_filename, original_filepath)
+        entry.media_files['original'] = original_filepath
+
+    # Remove queued media file from storage and database.
+    # queued_filepath is in the task_id directory which should
+    # be removed too, but fail if the directory is not empty to be on
+    # the super-safe side.
+    mgg.queue_store.delete_file(queued_filepath)      # rm file
+    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
+    entry.queued_media_file = []
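
Reviewer note (not part of the patch): a minimal sketch of how the new sniff_handler above might be exercised by hand. It assumes an importable, configured MediaGoblin checkout with GStreamer available for transcoders.VideoTranscoder().discover(); the filename 'clip.webm' is a placeholder, not something referenced by the diff.

# Hypothetical hand-run check of sniff_handler (Python 2, matching the codebase).
from mediagoblin.media_types.video.processing import sniff_handler

# sniff_handler() only needs a file-like object exposing .name; the optional
# 'media' keyword is what it logs if discovery fails.
media_file = open('clip.webm', 'rb')
try:
    if sniff_handler(media_file, media='clip.webm'):
        print 'Detected a video; process_video() would handle this entry.'
    else:
        print 'Not recognised as video (or discovery failed).'
finally:
    media_file.close()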