Porting video to GStreamer 1.0
[mediagoblin.git] / mediagoblin / media_types / video / processing.py
index 4d1d5ea264b1baf6d35ea428db705f6cf66852b4..588af2828b7c6e4d6a96bb47497d6a77c84b85fc 100644 (file)
@@ -44,19 +44,19 @@ class VideoTranscodingFail(BaseProcessingFail):
     general_message = _(u'Video transcoding failed')
 
 
-def sniff_handler(media_file, **kw):
-    transcoder = transcoders.VideoTranscoder()
-    data = transcoder.discover(media_file.name)
+EXCLUDED_EXTS = ["nef", "cr2"]
+
+def sniff_handler(media_file, filename):
+    data = transcoders.discover(media_file.name)
 
     _log.info('Sniffing {0}'.format(MEDIA_TYPE))
     _log.debug('Discovered: {0}'.format(data))
 
     if not data:
-        _log.error('Could not discover {0}'.format(
-            kw.get('media')))
+        _log.error('Could not discover {0}'.format(filename))
         return None
 
-    if data['is_video'] is True:
+    if data.get_video_streams():
         return MEDIA_TYPE
 
     return None
@@ -66,52 +66,58 @@ def store_metadata(media_entry, metadata):
     """
     Store metadata from this video for this media entry.
     """
+    stored_metadata = dict()
+    audio_info_list = metadata.get_audio_streams()
+    if audio_info_list:
+        audio_info = audio_info_list[0]
+        stored_metadata['audiochannels'] = audio_info.get_channels()
+    # video is always there
+    video_info = metadata.get_video_streams()[0]
     # Let's pull out the easy, not having to be converted ones first
-    stored_metadata = dict(
-        [(key, metadata[key])
-         for key in [
-             "videoheight", "videolength", "videowidth",
-             "audiorate", "audiolength", "audiochannels", "audiowidth",
-             "mimetype"]
-         if key in metadata])
-
+    stored_metadata = dict()
+    audio_info_list = metadata.get_audio_streams()
+    if audio_info_list:
+        audio_info = audio_info_list[0]
+        stored_metadata['audiochannels'] = audio_info.get_channels()
+    # video is always there
+    video_info = metadata.get_video_streams()[0]
+    # Let's pull out the easy, not having to be converted ones first
+    stored_metadata['videoheight'] = video_info.get_height()
+    stored_metadata['videowidth'] = video_info.get_width()
+    stored_metadata['videolength'] = metadata.get_duration()
+    stored_metadata['mimetype'] = metadata.get_tags().get_string('mimetype')
     # We have to convert videorate into a sequence because it's a
     # special type normally..
+    stored_metadata['videorate'] = [video_info.get_framerate_num(),
+                                    video_info.get_framerate_denom()]
 
-    if "videorate" in metadata:
-        videorate = metadata["videorate"]
-        stored_metadata["videorate"] = [videorate.num, videorate.denom]
-
-    # Also make a whitelist conversion of the tags.
-    if "tags" in metadata:
-        tags_metadata = metadata['tags']
-
+    if metadata.get_tags():
+        tags_metadata = metadata.get_tags()
         # we don't use *all* of these, but we know these ones are
         # safe...
+        # get_string returns (success, value) tuple
         tags = dict(
-            [(key, tags_metadata[key])
+            [(key, tags_metadata.get_string(key)[1])
              for key in [
                  "application-name", "artist", "audio-codec", "bitrate",
                  "container-format", "copyright", "encoder",
                  "encoder-version", "license", "nominal-bitrate", "title",
                  "video-codec"]
-             if key in tags_metadata])
-        if 'date' in tags_metadata:
-            date = tags_metadata['date']
+             if tags_metadata.get_string(key)[0]])
+        (success, date) = tags_metadata.get_date('date')
+        if success:
             tags['date'] = "%s-%s-%s" % (
                 date.year, date.month, date.day)
 
         # TODO: handle timezone info; gst.get_time_zone_offset +
         #   python's tzinfo should help
-        if 'datetime' in tags_metadata:
-            dt = tags_metadata['datetime']
+        (success, dt) = tags_metadata.get_date_time('datetime')
+        if success:
             tags['datetime'] = datetime.datetime(
                 dt.get_year(), dt.get_month(), dt.get_day(), dt.get_hour(),
                 dt.get_minute(), dt.get_second(),
                 dt.get_microsecond()).isoformat()
-
-        metadata['tags'] = tags
-
+        stored_metadata['tags'] = tags
     # Only save this field if there's something to save
     if len(stored_metadata):
         media_entry.media_data_init(
@@ -155,6 +161,29 @@ class CommonVideoProcessor(MediaProcessor):
                 self.entry.media_files['best_quality'] = self.entry \
                     .media_files['webm_video']
 
+    def _skip_processing(self, keyname, **kwargs):
+        file_metadata = self.entry.get_file_metadata(keyname)
+
+        if not file_metadata:
+            return False
+        skip = True
+
+        if keyname == 'webm_video':
+            if kwargs.get('medium_size') != file_metadata.get('medium_size'):
+                skip = False
+            elif kwargs.get('vp8_quality') != file_metadata.get('vp8_quality'):
+                skip = False
+            elif kwargs.get('vp8_threads') != file_metadata.get('vp8_threads'):
+                skip = False
+            elif kwargs.get('vorbis_quality') != \
+                    file_metadata.get('vorbis_quality'):
+                skip = False
+        elif keyname == 'thumb':
+            if kwargs.get('thumb_size') != file_metadata.get('thumb_size'):
+                skip = False
+
+        return skip
+
 
     def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                   vorbis_quality=None):
@@ -173,8 +202,19 @@ class CommonVideoProcessor(MediaProcessor):
         if not vorbis_quality:
             vorbis_quality = self.video_config['vorbis_quality']
 
+        file_metadata = {'medium_size': medium_size,
+                         'vp8_threads': vp8_threads,
+                         'vp8_quality': vp8_quality,
+                         'vorbis_quality': vorbis_quality}
+
+        if self._skip_processing('webm_video', **file_metadata):
+            return
+
         # Extract metadata and keep a record of it
-        metadata = self.transcoder.discover(self.process_filename)
+        metadata = transcoders.discover(self.process_filename)
+        # metadata's stream info here is a DiscovererContainerInfo instance,
+        # it gets split into DiscovererAudioInfo and DiscovererVideoInfo;
+        # metadata itself has container-related data in tags, like video-codec
         store_metadata(self.entry, metadata)
 
         # Figure out whether or not we need to transcode this video or
@@ -197,10 +237,8 @@ class CommonVideoProcessor(MediaProcessor):
                                       vorbis_quality=vorbis_quality,
                                       progress_callback=progress_callback,
                                       dimensions=tuple(medium_size))
-
-            dst_dimensions = self.transcoder.dst_data.videowidth,\
-                self.transcoder.dst_data.videoheight
-
+            video_info = self.transcoder.dst_data.get_video_streams()[0]
+            dst_dimensions = (video_info.get_width(), video_info.get_height())
             self._keep_best()
 
             # Push transcoded video to public storage
@@ -209,6 +247,8 @@ class CommonVideoProcessor(MediaProcessor):
                          self.name_builder.fill('{basename}.medium.webm'))
             _log.debug('Saved medium')
 
+            self.entry.set_file_metadata('webm_video', **file_metadata)
+
             self.did_transcode = True
 
         # Save the width and height of the transcoded video
@@ -225,17 +265,26 @@ class CommonVideoProcessor(MediaProcessor):
         if not thumb_size:
             thumb_size = (mgg.global_config['media:thumb']['max_width'],)
 
+        if self._skip_processing('thumb', thumb_size=thumb_size):
+            return
+
         # We will only use the width so that the correct scale is kept
-        transcoders.VideoThumbnailerMarkII(
+        transcoders.capture_thumb(
             self.process_filename,
             tmp_thumb,
             thumb_size[0])
 
+        # Checking if the thumbnail was correctly created.  If it was not,
+        # then just give up.
+        if not os.path.exists(tmp_thumb):
+            return
+
         # Push the thumbnail to public storage
         _log.debug('Saving thumbnail...')
         store_public(self.entry, 'thumb', tmp_thumb,
                      self.name_builder.fill('{basename}.thumbnail.jpg'))
 
+        self.entry.set_file_metadata('thumb', thumb_size=thumb_size)
 
 class InitialProcessor(CommonVideoProcessor):
     """
@@ -406,7 +455,7 @@ class Transcoder(CommonVideoProcessor):
 
 class VideoProcessingManager(ProcessingManager):
     def __init__(self):
-        super(self.__class__, self).__init__()
+        super(VideoProcessingManager, self).__init__()
         self.add_processor(InitialProcessor)
         self.add_processor(Resizer)
         self.add_processor(Transcoder)