Video transcoding now uses GStreamer directly instead of going through Arista
author Joar Wandborg <git@wandborg.com>
Fri, 14 Oct 2011 01:17:06 +0000 (03:17 +0200)
committer Joar Wandborg <git@wandborg.com>
Fri, 14 Oct 2011 01:17:06 +0000 (03:17 +0200)
mediagoblin/media_types/video/processing.py
mediagoblin/media_types/video/transcoders.py
mediagoblin/templates/mediagoblin/media_displays/video.html

index 52047ae442e9359b4fd861af4a05b4b45bd5c2ed..09f8a0d91888c1b782e910937a1e1ce07a829c77 100644 (file)
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import Image
 import tempfile
 import pkg_resources
 import os
+import logging
 
 from celery.task import Task
 from celery import registry
@@ -29,21 +29,9 @@ from mediagoblin.process_media.errors import BaseProcessingFail, BadMediaFail
 from mediagoblin.process_media import mark_entry_failed
 from . import transcoders
 
-import gobject
-gobject.threads_init()
-
-import gst
-import arista
-import logging
-
-from arista.transcoder import TranscoderOptions
-
 THUMB_SIZE = 180, 180
 MEDIUM_SIZE = 640, 640
 
-ARISTA_DEVICE = 'devices/web-advanced.json'
-ARISTA_PRESET = None
-
 loop = None  # Is this even used?
 
 logger = logging.getLogger(__name__)
@@ -63,11 +51,6 @@ def process_video(entry):
     and attaches callbacks to that child process, hopefully, the
     entry-complete callback will be called when the video is done.
     """
-
-    ''' Empty dict, will store data which will be passed to the callback
-    functions '''
-    info = {}
-
     workbench = mgg.workbench_manager.create_workbench()
 
     queued_filepath = entry['queued_media_file']
@@ -75,57 +58,65 @@ def process_video(entry):
         mgg.queue_store, queued_filepath,
         'source')
 
-    ''' Initialize arista '''
-    arista.init()
+    medium_filepath = create_pub_filepath(
+        entry, '640p.webm')
 
-    ''' Loads a preset file which specifies the format of the output video'''
-    device = arista.presets.load(
-        pkg_resources.resource_filename(
-            __name__,
-            ARISTA_DEVICE))
+    thumbnail_filepath = create_pub_filepath(
+        entry, 'thumbnail.jpg')
 
-    # FIXME: Is this needed since we only transcode one video?
-    queue = arista.queue.TranscodeQueue()
 
-    info['tmp_file'] = tempfile.NamedTemporaryFile(delete=False)
+    # Create a temporary file for the video destination
+    tmp_dst = tempfile.NamedTemporaryFile()
 
-    info['medium_filepath'] = create_pub_filepath(
-        entry, 'video.webm')
+    with tmp_dst:
+        # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
+        transcoder = transcoders.VideoTranscoder(queued_filename, tmp_dst.name)
 
-    info['thumb_filepath'] = create_pub_filepath(
-        entry, 'thumbnail.jpg')
+        # Push transcoded video to public storage
+        mgg.public_store.get_file(medium_filepath, 'wb').write(
+            tmp_dst.read())
+
+        entry['media_files']['webm_640'] = medium_filepath
+
+        # Save the width and height of the transcoded video
+        entry['media_data']['video'] = {
+            u'width': transcoder.dst_data.videowidth,
+            u'height': transcoder.dst_data.videoheight}
 
-    # With the web-advanced.json device preset, this will select
-    # 480p WebM w/ OGG Vorbis
-    preset = device.presets[ARISTA_PRESET or device.default]
+    # Create a temporary file for the video thumbnail
+    tmp_thumb = tempfile.NamedTemporaryFile()
 
-    logger.debug('preset: {0}'.format(preset))
+    with tmp_thumb:
+        # Create a thumbnail.jpg that fits in a 180x180 square
+        transcoders.VideoThumbnailer(queued_filename, tmp_thumb.name)
 
-    opts = TranscoderOptions(
-        'file://' + queued_filename,  # Arista did it this way, IIRC
-        preset,
-        info['tmp_file'].name)
+        # Push the thumbnail to public storage
+        mgg.public_store.get_file(thumbnail_filepath, 'wb').write(
+            tmp_thumb.read())
 
-    queue.append(opts)
+        entry['media_files']['thumb'] = thumbnail_filepath
 
-    info['entry'] = entry
 
-    queue.connect("entry-start", _transcoding_start, info)
-    queue.connect("entry-pass-setup", _transcoding_pass_setup, info)
-    queue.connect("entry-error", _transcoding_error, info)
-    queue.connect("entry-complete", _transcoding_complete, info)
+    # Push original file to public storage
+    queued_file = file(queued_filename, 'rb')
 
-    # Add data to the info dict, making it available to the callbacks
-    info['loop'] = gobject.MainLoop()
-    info['queued_filename'] = queued_filename
-    info['queued_filepath'] = queued_filepath
-    info['workbench'] = workbench
-    info['preset'] = preset
+    with queued_file:
+        original_filepath = create_pub_filepath(
+            entry,
+            queued_filepath[-1])
 
-    info['loop'].run()
+        with mgg.public_store.get_file(original_filepath, 'wb') as \
+                original_file:
+            original_file.write(queued_file.read())
 
-    logger.debug('info: {0}'.format(info))
+            entry['media_files']['original'] = original_filepath
 
+    mgg.queue_store.delete_file(queued_filepath)
+
+
+    # Save the MediaEntry
+    entry.save()
+    
 
 def __create_thumbnail(info):
     thumbnail = tempfile.NamedTemporaryFile()
@@ -139,6 +130,7 @@ def __create_thumbnail(info):
     mgg.public_store.get_file(info['thumb_filepath'], 'wb').write(
         thumbnail.read())
 
+
     info['entry']['media_files']['thumb'] = info['thumb_filepath']
     info['entry'].save()
 
@@ -267,6 +259,9 @@ class ProcessMedia(Task):
             mark_entry_failed(entry[u'_id'], exc)
             return
 
+        entry['state'] = u'processed'
+        entry.save()
+
     def on_failure(self, exc, task_id, args, kwargs, einfo):
         """
         If the processing failed we should mark that in the database.
index d305d5fce84463608c93addeaffa8adbccb8ac47..8115bb385b063c0112a1e97b5df0b59fdbf8aaaf 100644 (file)
@@ -17,7 +17,7 @@
 from __future__ import division
 import sys
 import logging
-
+import pdb
 
 _log = logging.getLogger(__name__)
 logging.basicConfig()
@@ -28,14 +28,17 @@ try:
     gobject.threads_init()
 except:
     _log.error('Could not import gobject')
+    raise Exception()
 
 try:
     import pygst
     pygst.require('0.10')
     import gst
+    from gst import pbutils
     from gst.extend import discoverer
 except:
     _log.error('pygst could not be imported')
+    raise Exception()
 
 
 class VideoThumbnailer:
@@ -201,12 +204,14 @@ class VideoThumbnailer:
         gobject.idle_add(self.loop.quit)
 
 
-class VideoTranscoder():
+class VideoTranscoder:
     '''
     Video transcoder
 
+    Transcodes the SRC video file to a VP8 WebM video file at DST
+
     TODO:
-     - Currently not working
+    - Audio pipeline
     '''
     def __init__(self, src, dst, **kwargs):
         _log.info('Initializing VideoTranscoder...')
@@ -215,7 +220,7 @@ class VideoTranscoder():
         self.source_path = src
         self.destination_path = dst
 
-        self.destination_dimensions = kwargs.get('dimensions') or (180, 180)
+        self.destination_dimensions = kwargs.get('dimensions') or (640, 640)
 
         if not type(self.destination_dimensions) == tuple:
             raise Exception('dimensions must be tuple: (width, height)')
@@ -253,12 +258,14 @@ class VideoTranscoder():
 
         self.data = data
 
+        self._on_discovered()
+
         # Tell the transcoding pipeline to start running
         self.pipeline.set_state(gst.STATE_PLAYING)
         _log.info('Transcoding...')
 
     def _on_discovered(self):
-        self.__setup_capsfilter()
+        self.__setup_videoscale_capsfilter()
 
     def _setup_pass(self):
         self.pipeline = gst.Pipeline('VideoTranscoderPipeline')
@@ -276,7 +283,8 @@ class VideoTranscoder():
         self.pipeline.add(self.ffmpegcolorspace)
 
         self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
-        self.videoscale.set_property('method', 'bilinear')
+        self.videoscale.set_property('method', 2)  # I'm not sure this works
+        self.videoscale.set_property('add-borders', 0)
         self.pipeline.add(self.videoscale)
 
         self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
@@ -286,16 +294,36 @@ class VideoTranscoder():
         self.vp8enc.set_property('quality', 6)
         self.vp8enc.set_property('threads', 2)
         self.vp8enc.set_property('speed', 2)
+        self.pipeline.add(self.vp8enc)
+
+
+        # Audio
+        self.audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')
+        self.pipeline.add(self.audioconvert)
+
+        self.vorbisenc = gst.element_factory_make('vorbisenc', 'vorbisenc')
+        self.vorbisenc.set_property('quality', 0.7)
+        self.pipeline.add(self.vorbisenc)
+
 
         self.webmmux = gst.element_factory_make('webmmux', 'webmmux')
         self.pipeline.add(self.webmmux)
 
         self.filesink = gst.element_factory_make('filesink', 'filesink')
+        self.filesink.set_property('location', self.destination_path)
+        self.pipeline.add(self.filesink)
 
         self.filesrc.link(self.decoder)
         self.ffmpegcolorspace.link(self.videoscale)
         self.videoscale.link(self.capsfilter)
-        self.vp8enc.link(self.filesink)
+        self.capsfilter.link(self.vp8enc)
+        self.vp8enc.link(self.webmmux)
+
+        # Audio
+        self.audioconvert.link(self.vorbisenc)
+        self.vorbisenc.link(self.webmmux)
+
+        self.webmmux.link(self.filesink)
 
         self._setup_bus()
 
@@ -303,39 +331,43 @@ class VideoTranscoder():
         '''
         Callback called when ``decodebin2`` has a pad that we can connect to
         '''
-        pad.link(
-            self.ffmpegcolorspace.get_pad('sink'))
+        _log.debug('Linked {0}'.format(pad))
+
+        #pdb.set_trace()
+
+        if self.ffmpegcolorspace.get_pad_template('sink')\
+                .get_caps().intersect(pad.get_caps()).is_empty():
+            pad.link(
+                self.audioconvert.get_pad('sink'))
+        else:
+            pad.link(
+                self.ffmpegcolorspace.get_pad('sink'))
 
     def _setup_bus(self):
         self.bus = self.pipeline.get_bus()
         self.bus.add_signal_watch()
         self.bus.connect('message', self._on_message)
 
-    def __setup_capsfilter(self):
-        thumbsizes = self.calculate_resize()  # Returns tuple with (width, height)
-
-        self.capsfilter.set_property(
-            'caps',
-            gst.caps_from_string('video/x-raw-rgb, width={width}, height={height}'.format(
-                    width=thumbsizes[0],
-                    height=thumbsizes[1]
-                    )))
-
-    def calculate_resize(self):
-        x_ratio = self.destination_dimensions[0] / self.data.videowidth
-        y_ratio = self.destination_dimensions[1] / self.data.videoheight
+    def __setup_videoscale_capsfilter(self):
+        caps = ['video/x-raw-yuv', 'pixel-aspect-ratio=1/1']
 
         if self.data.videoheight > self.data.videowidth:
-            # We're dealing with a portrait!
-            dimensions = (
-                int(self.data.videowidth * y_ratio),
-                180)
+            # Whoa! We have ourselves a portrait video!
+            caps.append('height={0}'.format(
+                    self.destination_dimensions[1]))
         else:
-            dimensions = (
-                180,
-                int(self.data.videoheight * x_ratio))
+            # It's a landscape, phew, how normal.
+            caps.append('width={0}'.format(
+                    self.destination_dimensions[0]))
 
-        return dimensions
+        self.capsfilter.set_property(
+            'caps',
+            gst.caps_from_string(
+                ', '.join(caps)))
+        gst.DEBUG_BIN_TO_DOT_FILE (
+            self.pipeline,
+            gst.DEBUG_GRAPH_SHOW_ALL,
+            'supersimple-debug-graph')
 
     def _on_message(self, bus, message):
         _log.debug((bus, message))
@@ -343,12 +375,25 @@ class VideoTranscoder():
         t = message.type
 
         if t == gst.MESSAGE_EOS:
-            self.__stop()
+            self._discover_dst_and_stop()
             _log.info('Done')
         elif t == gst.MESSAGE_ERROR:
             _log.error((bus, message))
             self.__stop()
 
+    def _discover_dst_and_stop(self):
+        self.dst_discoverer = discoverer.Discoverer(self.destination_path)
+
+        self.dst_discoverer.connect('discovered', self.__dst_discovered)
+
+        self.dst_discoverer.discover()
+
+
+    def __dst_discovered(self, data, is_media):
+        self.dst_data = data
+
+        self.__stop()
+
     def __stop(self):
         _log.debug(self.loop)
 
@@ -358,6 +403,9 @@ class VideoTranscoder():
 
 
 if __name__ == '__main__':
+    import os
+    os.environ["GST_DEBUG_DUMP_DOT_DIR"] = "/tmp"
+    os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
     from optparse import OptionParser
 
     parser = OptionParser(
@@ -396,4 +444,5 @@ if __name__ == '__main__':
     if options.action == 'thumbnail':
         VideoThumbnailer(*args)
     elif options.action == 'video':
-        VideoTranscoder(*args)
+        transcoder = VideoTranscoder(*args)
+        pdb.set_trace()
index 22b19240594596aaf14275bbbf0e8a4d951c6628..bff9889a17094f3f410980b44d00da4570a6f34c 100644 (file)
@@ -1,16 +1,19 @@
 {% extends 'mediagoblin/user_pages/media.html' %}
 {% block mediagoblin_media %}
-  <video width="640" height="" controls>
+  <video width="{{ media.media_data.video.width }}"
+        height="{{ media.media_data.video.height }}" controls="controls">
     <source src="{{ request.app.public_store.file_url(
-                media['media_files']['medium']) }}" 
+                media['media_files']['webm_640']) }}" 
            type='video/webm; codecs="vp8, vorbis"' />
   </video>
   {% if 'original' in media.media_files %}
+  <p>
     <a href="{{ request.app.public_store.file_url(
             media['media_files']['original']) }}">
       {%- trans -%}
         Original
       {%- endtrans -%}
     </a>
+  </p>
   {% endif %}    
 {% endblock %}