Porting video to GStreamer 1.0
[mediagoblin.git] / mediagoblin / media_types / video / transcoders.py
index 8115bb385b063c0112a1e97b5df0b59fdbf8aaaf..d53cabc6c25c520fa88477247eb968724cab75bc 100644
@@ -1,5 +1,5 @@
 # GNU MediaGoblin -- federated, autonomous media hosting
-# Copyright (C) 2011 MediaGoblin contributors.  See AUTHORS.
+# Copyright (C) 2011, 2012 MediaGoblin contributors.  See AUTHORS.
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from __future__ import division
+
+import os
 import sys
 import logging
-import pdb
-
-_log = logging.getLogger(__name__)
-logging.basicConfig()
-_log.setLevel(logging.DEBUG)
-
-try:
-    import gobject
-    gobject.threads_init()
-except:
-    _log.error('Could not import gobject')
-    raise Exception()
+import multiprocessing
+from mediagoblin.media_types.tools import discover
 
-try:
-    import pygst
-    pygst.require('0.10')
-    import gst
-    from gst import pbutils
-    from gst.extend import discoverer
-except:
-    _log.error('pygst could not be imported')
-    raise Exception()
-
-
-class VideoThumbnailer:
-    '''
-    Creates a video thumbnail
-
-     - Sets up discoverer & transcoding pipeline.
-       Discoverer finds out information about the media file
-     - Launches gobject.MainLoop, this triggers the discoverer to start running
-     - Once the discoverer is done, it calls the __discovered callback function
-     - The __discovered callback function launches the transcoding process
-     - The _on_message callback is called from the transcoding process until it gets a 
-       message of type gst.MESSAGE_EOS, then it calls __stop which shuts down the
-       gobject.MainLoop
-    '''
-    def __init__(self, src, dst, **kwargs):
-        _log.info('Initializing VideoThumbnailer...')
-
-        self.loop = gobject.MainLoop()
-        self.source_path = src
-        self.destination_path = dst
-
-        self.destination_dimensions = kwargs.get('dimensions') or (180, 180)
-
-        if not type(self.destination_dimensions) == tuple:
-            raise Exception('dimensions must be tuple: (width, height)')
+#os.environ['GST_DEBUG'] = '4,python:4'
 
-        self._setup()
-        self._run()
-
-    def _setup(self):
-        self._setup_pass()
-        self._setup_discover()
-
-    def _run(self):
-        _log.info('Discovering...')
-        self.discoverer.discover()
-        _log.info('Done')
-
-        _log.debug('Initializing MainLoop()')
-        self.loop.run()
-
-    def _setup_discover(self):
-        self.discoverer = discoverer.Discoverer(self.source_path)
-
-        # Connect self.__discovered to the 'discovered' event
-        self.discoverer.connect('discovered', self.__discovered)
-
-    def __discovered(self, data, is_media):
-        '''
-        Callback for media discoverer.
-        '''
-        if not is_media:
-            self.__stop()
-            raise Exception('Could not discover {0}'.format(self.source_path))
-
-        _log.debug('__discovered, data: {0}'.format(data))
-
-        self.data = data
-
-        self._on_discovered()
-
-        # Tell the transcoding pipeline to start running
-        self.pipeline.set_state(gst.STATE_PLAYING)
-        _log.info('Transcoding...')
-
-    def _on_discovered(self):
-        self.__setup_capsfilter()
-
-    def _setup_pass(self):
-        self.pipeline = gst.Pipeline('VideoThumbnailerPipeline')
-
-        self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
-        self.filesrc.set_property('location', self.source_path)
-        self.pipeline.add(self.filesrc)
-
-        self.decoder = gst.element_factory_make('decodebin2', 'decoder')
-
-        self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
-        self.pipeline.add(self.decoder)
-
-        self.ffmpegcolorspace = gst.element_factory_make('ffmpegcolorspace', 'ffmpegcolorspace')
-        self.pipeline.add(self.ffmpegcolorspace)
-
-        self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
-        self.videoscale.set_property('method', 'bilinear')
-        self.pipeline.add(self.videoscale)
-
-        self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
-        self.pipeline.add(self.capsfilter)
-
-        self.jpegenc = gst.element_factory_make('jpegenc', 'jpegenc')
-        self.pipeline.add(self.jpegenc)
-
-        self.filesink = gst.element_factory_make('filesink', 'filesink')
-        self.filesink.set_property('location', self.destination_path)
-        self.pipeline.add(self.filesink)
-
-        # Link all the elements together
-        self.filesrc.link(self.decoder)
-        self.ffmpegcolorspace.link(self.videoscale)
-        self.videoscale.link(self.capsfilter)
-        self.capsfilter.link(self.jpegenc)
-        self.jpegenc.link(self.filesink)
-
-        self._setup_bus()
-
-    def _setup_bus(self):
-        self.bus = self.pipeline.get_bus()
-        self.bus.add_signal_watch()
-        self.bus.connect('message', self._on_message)
-
-    def __setup_capsfilter(self):
-        thumbsizes = self.calculate_resize()  # Returns tuple with (width, height)
-
-        self.capsfilter.set_property(
-            'caps',
-            gst.caps_from_string('video/x-raw-rgb, width={width}, height={height}'.format(
-                    width=thumbsizes[0],
-                    height=thumbsizes[1]
-                    )))
-
-    def calculate_resize(self):
-        x_ratio = self.destination_dimensions[0] / self.data.videowidth
-        y_ratio = self.destination_dimensions[1] / self.data.videoheight
-
-        if self.data.videoheight > self.data.videowidth:
-            # We're dealing with a portrait!
-            dimensions = (
-                int(self.data.videowidth * y_ratio),
-                180)
-        else:
-            dimensions = (
-                180,
-                int(self.data.videoheight * x_ratio))
-
-        return dimensions
-
-    def _on_message(self, bus, message):
-        _log.debug((bus, message))
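+# Stash sys.argv while GStreamer is imported and initialized: the 0.10
+# bindings parsed argv at import time, and we keep that precaution here
+# so GStreamer's option parsing cannot swallow our own arguments; argv
+# is restored below.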
+old_argv = sys.argv
+sys.argv = []
 
-        t = message.type
+import gi
+gi.require_version('Gst', '1.0')
+from gi.repository import GObject, Gst, GstPbutils
+Gst.init(None)
 
-        if t == gst.MESSAGE_EOS:
-            self.__stop()
-            _log.info('Done')
-        elif t == gst.MESSAGE_ERROR:
-            _log.error((bus, message))
-            self.__stop()
-
-    def _on_dynamic_pad(self, dbin, pad, islast):
-        '''
-        Callback called when ``decodebin2`` has a pad that we can connect to
-        '''
-        pad.link(
-            self.ffmpegcolorspace.get_pad('sink'))
-
-    def __stop(self):
-        _log.debug(self.loop)
-
-        self.pipeline.set_state(gst.STATE_NULL)
+sys.argv = old_argv
+import struct
+try:
+    from PIL import Image
+except ImportError:
+    import Image
 
-        gobject.idle_add(self.loop.quit)
+_log = logging.getLogger(__name__)
 
+CPU_COUNT = 2
 
-class VideoTranscoder:
+try:
+    CPU_COUNT = multiprocessing.cpu_count()
+except NotImplementedError:
+    _log.warning('multiprocessing.cpu_count not implemented')
+
+os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
+
+
+def capture_thumb(video_path, dest_path, width=None, height=None, percent=0.5):
+    def pad_added(element, pad, connect_to):
+        '''Callback for 'pad-added': dynamically link the decoded video pad'''
+        caps = pad.query_caps(None)
+        name = caps.to_string()
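+        # 'name' is a full caps string, for example something like
+        # 'video/x-raw, format=(string)I420, width=(int)640, ...'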
+        _log.debug('on_pad_added: {0}'.format(name))
+        if name.startswith('video') and not connect_to.is_linked():
+            pad.link(connect_to)
+
+    # construct the pipeline: uridecodebin ! videoconvert ! videoscale
+    # ! appsink, with the scaling CAPS set on appsink itself
+    pipeline = Gst.Pipeline()
+    uridecodebin = Gst.ElementFactory.make('uridecodebin', None)
+    uridecodebin.set_property('uri', 'file://{0}'.format(video_path))
+    videoconvert = Gst.ElementFactory.make('videoconvert', None)
+    uridecodebin.connect('pad-added', pad_added,
+                         videoconvert.get_static_pad('sink'))
+    videoscale = Gst.ElementFactory.make('videoscale', None)
+
+    # create caps for video scaling
+    caps_struct = Gst.Structure.new_empty('video/x-raw')
+    caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
+    caps_struct.set_value('format', 'RGB')
+    if height:
+        caps_struct.set_value('height', height)
+    if width:
+        caps_struct.set_value('width', width)
+    caps = Gst.Caps.new_empty()
+    caps.append_structure(caps_struct)
+
+    # sink everything to memory
+    appsink = Gst.ElementFactory.make('appsink', None)
+    appsink.set_property('caps', caps)
+
+    # add everything to pipeline
+    elements = [uridecodebin, videoconvert, videoscale, appsink]
+    for e in elements:
+        pipeline.add(e)
+    videoconvert.link(videoscale)
+    videoscale.link(appsink)
+
+    # The pipeline is built; before grabbing a frame, pause it and
+    # seek to the requested position (50% of the file by default)
+    pipeline.set_state(Gst.State.PAUSED)
+    # timeout of 3 seconds below was set experimentally
+    state = pipeline.get_state(Gst.SECOND * 3)
+    if state[0] != Gst.StateChangeReturn.SUCCESS:
+        _log.warning('state change failed, {0}'.format(state))
+        return
+
+    # get duration
+    (success, duration) = pipeline.query_duration(Gst.Format.TIME)
+    if not success:
+        _log.warning('query_duration failed')
+        return
+
+    seek_to = int(duration * int(percent * 100) / 100)
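+    # e.g. percent=0.5 on a 60-second clip yields a seek_to at the
+    # 30-second mark (duration and seek_to are both in nanoseconds)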
+    _log.debug('Seeking to {0} of {1}'.format(
+            float(seek_to) / Gst.SECOND, float(duration) / Gst.SECOND))
+    seek = pipeline.seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH, seek_to)
+    if not seek:
+        _log.warning('seek failed')
+        return
+
+    # get a sample, retrieve its format and save the frame
+    sample = appsink.emit('pull-preroll')
+    if not sample:
+        _log.warning('could not get sample')
+        return
+    caps = sample.get_caps()
+    if not caps:
+        _log.warning('could not get snapshot format')
+        return
+    structure = caps.get_structure(0)
+    (success, width) = structure.get_int('width')
+    (success, height) = structure.get_int('height')
+    buffer = sample.get_buffer()
+
+    # get the image from the buffer and save it to disk
+    im = Image.frombytes('RGB', (width, height),
+                         buffer.extract_dup(0, buffer.get_size()))
+    im.save(dest_path)
+    _log.info('thumbnail saved to {0}'.format(dest_path))
+
+    # cleanup
+    pipeline.set_state(Gst.State.NULL)
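+
+# A minimal usage sketch (the paths are hypothetical):
+#
+#     capture_thumb('/tmp/video.webm', '/tmp/thumb.jpg', width=180)
+#
+# decodes the file, seeks to its midpoint and writes an RGB JPEG
+# scaled to 180 pixels wide.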
+
+
+class VideoTranscoder(object):
     '''
     Video transcoder
 
     Transcodes the SRC video file to a VP8 WebM video file at DST
 
-    TODO:
-    - Audio pipeline
+     - Produces a WebM file with VP8 video and Vorbis audio.
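+
+    A minimal usage sketch (paths are hypothetical):
+
+        transcoder = VideoTranscoder()
+        transcoder.transcode('/tmp/src.avi', '/tmp/dst.webm',
+                             progress_callback=lambda percent: None)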
     '''
-    def __init__(self, src, dst, **kwargs):
+    def __init__(self):
         _log.info('Initializing VideoTranscoder...')
+        self.progress_percentage = None
+        self.loop = GObject.MainLoop()
 
-        self.loop = gobject.MainLoop()
+    def transcode(self, src, dst, **kwargs):
+        '''
+        Transcode a video file into a 'medium'-sized version.
+        '''
         self.source_path = src
         self.destination_path = dst
 
-        self.destination_dimensions = kwargs.get('dimensions') or (640, 640)
+        # vp8enc options
+        self.destination_dimensions = kwargs.get('dimensions', (640, 640))
+        self.vp8_quality = kwargs.get('vp8_quality', 8)
+        # Number of threads used by vp8enc:
+        # number of real cores - 1 as per recommendation on
+        # <http://www.webmproject.org/tools/encoder-parameters/#6-multi-threaded-encode-and-decode>
+        self.vp8_threads = kwargs.get('vp8_threads', CPU_COUNT - 1)
 
-        if not type(self.destination_dimensions) == tuple:
-            raise Exception('dimensions must be tuple: (width, height)')
+        # A passed-in value of 0 means auto-detect. dict.get() only
+        # falls back to its default when the key is missing, so an
+        # explicit 0 still reaches this point; the same check also
+        # covers single-core machines, where CPU_COUNT - 1 above is 0.
+        if self.vp8_threads == 0:
+            self.vp8_threads = CPU_COUNT
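+        # For example, on a (hypothetical) 4-core machine the default
+        # above is 3 threads, an explicit vp8_threads=0 becomes 4, and
+        # on a single core both paths end up with 1 thread.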
 
-        self._setup()
-        self._run()
+        # vorbisenc options
+        self.vorbis_quality = kwargs.get('vorbis_quality', 0.3)
 
-    def _setup(self):
-        self._setup_pass()
-        self._setup_discover()
+        self._progress_callback = kwargs.get('progress_callback')
 
-    def _run(self):
-        _log.info('Discovering...')
-        self.discoverer.discover()
-        _log.info('Done')
+        if not isinstance(self.destination_dimensions, tuple):
+            raise Exception('dimensions must be tuple: (width, height)')
 
+        self._setup_pipeline()
+        self.data = discover(self.source_path)
+        self._link_elements()
+        self.__setup_videoscale_capsfilter()
+        self.pipeline.set_state(Gst.State.PLAYING)
+        _log.info('Transcoding...')
         _log.debug('Initializing MainLoop()')
         self.loop.run()
 
-    def _setup_discover(self):
-        self.discoverer = discoverer.Discoverer(self.source_path)
-
-        # Connect self.__discovered to the 'discovered' event
-        self.discoverer.connect('discovered', self.__discovered)
-
-    def __discovered(self, data, is_media):
-        '''
-        Callback for media discoverer.
-        '''
-        if not is_media:
-            self.__stop()
-            raise Exception('Could not discover {0}'.format(self.source_path))
-
-        _log.debug('__discovered, data: {0}'.format(data))
-
-        self.data = data
-
-        self._on_discovered()
-
-        # Tell the transcoding pipeline to start running
-        self.pipeline.set_state(gst.STATE_PLAYING)
-        _log.info('Transcoding...')
-
-    def _on_discovered(self):
-        self.__setup_videoscale_capsfilter()
 
-    def _setup_pass(self):
-        self.pipeline = gst.Pipeline('VideoTranscoderPipeline')
+    def _setup_pipeline(self):
+        _log.debug('Setting up transcoding pipeline')
+        # Create the pipeline bin.
+        self.pipeline = Gst.Pipeline.new('VideoTranscoderPipeline')
 
-        self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
+        # Create all GStreamer elements, starting with
+        # filesrc & decoder
+        self.filesrc = Gst.ElementFactory.make('filesrc', 'filesrc')
         self.filesrc.set_property('location', self.source_path)
         self.pipeline.add(self.filesrc)
 
-        self.decoder = gst.element_factory_make('decodebin2', 'decoder')
-
-        self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
+        self.decoder = Gst.ElementFactory.make('decodebin', 'decoder')
+        self.decoder.connect('pad-added', self._on_dynamic_pad)
         self.pipeline.add(self.decoder)
 
-        self.ffmpegcolorspace = gst.element_factory_make('ffmpegcolorspace', 'ffmpegcolorspace')
-        self.pipeline.add(self.ffmpegcolorspace)
+        # Video elements
+        self.videoqueue = Gst.ElementFactory.make('queue', 'videoqueue')
+        self.pipeline.add(self.videoqueue)
+
+        self.videorate = Gst.ElementFactory.make('videorate', 'videorate')
+        self.pipeline.add(self.videorate)
+
+        self.videoconvert = Gst.ElementFactory.make('videoconvert',
+                                                    'videoconvert')
+        self.pipeline.add(self.videoconvert)
 
-        self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
-        self.videoscale.set_property('method', 2)  # I'm not sure this works
-        self.videoscale.set_property('add-borders', 0)
+        self.videoscale = Gst.ElementFactory.make('videoscale', 'videoscale')
         self.pipeline.add(self.videoscale)
 
-        self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
+        self.capsfilter = Gst.ElementFactory.make('capsfilter', 'capsfilter')
         self.pipeline.add(self.capsfilter)
 
-        self.vp8enc = gst.element_factory_make('vp8enc', 'vp8enc')
-        self.vp8enc.set_property('quality', 6)
-        self.vp8enc.set_property('threads', 2)
-        self.vp8enc.set_property('speed', 2)
+        self.vp8enc = Gst.ElementFactory.make('vp8enc', 'vp8enc')
+        self.vp8enc.set_property('threads', self.vp8_threads)
         self.pipeline.add(self.vp8enc)
 
+        # Audio elements
+        self.audioqueue = Gst.ElementFactory.make('queue', 'audioqueue')
+        self.pipeline.add(self.audioqueue)
 
-        # Audio
-        self.audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')
+        self.audiorate = Gst.ElementFactory.make('audiorate', 'audiorate')
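+        # the 'tolerance' property is in nanoseconds: 80000000 ns = 80 ms
+        # of timestamp drift allowed before audiorate corrects the stream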
+        self.audiorate.set_property('tolerance', 80000000)
+        self.pipeline.add(self.audiorate)
+
+        self.audioconvert = Gst.ElementFactory.make('audioconvert', 'audioconvert')
         self.pipeline.add(self.audioconvert)
 
-        self.vorbisenc = gst.element_factory_make('vorbisenc', 'vorbisenc')
-        self.vorbisenc.set_property('quality', 0.7)
-        self.pipeline.add(self.vorbisenc)
+        self.audiocapsfilter = Gst.ElementFactory.make('capsfilter',
+                                                       'audiocapsfilter')
+        audiocaps = Gst.Caps.new_empty()
+        audiocaps_struct = Gst.Structure.new_empty('audio/x-raw')
+        audiocaps.append_structure(audiocaps_struct)
+        self.audiocapsfilter.set_property('caps', audiocaps)
+        self.pipeline.add(self.audiocapsfilter)
 
+        self.vorbisenc = Gst.ElementFactory.make('vorbisenc', 'vorbisenc')
+        self.vorbisenc.set_property('quality', self.vorbis_quality)
+        self.pipeline.add(self.vorbisenc)
 
-        self.webmmux = gst.element_factory_make('webmmux', 'webmmux')
+        # WebMmux & filesink
+        self.webmmux = Gst.ElementFactory.make('webmmux', 'webmmux')
         self.pipeline.add(self.webmmux)
 
-        self.filesink = gst.element_factory_make('filesink', 'filesink')
+        self.filesink = Gst.ElementFactory.make('filesink', 'filesink')
         self.filesink.set_property('location', self.destination_path)
         self.pipeline.add(self.filesink)
 
+        # Progressreport
+        self.progressreport = Gst.ElementFactory.make(
+            'progressreport', 'progressreport')
+        # Update every second
+        self.progressreport.set_property('update-freq', 1)
+        self.progressreport.set_property('silent', True)
+        self.pipeline.add(self.progressreport)
+
+    def _link_elements(self):
+        '''
+        Link all the elements
+
+        This code depends on data from the discoverer (self.data) and
+        is called from transcode() once discovery has finished
+        '''
+        _log.debug('linking elements')
+        # Link the filesrc element to the decoder. The decoder then
+        # emits 'pad-added', and _on_dynamic_pad links each decoded src
+        # pad to either the video or the audio branch
         self.filesrc.link(self.decoder)
-        self.ffmpegcolorspace.link(self.videoscale)
+        # link the rest
+        self.videoqueue.link(self.videorate)
+        self.videorate.link(self.videoconvert)
+        self.videoconvert.link(self.videoscale)
         self.videoscale.link(self.capsfilter)
         self.capsfilter.link(self.vp8enc)
         self.vp8enc.link(self.webmmux)
 
-        # Audio
-        self.audioconvert.link(self.vorbisenc)
-        self.vorbisenc.link(self.webmmux)
-
-        self.webmmux.link(self.filesink)
-
+        if self.data.get_audio_streams():
+            # Link all the audio elements in a row to webmmux
+            self.audioqueue.link(self.audiorate)
+            self.audiorate.link(self.audioconvert)
+            self.audioconvert.link(self.audiocapsfilter)
+            self.audiocapsfilter.link(self.vorbisenc)
+            self.vorbisenc.link(self.webmmux)
+        self.webmmux.link(self.progressreport)
+        self.progressreport.link(self.filesink)
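+
+        # The assembled graph corresponds roughly to this
+        # gst-launch-1.0 sketch (element properties omitted; the audio
+        # branch is linked only when the source has audio):
+        #
+        #   filesrc ! decodebin name=d
+        #   d. ! queue ! videorate ! videoconvert ! videoscale
+        #      ! capsfilter ! vp8enc ! mux.
+        #   d. ! queue ! audiorate ! audioconvert ! capsfilter
+        #      ! vorbisenc ! mux.
+        #   webmmux name=mux ! progressreport ! filesink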
+
+        # Setup the message bus and connect _on_message to the pipeline
         self._setup_bus()
 
-    def _on_dynamic_pad(self, dbin, pad, islast):
+    def _on_dynamic_pad(self, dbin, pad):
         '''
-        Callback called when ``decodebin2`` has a pad that we can connect to
+        Callback called when ``decodebin`` has a pad that we can connect to
         '''
-        _log.debug('Linked {0}'.format(pad))
-
-        #pdb.set_trace()
-
-        if self.ffmpegcolorspace.get_pad_template('sink')\
-                .get_caps().intersect(pad.get_caps()).is_empty():
-            pad.link(
-                self.audioconvert.get_pad('sink'))
+        # Intersect the video branch's sink pad template caps with the
+        # new pad's caps; an empty intersection means the pad does not
+        # carry video.
+        if (self.videorate.get_static_pad('sink').get_pad_template()
+                .get_caps().intersect(pad.query_caps()).is_empty()):
+            # It is NOT a video src pad.
+            pad.link(self.audioqueue.get_static_pad('sink'))
         else:
-            pad.link(
-                self.ffmpegcolorspace.get_pad('sink'))
+            # It IS a video src pad.
+            _log.debug('linking video to the pad dynamically')
+            pad.link(self.videoqueue.get_static_pad('sink'))
 
     def _setup_bus(self):
         self.bus = self.pipeline.get_bus()
@@ -349,71 +322,74 @@ class VideoTranscoder:
         self.bus.connect('message', self._on_message)
 
     def __setup_videoscale_capsfilter(self):
-        caps = ['video/x-raw-yuv', 'pixel-aspect-ratio=1/1']
-
-        if self.data.videoheight > self.data.videowidth:
-            # Whoa! We have ourselves a portrait video!
-            caps.append('height={0}'.format(
-                    self.destination_dimensions[1]))
+        '''
+        Sets up the output format (width, height) for the video
+        '''
+        caps_struct = Gst.Structure.new_empty('video/x-raw')
+        caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
+        caps_struct.set_value('framerate', Gst.Fraction(30, 1))
+        video_info = self.data.get_video_streams()[0]
+        if video_info.get_height() > video_info.get_width():
+            # portrait
+            caps_struct.set_value('height', self.destination_dimensions[1])
         else:
-            # It's a landscape, phew, how normal.
-            caps.append('width={0}'.format(
-                    self.destination_dimensions[0]))
-
-        self.capsfilter.set_property(
-            'caps',
-            gst.caps_from_string(
-                ', '.join(caps)))
-        gst.DEBUG_BIN_TO_DOT_FILE (
-            self.pipeline,
-            gst.DEBUG_GRAPH_SHOW_ALL,
-            'supersimple-debug-graph')
+            # landscape
+            caps_struct.set_value('width', self.destination_dimensions[0])
+        caps = Gst.Caps.new_empty()
+        caps.append_structure(caps_struct)
+        self.capsfilter.set_property('caps', caps)
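+        # with the default dimensions this is equivalent to the caps
+        # string 'video/x-raw, pixel-aspect-ratio=1/1, framerate=30/1,
+        # width=640' (or height=640 for portrait input)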
 
     def _on_message(self, bus, message):
-        _log.debug((bus, message))
-
-        t = message.type
-
-        if t == gst.MESSAGE_EOS:
-            self._discover_dst_and_stop()
+        _log.debug((bus, message, message.type))
+        if message.type == Gst.MessageType.EOS:
+            self.dst_data = discover(self.destination_path)
+            self.__stop()
             _log.info('Done')
-        elif t == gst.MESSAGE_ERROR:
-            _log.error((bus, message))
+        elif message.type == Gst.MessageType.ELEMENT:
+            if message.has_name('progress'):
+                structure = message.get_structure()
+                # Update progress state if it has changed
+                (success, percent) = structure.get_int('percent')
+                if self.progress_percentage != percent and success:
+                    self.progress_percentage = percent
+                    if self._progress_callback:
+                        self._progress_callback(percent)
+                    _log.info('{percent}% done...'.format(percent=percent))
+        elif message.type == Gst.MessageType.ERROR:
+            _log.error('Got error: {0}'.format(message.parse_error()))
             self.__stop()
 
-    def _discover_dst_and_stop(self):
-        self.dst_discoverer = discoverer.Discoverer(self.destination_path)
-
-        self.dst_discoverer.connect('discovered', self.__dst_discovered)
-
-        self.dst_discoverer.discover()
-
+    def __stop(self):
+        _log.debug(self.loop)
 
-    def __dst_discovered(self, data, is_media):
-        self.dst_data = data
+        if hasattr(self, 'pipeline'):
+            # Stop executing the pipeline
+            self.pipeline.set_state(Gst.State.NULL)
 
-        self.__stop()
+        # This kills the loop, mercifully
+        GObject.idle_add(self.__stop_mainloop)
 
-    def __stop(self):
-        _log.debug(self.loop)
+    def __stop_mainloop(self):
+        '''
+        Wrapper for GObject.MainLoop.quit()
 
-        self.pipeline.set_state(gst.STATE_NULL)
+        This wrapper makes it possible to see that self.loop.quit was called
+        '''
+        _log.info('Terminating MainLoop')
 
-        gobject.idle_add(self.loop.quit)
+        self.loop.quit()
 
 
 if __name__ == '__main__':
-    import os
-    os.environ["GST_DEBUG_DUMP_DOT_DIR"] = "/tmp"
-    os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
+    os.nice(19)
     from optparse import OptionParser
 
     parser = OptionParser(
-        usage='%prog [-v] -a [ video | thumbnail ] SRC DEST')
+        usage='%prog [-v] -a [ video | thumbnail | discover ] SRC [ DEST ]')
 
     parser.add_option('-a', '--action',
                       dest='action',
-                      help='One of "video" or "thumbnail"')
+                      help='One of "video", "discover" or "thumbnail"')
 
     parser.add_option('-v',
                       dest='verbose',
@@ -425,6 +401,10 @@ if __name__ == '__main__':
                       action='store_true',
                       help='Dear program, please be quiet unless *error*')
 
+    parser.add_option('-w', '--width',
+                      type=int,
+                      default=180,
+                      help='thumbnail width in pixels (default: 180)')
+
     (options, args) = parser.parse_args()
 
     if options.verbose:
@@ -437,12 +417,18 @@ if __name__ == '__main__':
 
     _log.debug(args)
 
-    if not len(args) == 2:
+    if len(args) != 2 and options.action != 'discover':
         parser.print_help()
         sys.exit()
 
+    transcoder = VideoTranscoder()
+
     if options.action == 'thumbnail':
-        VideoThumbnailer(*args)
+        args.append(options.width)
+        capture_thumb(*args)
     elif options.action == 'video':
-        transcoder = VideoTranscoder(*args)
-        pdb.set_trace()
+        def cb(percent):
+            print('I\'m a callback! ({0}% done)'.format(percent))
+        transcoder.transcode(*args, progress_callback=cb)
+    elif options.action == 'discover':
+        print(discover(*args))