Merge remote branch 'remotes/manolinux/683_text_separate_tags_by_commas_and_spaces'
[mediagoblin.git] / mediagoblin / media_types / video / transcoders.py
index 8115bb385b063c0112a1e97b5df0b59fdbf8aaaf..d7ed14cad25633971d867e2d6a3a318234a9087f 100644 (file)
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from __future__ import division
+
+import os
+os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
+
 import sys
 import logging
 import pdb
+import urllib
 
 _log = logging.getLogger(__name__)
 logging.basicConfig()
 _log.setLevel(logging.DEBUG)
 
+CPU_COUNT = 2
+try:
+    import multiprocessing
+    try:
+        CPU_COUNT = multiprocessing.cpu_count()
+    except NotImplementedError:
+        _log.warning('multiprocessing.cpu_count not implemented')
+        pass
+except ImportError:
+    _log.warning('Could not import multiprocessing, defaulting to 2 CPU cores')
+    pass
+
+try:
+    import gtk
+except:
+    raise Exception('Could not find pygtk')
+
 try:
     import gobject
     gobject.threads_init()
 except:
-    _log.error('Could not import gobject')
-    raise Exception()
+    raise Exception('gobject could not be found')
 
 try:
     import pygst
     pygst.require('0.10')
     import gst
-    from gst import pbutils
     from gst.extend import discoverer
 except:
-    _log.error('pygst could not be imported')
-    raise Exception()
+    raise Exception('gst/pygst 0.10 could not be found')
 
 
 class VideoThumbnailer:
-    '''
-    Creates a video thumbnail
-
-     - Sets up discoverer & transcoding pipeline.
-       Discoverer finds out information about the media file
-     - Launches gobject.MainLoop, this triggers the discoverer to start running
-     - Once the discoverer is done, it calls the __discovered callback function
-     - The __discovered callback function launches the transcoding process
-     - The _on_message callback is called from the transcoding process until it gets a 
-       message of type gst.MESSAGE_EOS, then it calls __stop which shuts down the
-       gobject.MainLoop
-    '''
-    def __init__(self, src, dst, **kwargs):
-        _log.info('Initializing VideoThumbnailer...')
+    # Declaration of thumbnailer states
+    STATE_NULL = 0
+    STATE_HALTING = 1
+    STATE_PROCESSING = 2
+
+    # The current thumbnailer state
+    state = STATE_NULL
+
+    # This will contain the thumbnailing pipeline
+    thumbnail_pipeline = None
+
+    buffer_probes = {}
+
+    errors = []
+
+    def __init__(self, source_path, dest_path):
+        '''
+        Set up playbin pipeline in order to get video properties.
+
+        Initializes and runs the gobject.MainLoop()
+        '''
+        self.source_path = source_path
+        self.dest_path = dest_path
 
         self.loop = gobject.MainLoop()
-        self.source_path = src
-        self.destination_path = dst
 
-        self.destination_dimensions = kwargs.get('dimensions') or (180, 180)
+        # Set up the playbin. It will be used to discover certain
+        # properties of the input file
+        self.playbin = gst.element_factory_make('playbin')
 
-        if not type(self.destination_dimensions) == tuple:
-            raise Exception('dimensions must be tuple: (width, height)')
+        self.videosink = gst.element_factory_make('fakesink', 'videosink')
+        self.playbin.set_property('video-sink', self.videosink)
 
-        self._setup()
-        self._run()
+        self.audiosink = gst.element_factory_make('fakesink', 'audiosink')
+        self.playbin.set_property('audio-sink', self.audiosink)
 
-    def _setup(self):
-        self._setup_pass()
-        self._setup_discover()
+        self.bus = self.playbin.get_bus()
+        self.bus.add_signal_watch()
+        self.watch_id = self.bus.connect('message', self._on_bus_message)
 
-    def _run(self):
-        _log.info('Discovering...')
-        self.discoverer.discover()
-        _log.info('Done')
+        self.playbin.set_property('uri', 'file:{0}'.format(
+                urllib.pathname2url(self.source_path)))
 
-        _log.debug('Initializing MainLoop()')
+        self.playbin.set_state(gst.STATE_PAUSED)
+
+        self.run()
+
+    def run(self):
         self.loop.run()
 
-    def _setup_discover(self):
-        self.discoverer = discoverer.Discoverer(self.source_path)
+    def _on_bus_message(self, bus, message):
+        _log.debug(' BUS MESSAGE: {0}'.format(message))
 
-        # Connect self.__discovered to the 'discovered' event
-        self.discoverer.connect('discovered', self.__discovered)
+        if message.type == gst.MESSAGE_ERROR:
+            gobject.idle_add(self._on_bus_error)
 
-    def __discovered(self, data, is_media):
+        elif message.type == gst.MESSAGE_STATE_CHANGED:
+            # The pipeline state has changed
+            # Parse state changing data
+            _prev, state, _pending = message.parse_state_changed()
+
+            _log.debug('State changed: {0}'.format(state))
+
+            if state == gst.STATE_PAUSED:
+                if message.src == self.playbin:
+                    gobject.idle_add(self._on_bus_paused)
+
+    def _on_bus_paused(self):
         '''
-        Callback for media discoverer.
+        Set up thumbnailing pipeline
         '''
-        if not is_media:
-            self.__stop()
-            raise Exception('Could not discover {0}'.format(self.source_path))
+        current_video = self.playbin.get_property('current-video')
 
-        _log.debug('__discovered, data: {0}'.format(data))
+        if current_video == 0:
+            _log.debug('Found current video from playbin')
+        else:
+            _log.error('Could not get any current video from playbin!')
 
-        self.data = data
+        self.duration = self._get_duration(self.playbin)
+        _log.info('Video length: {0}'.format(self.duration / gst.SECOND))
 
-        self._on_discovered()
+        _log.info('Setting up thumbnailing pipeline')
+        self.thumbnail_pipeline = gst.parse_launch(
+            'filesrc location="{0}" ! decodebin ! '
+            'ffmpegcolorspace ! videoscale ! '
+            'video/x-raw-rgb,depth=24,bpp=24,pixel-aspect-ratio=1/1,width=180 ! '
+            'fakesink signal-handoffs=True'.format(self.source_path))
 
-        # Tell the transcoding pipeline to start running
-        self.pipeline.set_state(gst.STATE_PLAYING)
-        _log.info('Transcoding...')
+        self.thumbnail_bus = self.thumbnail_pipeline.get_bus()
+        self.thumbnail_bus.add_signal_watch()
+        self.thumbnail_watch_id = self.thumbnail_bus.connect(
+            'message', self._on_thumbnail_bus_message)
 
-    def _on_discovered(self):
-        self.__setup_capsfilter()
+        self.thumbnail_pipeline.set_state(gst.STATE_PAUSED)
 
-    def _setup_pass(self):
-        self.pipeline = gst.Pipeline('VideoThumbnailerPipeline')
+        #gobject.timeout_add(3000, self._on_timeout)
 
-        self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
-        self.filesrc.set_property('location', self.source_path)
-        self.pipeline.add(self.filesrc)
+        return False
 
-        self.decoder = gst.element_factory_make('decodebin2', 'decoder')
+    def _on_thumbnail_bus_message(self, bus, message):
+        _log.debug('Thumbnail bus called, message: {0}'.format(message))
 
-        self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
-        self.pipeline.add(self.decoder)
+        if message.type == gst.MESSAGE_ERROR:
+            _log.error(message)
+            gobject.idle_add(self._on_bus_error)
 
-        self.ffmpegcolorspace = gst.element_factory_make('ffmpegcolorspace', 'ffmpegcolorspace')
-        self.pipeline.add(self.ffmpegcolorspace)
+        if message.type == gst.MESSAGE_STATE_CHANGED:
+            _prev, state, _pending = message.parse_state_changed()
 
-        self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
-        self.videoscale.set_property('method', 'bilinear')
-        self.pipeline.add(self.videoscale)
+            if (state == gst.STATE_PAUSED and
+                not self.state == self.STATE_PROCESSING and
+                message.src == self.thumbnail_pipeline):
+                _log.info('Pipeline paused, processing')
+                self.state = self.STATE_PROCESSING
 
-        self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
-        self.pipeline.add(self.capsfilter)
+                for sink in self.thumbnail_pipeline.sinks():
+                    name = sink.get_name()
+                    factoryname = sink.get_factory().get_name()
 
-        self.jpegenc = gst.element_factory_make('jpegenc', 'jpegenc')
-        self.pipeline.add(self.jpegenc)
+                    if factoryname == 'fakesink':
+                        sinkpad = sink.get_pad('sink')
 
-        self.filesink = gst.element_factory_make('filesink', 'filesink')
-        self.filesink.set_property('location', self.destination_path)
-        self.pipeline.add(self.filesink)
+                        self.buffer_probes[name] = sinkpad.add_buffer_probe(
+                            self.buffer_probe_handler, name)
 
-        # Link all the elements together
-        self.filesrc.link(self.decoder)
-        self.ffmpegcolorspace.link(self.videoscale)
-        self.videoscale.link(self.capsfilter)
-        self.capsfilter.link(self.jpegenc)
-        self.jpegenc.link(self.filesink)
+                        _log.info('Added buffer probe')
 
-        self._setup_bus()
+                        break
 
-    def _setup_bus(self):
-        self.bus = self.pipeline.get_bus()
-        self.bus.add_signal_watch()
-        self.bus.connect('message', self._on_message)
+                # Apply the Wadsworth constant; fall back to 1 second
+                seek_amount = max(self.duration / 100 * 30, 1 * gst.SECOND)
 
-    def __setup_capsfilter(self):
-        thumbsizes = self.calculate_resize()  # Returns tuple with (width, height)
+                _log.debug('seek amount: {0}'.format(seek_amount))
 
-        self.capsfilter.set_property(
-            'caps',
-            gst.caps_from_string('video/x-raw-rgb, width={width}, height={height}'.format(
-                    width=thumbsizes[0],
-                    height=thumbsizes[1]
-                    )))
+                seek_result = self.thumbnail_pipeline.seek(
+                    1.0,
+                    gst.FORMAT_TIME,
+                    gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
+                    gst.SEEK_TYPE_SET,
+                    seek_amount,
+                    gst.SEEK_TYPE_NONE,
+                    0)
 
-    def calculate_resize(self):
-        x_ratio = self.destination_dimensions[0] / self.data.videowidth
-        y_ratio = self.destination_dimensions[1] / self.data.videoheight
+                if not seek_result:
+                    self.errors.append('COULD_NOT_SEEK')
+                    _log.error('Couldn\'t seek! result: {0}'.format(
+                            seek_result))
+                    _log.info(message)
+                    self.shutdown()
+                else:
+                    pass
+                    #self.thumbnail_pipeline.set_state(gst.STATE_PAUSED)
+                    #pdb.set_trace()
 
-        if self.data.videoheight > self.data.videowidth:
-            # We're dealing with a portrait!
-            dimensions = (
-                int(self.data.videowidth * y_ratio),
-                180)
-        else:
-            dimensions = (
-                180,
-                int(self.data.videoheight * x_ratio))
+    def buffer_probe_handler_real(self, pad, buff, name):
+        '''
+        Capture buffers as gdk_pixbufs when told to.
+        '''
+        try:
+            caps = buff.caps
+            if caps is None:
+                _log.error('No caps passed to buffer probe handler!')
+                self.shutdown()
+                return False
+
+            _log.debug('caps: {0}'.format(caps))
+
+            filters = caps[0]
+            width = filters["width"]
+            height = filters["height"]
+
+            pixbuf = gtk.gdk.pixbuf_new_from_data(
+                buff.data, gtk.gdk.COLORSPACE_RGB, False, 8,
+                width, height, width * 3)
+
+            # NOTE: 200x136 is sort of arbitrary.  it's larger than what
+            # the ui uses at the time of this writing.
+            # new_width, new_height = scaled_size((width, height), (200, 136))
+
+            #pixbuf = pixbuf.scale_simple(
+            #new_width, new_height, gtk.gdk.INTERP_BILINEAR)
+
+            pixbuf.save(self.dest_path, 'jpeg')
+            _log.info('Saved thumbnail')
+            del pixbuf
+            self.shutdown()
+        except gst.QueryError:
+            pass
+        return False
+
+    def buffer_probe_handler(self, pad, buff, name):
+        '''
+        Proxy function for buffer_probe_handler_real
+        '''
+        gobject.idle_add(
+            lambda: self.buffer_probe_handler_real(pad, buff, name))
 
-        return dimensions
+        return True
 
-    def _on_message(self, bus, message):
-        _log.debug((bus, message))
+    def _get_duration(self, pipeline, retries=0):
+        '''
+        Get the duration of a pipeline.
 
-        t = message.type
+        Retries up to 5 times, returning 0 if the query keeps failing.
+        '''
+        if retries == 5:
+            return 0
 
-        if t == gst.MESSAGE_EOS:
-            self.__stop()
-            _log.info('Done')
-        elif t == gst.MESSAGE_ERROR:
-            _log.error((bus, message))
-            self.__stop()
+        try:
+            return pipeline.query_duration(gst.FORMAT_TIME)[0] 
+        except gst.QueryError:
+            return self._get_duration(pipeline, retries + 1)
 
-    def _on_dynamic_pad(self, dbin, pad, islast):
+    def _on_timeout(self):
+        _log.error('TIMEOUT! DROP EVERYTHING!')
+        self.shutdown()
+
+    def _on_bus_error(self, *args):
+        _log.error('AHAHAHA! Error! args: {0}'.format(args))
+
+    def shutdown(self):
         '''
-        Callback called when ``decodebin2`` has a pad that we can connect to
+        Halt the pipelines and shut down the main loop.
         '''
-        pad.link(
-            self.ffmpegcolorspace.get_pad('sink'))
+        _log.info('Shutting down')
+        self.__halt()
 
-    def __stop(self):
-        _log.debug(self.loop)
+    def __halt(self):
+        '''
+        Halt all pipelines and shut down the main loop
+        '''
+        _log.info('Halting...')
+        self.state = self.STATE_HALTING
 
-        self.pipeline.set_state(gst.STATE_NULL)
+        self.__disconnect()
+
+        gobject.idle_add(self.__halt_final)
+
+    def __disconnect(self):
+        _log.debug('Disconnecting...')
+        if not self.playbin is None:
+            self.playbin.set_state(gst.STATE_NULL)
+            for sink in self.playbin.sinks():
+                name = sink.get_name()
+                factoryname = sink.get_factory().get_name()
+
+                _log.debug('Disconnecting {0}'.format(name))
+
+                if factoryname == "fakesink":
+                    pad = sink.get_pad("sink")
+                    pad.remove_buffer_probe(self.buffer_probes[name])
+                    del self.buffer_probes[name]
+
+        self.playbin = None
 
-        gobject.idle_add(self.loop.quit)
+        if self.bus is not None:
+            self.bus.disconnect(self.watch_id)
+            self.bus = None
+
+
+    def __halt_final(self):
+        _log.info('Done')
+        if self.errors:
+            _log.error(','.join(self.errors))
+            
+        self.loop.quit()
 
 
 class VideoTranscoder:
@@ -210,8 +335,11 @@ class VideoTranscoder:
 
     Transcodes the SRC video file to a VP8 WebM video file at DST
 
-    TODO:
-    - Audio pipeline
+     - Does the same thing as VideoThumbnailer, but produces a WebM
+       (VP8 video + Vorbis audio) file instead of a thumbnail.
+     - The VideoTranscoder was written after the VideoThumbnailer and
+       refined further, so its implementation is the more correct of
+       the two.
     '''
     def __init__(self, src, dst, **kwargs):
         _log.info('Initializing VideoTranscoder...')
@@ -220,7 +348,9 @@ class VideoTranscoder:
         self.source_path = src
         self.destination_path = dst
 
+        # Options
         self.destination_dimensions = kwargs.get('dimensions') or (640, 640)
+        self._progress_callback = kwargs.get('progress_callback') or None
 
         if not type(self.destination_dimensions) == tuple:
             raise Exception('dimensions must be tuple: (width, height)')
@@ -229,8 +359,8 @@ class VideoTranscoder:
         self._run()
 
     def _setup(self):
-        self._setup_pass()
         self._setup_discover()
+        self._setup_pipeline()
 
     def _run(self):
         _log.info('Discovering...')
@@ -241,6 +371,7 @@ class VideoTranscoder:
         self.loop.run()
 
     def _setup_discover(self):
+        _log.debug('Setting up discoverer')
         self.discoverer = discoverer.Discoverer(self.source_path)
 
         # Connect self.__discovered to the 'discovered' event
@@ -254,37 +385,47 @@ class VideoTranscoder:
             self.__stop()
             raise Exception('Could not discover {0}'.format(self.source_path))
 
-        _log.debug('__discovered, data: {0}'.format(data))
+        _log.debug('__discovered, data: {0}'.format(data.__dict__))
 
         self.data = data
 
-        self._on_discovered()
+        # Launch things that should be done after discovery
+        self._link_elements()
+        self.__setup_videoscale_capsfilter()
 
         # Tell the transcoding pipeline to start running
         self.pipeline.set_state(gst.STATE_PLAYING)
         _log.info('Transcoding...')
 
-    def _on_discovered(self):
-        self.__setup_videoscale_capsfilter()
-
-    def _setup_pass(self):
+    def _setup_pipeline(self):
+        _log.debug('Setting up transcoding pipeline')
+        # Create the pipeline bin.
         self.pipeline = gst.Pipeline('VideoTranscoderPipeline')
 
+        # Create all GStreamer elements, starting with
+        # filesrc & decoder
         self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
         self.filesrc.set_property('location', self.source_path)
         self.pipeline.add(self.filesrc)
 
         self.decoder = gst.element_factory_make('decodebin2', 'decoder')
-
         self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
         self.pipeline.add(self.decoder)
 
-        self.ffmpegcolorspace = gst.element_factory_make('ffmpegcolorspace', 'ffmpegcolorspace')
-        self.pipeline.add(self.ffmpegcolorspace)
+        # Video elements
+        self.videoqueue = gst.element_factory_make('queue', 'videoqueue')
+        self.pipeline.add(self.videoqueue)
+
+        self.videorate = gst.element_factory_make('videorate', 'videorate')
+        self.pipeline.add(self.videorate)
 
-        self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
-        self.videoscale.set_property('method', 2)  # I'm not sure this works
-        self.videoscale.set_property('add-borders', 0)
+        self.ffmpegcolorspace = gst.element_factory_make(
+            'ffmpegcolorspace', 'ffmpegcolorspace')
+        self.pipeline.add(self.ffmpegcolorspace)
+        
+        self.videoscale = gst.element_factory_make('ffvideoscale', 'videoscale')
+        #self.videoscale.set_property('method', 2)  # I'm not sure this works
+        #self.videoscale.set_property('add-borders', 0)
         self.pipeline.add(self.videoscale)
 
         self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
@@ -293,19 +434,32 @@ class VideoTranscoder:
         self.vp8enc = gst.element_factory_make('vp8enc', 'vp8enc')
         self.vp8enc.set_property('quality', 6)
         self.vp8enc.set_property('threads', 2)
-        self.vp8enc.set_property('speed', 2)
         self.pipeline.add(self.vp8enc)
 
+        # Audio elements
+        self.audioqueue = gst.element_factory_make('queue', 'audioqueue')
+        self.pipeline.add(self.audioqueue)
+
+        self.audiorate = gst.element_factory_make('audiorate', 'audiorate')
+        self.audiorate.set_property('tolerance', 80000000)
+        self.pipeline.add(self.audiorate)
 
-        # Audio
         self.audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')
         self.pipeline.add(self.audioconvert)
 
+        self.audiocapsfilter = gst.element_factory_make('capsfilter', 'audiocapsfilter')
+        audiocaps = ['audio/x-raw-float']
+        self.audiocapsfilter.set_property(
+            'caps',
+            gst.caps_from_string(
+                ','.join(audiocaps)))
+        self.pipeline.add(self.audiocapsfilter)
+
         self.vorbisenc = gst.element_factory_make('vorbisenc', 'vorbisenc')
-        self.vorbisenc.set_property('quality', 0.7)
+        self.vorbisenc.set_property('quality', 1)
         self.pipeline.add(self.vorbisenc)
 
-
+        # WebMmux & filesink
         self.webmmux = gst.element_factory_make('webmmux', 'webmmux')
         self.pipeline.add(self.webmmux)
 
@@ -313,35 +467,69 @@ class VideoTranscoder:
         self.filesink.set_property('location', self.destination_path)
         self.pipeline.add(self.filesink)
 
-        self.filesrc.link(self.decoder)
-        self.ffmpegcolorspace.link(self.videoscale)
-        self.videoscale.link(self.capsfilter)
-        self.capsfilter.link(self.vp8enc)
-        self.vp8enc.link(self.webmmux)
+        # Progressreport
+        self.progressreport = gst.element_factory_make(
+            'progressreport', 'progressreport')
+        # Update every second
+        self.progressreport.set_property('update-freq', 1)
+        self.progressreport.set_property('silent', True)
+        self.pipeline.add(self.progressreport)
 
-        # Audio
-        self.audioconvert.link(self.vorbisenc)
-        self.vorbisenc.link(self.webmmux)
+    def _link_elements(self):
+        '''
+        Link all the elements
 
-        self.webmmux.link(self.filesink)
+        This code depends on data from the discoverer and is called
+        from __discovered
+        '''
+        _log.debug('linking elements')
+        # Link the filesrc element to the decoder. The decoder then emits
+        # 'new-decoded-pad' which links decoded src pads to either a video
+        # or audio sink
+        self.filesrc.link(self.decoder)
 
+        # Link all the video elements in a row to webmmux
+        gst.element_link_many(
+            self.videoqueue,
+            self.videorate,
+            self.ffmpegcolorspace,
+            self.videoscale,
+            self.capsfilter,
+            self.vp8enc,
+            self.webmmux)
+
+        if self.data.is_audio:
+            # Link all the audio elements in a row to webmux
+            gst.element_link_many(
+                self.audioqueue,
+                self.audiorate,
+                self.audioconvert,
+                self.audiocapsfilter,
+                self.vorbisenc,
+                self.webmmux)
+
+        gst.element_link_many(
+            self.webmmux,
+            self.progressreport,
+            self.filesink)
+
+        # Setup the message bus and connect _on_message to the pipeline
         self._setup_bus()
 
+
     def _on_dynamic_pad(self, dbin, pad, islast):
         '''
         Callback called when ``decodebin2`` has a pad that we can connect to
         '''
-        _log.debug('Linked {0}'.format(pad))
-
-        #pdb.set_trace()
-
+        # Intersect the capabilities of the video sink and the pad src
+        # Then check if they have no common capabilities.
         if self.ffmpegcolorspace.get_pad_template('sink')\
                 .get_caps().intersect(pad.get_caps()).is_empty():
-            pad.link(
-                self.audioconvert.get_pad('sink'))
+            # It is NOT a video src pad.
+            pad.link(self.audioqueue.get_pad('sink'))
         else:
-            pad.link(
-                self.ffmpegcolorspace.get_pad('sink'))
+            # It IS a video src pad.
+            pad.link(self.videoqueue.get_pad('sink'))
 
     def _setup_bus(self):
         self.bus = self.pipeline.get_bus()
@@ -349,7 +537,10 @@ class VideoTranscoder:
         self.bus.connect('message', self._on_message)
 
     def __setup_videoscale_capsfilter(self):
-        caps = ['video/x-raw-yuv', 'pixel-aspect-ratio=1/1']
+        '''
+        Sets up the output format (width, height) for the video
+        '''
+        caps = ['video/x-raw-yuv', 'pixel-aspect-ratio=1/1', 'framerate=30/1']
 
         if self.data.videoheight > self.data.videowidth:
             # Whoa! We have ourselves a portrait video!
@@ -363,20 +554,28 @@ class VideoTranscoder:
         self.capsfilter.set_property(
             'caps',
             gst.caps_from_string(
-                ', '.join(caps)))
-        gst.DEBUG_BIN_TO_DOT_FILE (
-            self.pipeline,
-            gst.DEBUG_GRAPH_SHOW_ALL,
-            'supersimple-debug-graph')
+                ','.join(caps)))
 
     def _on_message(self, bus, message):
-        _log.debug((bus, message))
+        _log.debug((bus, message, message.type))
 
         t = message.type
 
         if t == gst.MESSAGE_EOS:
             self._discover_dst_and_stop()
             _log.info('Done')
+
+        elif t == gst.MESSAGE_ELEMENT:
+            if message.structure.get_name() == 'progress':
+                data = dict(message.structure)
+
+                if self._progress_callback:
+                    self._progress_callback(data)
+
+                _log.info('{percent}% done...'.format(
+                        percent=data.get('percent')))
+                _log.debug(data)
+
         elif t == gst.MESSAGE_ERROR:
             _log.error((bus, message))
             self.__stop()
@@ -397,15 +596,25 @@ class VideoTranscoder:
     def __stop(self):
         _log.debug(self.loop)
 
+        # Stop executing the pipeline
         self.pipeline.set_state(gst.STATE_NULL)
 
-        gobject.idle_add(self.loop.quit)
+        # This kills the loop, mercifully
+        gobject.idle_add(self.__stop_mainloop)
+
+    def __stop_mainloop(self):
+        '''
+        Wrapper for gobject.MainLoop.quit()
+
+        This wrapper makes us able to see if self.loop.quit has been called
+        '''
+        _log.info('Terminating MainLoop')
+
+        self.loop.quit()
 
 
 if __name__ == '__main__':
-    import os
-    os.environ["GST_DEBUG_DUMP_DOT_DIR"] = "/tmp"
-    os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
+    os.nice(19)
     from optparse import OptionParser
 
     parser = OptionParser(
@@ -444,5 +653,6 @@ if __name__ == '__main__':
     if options.action == 'thumbnail':
         VideoThumbnailer(*args)
     elif options.action == 'video':
-        transcoder = VideoTranscoder(*args)
-        pdb.set_trace()
+        def cb(data):
+            print('I\'m a callback!')
+        transcoder = VideoTranscoder(*args, progress_callback=cb)