1 # GNU MediaGoblin -- federated, autonomous media hosting
2 # Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Affero General Public License for more details.
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
17 from __future__
import division
, print_function
22 import multiprocessing
24 from mediagoblin
.media_types
.tools
import discover
25 from mediagoblin
.tools
.translate
import lazy_pass_to_ugettext
as _
27 #os.environ['GST_DEBUG'] = '4,python:4'
33 gi
.require_version('Gst', '1.0')
34 from gi
.repository
import GLib
, Gst
36 # init before import to work around https://bugzilla.gnome.org/show_bug.cgi?id=736260
37 from gi
.repository
import GstPbutils
46 _log
= logging
.getLogger(__name__
)
# Detect the number of CPU cores for vp8enc threading.  cpu_count() can
# raise NotImplementedError on platforms where it is unsupported, so start
# from a conservative default and only overwrite it on success.
# (As presented, the `except` had no matching `try:` — restored here.)
CPU_COUNT = 2
try:
    CPU_COUNT = multiprocessing.cpu_count()
except NotImplementedError:
    _log.warning('multiprocessing.cpu_count not implemented')

# Ask GStreamer to dump pipeline graphs (dot files) to /tmp for debugging.
os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
def capture_thumb(video_path, dest_path, width=None, height=None, percent=0.5):
    '''Capture a single frame of a video and save it as a thumbnail image.

    Builds a one-shot GStreamer pipeline
    (uridecodebin ! videoconvert ! videoscale ! appsink), pauses it, seeks
    to ``percent`` of the stream duration, pulls the prerolled frame and
    writes it to ``dest_path`` with PIL.

    :param video_path: path of the source video file
    :param dest_path: path the thumbnail image is written to
    :param width: optional thumbnail width in pixels
    :param height: optional thumbnail height in pixels
    :param percent: relative position (0.0 - 1.0) in the stream to capture
    '''
    def pad_added(element, pad, connect_to):
        '''This is a callback to dynamically add element to pipeline'''
        caps = pad.query_caps(None)
        name = caps.to_string()
        _log.debug('on_pad_added: {0}'.format(name))
        # Only link the first video stream; ignore audio/subtitle pads.
        if name.startswith('video') and not connect_to.is_linked():
            pad.link(connect_to)

    # construct pipeline: uridecodebin ! videoconvert ! videoscale ! \
    # ! CAPS ! appsink
    pipeline = Gst.Pipeline()
    uridecodebin = Gst.ElementFactory.make('uridecodebin', None)
    uridecodebin.set_property('uri', 'file://{0}'.format(video_path))
    videoconvert = Gst.ElementFactory.make('videoconvert', None)
    # decodebin pads only appear at runtime, so link them from the callback
    uridecodebin.connect('pad-added', pad_added,
                         videoconvert.get_static_pad('sink'))
    videoscale = Gst.ElementFactory.make('videoscale', None)

    # create caps for video scaling
    caps_struct = Gst.Structure.new_empty('video/x-raw')
    caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
    caps_struct.set_value('format', 'RGB')
    if height:
        caps_struct.set_value('height', height)
    if width:
        caps_struct.set_value('width', width)
    caps = Gst.Caps.new_empty()
    caps.append_structure(caps_struct)

    # sink everything to memory
    appsink = Gst.ElementFactory.make('appsink', None)
    appsink.set_property('caps', caps)

    # add everything to pipeline
    elements = [uridecodebin, videoconvert, videoscale, appsink]
    for element in elements:
        pipeline.add(element)
    videoconvert.link(videoscale)
    videoscale.link(appsink)

    try:
        # pipeline constructed, starting playing, but first some preparations
        # seek to 50% of the file is required
        pipeline.set_state(Gst.State.PAUSED)
        # timeout of 3 seconds below was set experimentally
        state = pipeline.get_state(Gst.SECOND * 3)
        if state[0] != Gst.StateChangeReturn.SUCCESS:
            _log.warning('state change failed, {0}'.format(state))
            return

        # query the duration so the seek target can be computed relative to it
        (success, duration) = pipeline.query_duration(Gst.Format.TIME)
        if not success:
            _log.warning('query_duration failed')
            return

        seek_to = int(duration * int(percent * 100) / 100)
        _log.debug('Seeking to {0} of {1}'.format(
            float(seek_to) / Gst.SECOND, float(duration) / Gst.SECOND))
        seek = pipeline.seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH,
                                    seek_to)
        if not seek:
            _log.warning('seek failed')
            return

        # get sample, retrieve its format and save
        sample = appsink.emit("pull-preroll")
        if not sample:
            _log.warning('could not get sample')
            return
        caps = sample.get_caps()
        if not caps:
            _log.warning('could not get snapshot format')
            return
        structure = caps.get_structure(0)
        (success, width) = structure.get_int('width')
        (success, height) = structure.get_int('height')
        # renamed from ``buffer`` to avoid shadowing the builtin
        buf = sample.get_buffer()

        # get the image from the buffer and save it to disk
        im = Image.frombytes('RGB', (width, height),
                             buf.extract_dup(0, buf.get_size()))
        im.save(dest_path)
        _log.info('thumbnail saved to {0}'.format(dest_path))
    finally:
        # cleanup: always release the pipeline, including on the early-return
        # error paths above (previously those leaked a PAUSED pipeline)
        pipeline.set_state(Gst.State.NULL)
class VideoTranscoder(object):
    '''
    Video transcoder

    Transcodes the SRC video file to a VP8 WebM video file at DST

    - Produces a WebM vp8 and vorbis video file.
    '''
    def __init__(self):
        # progress_percentage caches the last reported percentage so
        # duplicate 'progress' bus messages are not re-reported.
        _log.info('Initializing VideoTranscoder...')
        self.progress_percentage = None
        self.loop = GLib.MainLoop()

    def transcode(self, src, dst, **kwargs):
        '''
        Transcode a video file into a 'medium'-sized version.

        :param src: path of the source video file
        :param dst: path the WebM output is written to
        :keyword dimensions: (width, height) tuple bounding the output,
            defaults to (640, 640)
        :keyword vp8_quality: vp8enc quality setting, defaults to 8
        :keyword vp8_threads: encoder threads; 0 or None selects a value
            derived from CPU_COUNT
        :keyword vorbis_quality: vorbisenc quality, defaults to 0.3
        :keyword progress_callback: callable invoked with the integer
            percentage each time transcoding progress changes
        :raises Exception: if ``dimensions`` is not a tuple
        '''
        self.source_path = src
        self.destination_path = dst

        # vp8 options
        self.destination_dimensions = kwargs.get('dimensions', (640, 640))
        self.vp8_quality = kwargs.get('vp8_quality', 8)
        # Number of threads used by vp8enc:
        # number of real cores - 1 as per recommendation on
        # <http://www.webmproject.org/tools/encoder-parameters/#6-multi-threaded-encode-and-decode>
        self.vp8_threads = kwargs.get('vp8_threads', CPU_COUNT - 1)

        # 0 means auto-detect, but dict.get() only falls back to CPU_COUNT
        # if value is None, this will correct our incompatibility with
        # dict.get().
        # This will also correct cases where there's only 1 CPU core, see
        # original self.vp8_threads assignment above.
        if self.vp8_threads == 0:
            self.vp8_threads = CPU_COUNT

        # vorbis options
        self.vorbis_quality = kwargs.get('vorbis_quality', 0.3)

        # callback options
        self._progress_callback = kwargs.get('progress_callback') or None

        if not isinstance(self.destination_dimensions, tuple):
            raise Exception('dimensions must be tuple: (width, height)')

        self._setup_pipeline()
        self.data = discover(self.source_path)
        self._link_elements()
        self.__setup_videoscale_capsfilter()
        self.pipeline.set_state(Gst.State.PLAYING)
        _log.info('Transcoding...')
        _log.debug('Initializing MainLoop()')
        # Blocks until _on_message() stops the loop on EOS or error.
        self.loop.run()

    def _setup_pipeline(self):
        '''Create all GStreamer elements and add them to the pipeline.'''
        _log.debug('Setting up transcoding pipeline')
        # Create the pipeline bin.
        self.pipeline = Gst.Pipeline.new('VideoTranscoderPipeline')

        # Create all GStreamer elements, starting with
        # filesrc & decoder
        self.filesrc = Gst.ElementFactory.make('filesrc', 'filesrc')
        self.filesrc.set_property('location', self.source_path)
        self.pipeline.add(self.filesrc)

        self.decoder = Gst.ElementFactory.make('decodebin', 'decoder')
        self.decoder.connect('pad-added', self._on_dynamic_pad)
        self.pipeline.add(self.decoder)

        # Video elements
        self.videoqueue = Gst.ElementFactory.make('queue', 'videoqueue')
        self.pipeline.add(self.videoqueue)

        self.videorate = Gst.ElementFactory.make('videorate', 'videorate')
        self.pipeline.add(self.videorate)

        self.videoconvert = Gst.ElementFactory.make('videoconvert',
                                                    'videoconvert')
        self.pipeline.add(self.videoconvert)

        self.videoscale = Gst.ElementFactory.make('videoscale', 'videoscale')
        self.pipeline.add(self.videoscale)

        self.capsfilter = Gst.ElementFactory.make('capsfilter', 'capsfilter')
        self.pipeline.add(self.capsfilter)

        self.vp8enc = Gst.ElementFactory.make('vp8enc', 'vp8enc')
        self.vp8enc.set_property('threads', self.vp8_threads)
        self.pipeline.add(self.vp8enc)

        # Audio elements
        self.audioqueue = Gst.ElementFactory.make('queue', 'audioqueue')
        self.pipeline.add(self.audioqueue)

        self.audiorate = Gst.ElementFactory.make('audiorate', 'audiorate')
        self.audiorate.set_property('tolerance', 80000000)
        self.pipeline.add(self.audiorate)

        self.audioconvert = Gst.ElementFactory.make('audioconvert',
                                                    'audioconvert')
        self.pipeline.add(self.audioconvert)

        self.audiocapsfilter = Gst.ElementFactory.make('capsfilter',
                                                       'audiocapsfilter')
        audiocaps = Gst.Caps.new_empty()
        audiocaps_struct = Gst.Structure.new_empty('audio/x-raw')
        audiocaps.append_structure(audiocaps_struct)
        self.audiocapsfilter.set_property('caps', audiocaps)
        self.pipeline.add(self.audiocapsfilter)

        self.vorbisenc = Gst.ElementFactory.make('vorbisenc', 'vorbisenc')
        self.vorbisenc.set_property('quality', self.vorbis_quality)
        self.pipeline.add(self.vorbisenc)

        # WebMmux & filesink
        self.webmmux = Gst.ElementFactory.make('webmmux', 'webmmux')
        self.pipeline.add(self.webmmux)

        self.filesink = Gst.ElementFactory.make('filesink', 'filesink')
        self.filesink.set_property('location', self.destination_path)
        self.pipeline.add(self.filesink)

        # Progressreport
        self.progressreport = Gst.ElementFactory.make(
            'progressreport', 'progressreport')
        # Update every second
        self.progressreport.set_property('update-freq', 1)
        self.progressreport.set_property('silent', True)
        self.pipeline.add(self.progressreport)

    def _link_elements(self):
        '''
        Link all the elements

        This code depends on data from the discoverer and is called
        after discovery has run.
        '''
        _log.debug('linking elements')
        # Link the filesrc element to the decoder. The decoder then emits
        # 'new-decoded-pad' which links decoded src pads to either a video
        # or audio sink.
        self.filesrc.link(self.decoder)
        # Link the video branch statically.
        self.videoqueue.link(self.videorate)
        self.videorate.link(self.videoconvert)
        self.videoconvert.link(self.videoscale)
        self.videoscale.link(self.capsfilter)
        self.capsfilter.link(self.vp8enc)
        self.vp8enc.link(self.webmmux)

        # Only wire up the audio branch when the source actually has audio.
        if self.data.get_audio_streams():
            self.audioqueue.link(self.audiorate)
            self.audiorate.link(self.audioconvert)
            self.audioconvert.link(self.audiocapsfilter)
            self.audiocapsfilter.link(self.vorbisenc)
            self.vorbisenc.link(self.webmmux)
        self.webmmux.link(self.progressreport)
        self.progressreport.link(self.filesink)

        # Setup the message bus and connect _on_message to the pipeline
        self._setup_bus()

    def _on_dynamic_pad(self, dbin, pad):
        '''
        Callback called when ``decodebin`` has a pad that we can connect to
        '''
        # Intersect the capabilities of the video sink and the pad src
        # Then check if they have no common capabilities.
        if (self.videorate.get_static_pad('sink').get_pad_template()
                .get_caps().intersect(pad.query_caps()).is_empty()):
            # It is NOT a video src pad.
            _log.debug('linking audio to the pad dynamically')
            pad.link(self.audioqueue.get_static_pad('sink'))
        else:
            # It IS a video src pad.
            _log.debug('linking video to the pad dynamically')
            pad.link(self.videoqueue.get_static_pad('sink'))

    def _setup_bus(self):
        '''Watch the pipeline's message bus with _on_message().'''
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self._on_message)

    def __setup_videoscale_capsfilter(self):
        '''
        Sets up the output format (width, height) for the video
        '''
        caps_struct = Gst.Structure.new_empty('video/x-raw')
        caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
        caps_struct.set_value('framerate', Gst.Fraction(30, 1))
        video_info = self.data.get_video_streams()[0]
        # Constrain only the dominant axis; videoscale keeps the aspect
        # ratio for the other one.
        if video_info.get_height() > video_info.get_width():
            # portrait
            caps_struct.set_value('height', self.destination_dimensions[1])
        else:
            # landscape
            caps_struct.set_value('width', self.destination_dimensions[0])
        caps = Gst.Caps.new_empty()
        caps.append_structure(caps_struct)
        self.capsfilter.set_property('caps', caps)

    def _on_message(self, bus, message):
        '''Bus callback: handle EOS, progress and error messages.'''
        _log.debug((bus, message, message.type))
        if message.type == Gst.MessageType.EOS:
            # Transcoding finished; discover the result before stopping.
            self.dst_data = discover(self.destination_path)
            self.__stop()
            _log.info('Done')
        elif message.type == Gst.MessageType.ELEMENT:
            if message.has_name('progress'):
                structure = message.get_structure()
                # Update progress state if it has changed
                (success, percent) = structure.get_int('percent')
                if self.progress_percentage != percent and success:
                    self.progress_percentage = percent
                    if self._progress_callback:
                        self._progress_callback(percent)
                    _log.info('{percent}% done...'.format(percent=percent))
        elif message.type == Gst.MessageType.ERROR:
            _log.error('Got error: {0}'.format(message.parse_error()))
            self.__stop()

    def __stop(self):
        '''Tear down the pipeline and schedule the main loop to quit.'''
        _log.debug(self.loop)

        if hasattr(self, 'pipeline'):
            # Stop executing the pipeline
            self.pipeline.set_state(Gst.State.NULL)

        # This kills the loop, mercifully
        GLib.idle_add(self.__stop_mainloop)

    def __stop_mainloop(self):
        '''
        Wrapper for GLib.MainLoop.quit()

        This wrapper makes us able to see if self.loop.quit has been called
        '''
        _log.info('Terminating MainLoop')
        self.loop.quit()
if __name__ == '__main__':
    # Small CLI harness for manual testing: transcode a video, capture a
    # thumbnail, or run discovery on a file.
    from optparse import OptionParser

    parser = OptionParser(
        usage='%prog [-v] -a [ video | thumbnail | discover ] SRC [ DEST ]')

    parser.add_option('-a', '--action',
                      dest='action',
                      help='One of "video", "discover" or "thumbnail"')

    parser.add_option('-v',
                      dest='verbose',
                      action='store_true',
                      help='Output debug information')

    parser.add_option('-q',
                      dest='quiet',
                      action='store_true',
                      help='Dear program, please be quiet unless *error*')

    parser.add_option('-w', '--width',
                      dest='width',
                      type=int,
                      default=180)

    (options, args) = parser.parse_args()

    # -v raises verbosity to DEBUG; -q overrides everything down to ERROR.
    if options.verbose:
        _log.setLevel(logging.DEBUG)
    else:
        _log.setLevel(logging.INFO)
    if options.quiet:
        _log.setLevel(logging.ERROR)

    _log.debug(args)

    if not len(args) == 2 and not options.action == 'discover':
        # exits with usage message; the actions below need SRC and DEST
        parser.error('requires SRC and DEST arguments')

    transcoder = VideoTranscoder()

    if options.action == 'thumbnail':
        args.append(options.width)
        # BUGFIX: previously called VideoThumbnailerMarkII, which is not
        # defined anywhere in this module; capture_thumb() is this
        # module's thumbnailer and takes (video_path, dest_path, width).
        capture_thumb(*args)
    elif options.action == 'video':
        def cb(data):
            print('I\'m a callback!')
        transcoder.transcode(*args, progress_callback=cb)
    elif options.action == 'discover':
        # BUGFIX: VideoTranscoder defines no discover() method; use the
        # module-level discover() imported from mediagoblin instead.
        print(discover(*args))