Commit | Line | Data |
---|---|---|
26729e02 | 1 | # GNU MediaGoblin -- federated, autonomous media hosting |
cf29e8a8 | 2 | # Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS. |
26729e02 JW |
3 | # |
4 | # This program is free software: you can redistribute it and/or modify | |
5 | # it under the terms of the GNU Affero General Public License as published by | |
6 | # the Free Software Foundation, either version 3 of the License, or | |
7 | # (at your option) any later version. | |
8 | # | |
9 | # This program is distributed in the hope that it will be useful, | |
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | # GNU Affero General Public License for more details. | |
13 | # | |
14 | # You should have received a copy of the GNU Affero General Public License | |
15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | ||
2c1b0dc3 | 17 | from __future__ import division, print_function |
206ef749 JW |
18 | |
19 | import os | |
26729e02 JW |
20 | import sys |
21 | import logging | |
ff3136d0 | 22 | import multiprocessing |
9834c876 | 23 | |
91f5f5e7 | 24 | from mediagoblin.media_types.tools import discover |
9834c876 | 25 | from mediagoblin.tools.translate import lazy_pass_to_ugettext as _ |
91f5f5e7 BB |
26 | |
#os.environ['GST_DEBUG'] = '4,python:4'

# Hide sys.argv while GStreamer is imported/initialized, then restore it —
# presumably so Gst does not parse/consume our command line arguments
# (NOTE(review): confirm; Gst.init(None) should not touch argv by itself).
old_argv = sys.argv
sys.argv = []

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
Gst.init(None)
# init before import to work around https://bugzilla.gnome.org/show_bug.cgi?id=736260
from gi.repository import GstPbutils

sys.argv = old_argv
import struct
# Pillow exposes the module as PIL.Image; fall back to the legacy
# standalone 'Image' module for old installs.
try:
    from PIL import Image
except ImportError:
    import Image

_log = logging.getLogger(__name__)

# Conservative fallback used when the real core count is unavailable (below).
CPU_COUNT = 2

try:
    CPU_COUNT = multiprocessing.cpu_count()
except NotImplementedError:
    _log.warning('multiprocessing.cpu_count not implemented')

# Ask GStreamer to dump pipeline graph (.dot) files to /tmp for debugging.
os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
56 | ||
57 | ||
7e266d5a BB |
def capture_thumb(video_path, dest_path, width=None, height=None, percent=0.5):
    '''
    Capture a still frame from a video file and save it as an image.

    Builds a one-shot GStreamer pipeline
    (uridecodebin ! videoconvert ! videoscale ! caps ! appsink), pauses it,
    seeks to ``percent`` of the stream duration and pulls the preroll
    sample from the appsink, saving it to disk with PIL.

    :param video_path: local filesystem path of the source video
    :param dest_path: path the thumbnail image is written to
    :param width: optional output width in pixels
    :param height: optional output height in pixels
    :param percent: position to capture, as a fraction of the duration

    Returns None; on failure a warning is logged and no file is written.
    '''
    def pad_added(element, pad, connect_to):
        '''Callback to dynamically link uridecodebin's video pad'''
        caps = pad.query_caps(None)
        name = caps.to_string()
        _log.debug('on_pad_added: {0}'.format(name))
        if name.startswith('video') and not connect_to.is_linked():
            pad.link(connect_to)

    # construct pipeline: uridecodebin ! videoconvert ! videoscale ! \
    # ! CAPS ! appsink
    pipeline = Gst.Pipeline()
    uridecodebin = Gst.ElementFactory.make('uridecodebin', None)
    # NOTE(review): naive 'file://' concatenation breaks for paths that need
    # URI escaping; acceptable for media-store paths, but worth confirming.
    uridecodebin.set_property('uri', 'file://{0}'.format(video_path))
    videoconvert = Gst.ElementFactory.make('videoconvert', None)
    uridecodebin.connect('pad-added', pad_added,
                         videoconvert.get_static_pad('sink'))
    videoscale = Gst.ElementFactory.make('videoscale', None)

    # create caps for video scaling
    caps_struct = Gst.Structure.new_empty('video/x-raw')
    caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
    caps_struct.set_value('format', 'RGB')
    if height:
        caps_struct.set_value('height', height)
    if width:
        caps_struct.set_value('width', width)
    caps = Gst.Caps.new_empty()
    caps.append_structure(caps_struct)

    # sink everything to memory
    appsink = Gst.ElementFactory.make('appsink', None)
    appsink.set_property('caps', caps)

    # add everything to pipeline
    elements = [uridecodebin, videoconvert, videoscale, appsink]
    for element in elements:
        pipeline.add(element)
    videoconvert.link(videoscale)
    videoscale.link(appsink)

    try:
        # pipeline constructed, starting playing, but first some preparations
        # seek to 50% of the file is required
        pipeline.set_state(Gst.State.PAUSED)
        # timeout of 3 seconds below was set experimentally
        state = pipeline.get_state(Gst.SECOND * 3)
        if state[0] != Gst.StateChangeReturn.SUCCESS:
            _log.warning('state change failed, {0}'.format(state))
            return

        # get duration
        (success, duration) = pipeline.query_duration(Gst.Format.TIME)
        if not success:
            _log.warning('query_duration failed')
            return

        seek_to = int(duration * int(percent * 100) / 100)
        _log.debug('Seeking to {0} of {1}'.format(
            float(seek_to) / Gst.SECOND, float(duration) / Gst.SECOND))
        seek = pipeline.seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH,
                                    seek_to)
        if not seek:
            _log.warning('seek failed')
            return

        # get sample, retrieve its format and save
        sample = appsink.emit("pull-preroll")
        if not sample:
            _log.warning('could not get sample')
            return
        caps = sample.get_caps()
        if not caps:
            _log.warning('could not get snapshot format')
            return
        structure = caps.get_structure(0)
        (success, width) = structure.get_int('width')
        (success, height) = structure.get_int('height')
        # 'buf' rather than 'buffer' to avoid shadowing the builtin
        buf = sample.get_buffer()

        # get the image from the buffer and save it to disk
        im = Image.frombytes('RGB', (width, height),
                             buf.extract_dup(0, buf.get_size()))
        im.save(dest_path)
        _log.info('thumbnail saved to {0}'.format(dest_path))
    finally:
        # cleanup — always tear the pipeline down, including on the
        # early-return error paths above, which previously leaked a
        # pipeline stuck in PAUSED.
        pipeline.set_state(Gst.State.NULL)
e4a1b6d2 JW |
144 | |
145 | ||
class VideoTranscoder(object):
    '''
    Video transcoder

    Transcodes the SRC video file to a VP8 WebM video file at DST

    - Produces a WebM vp8 and vorbis video file.
    '''
    def __init__(self):
        _log.info('Initializing VideoTranscoder...')
        # Last percentage seen on the bus; used to suppress duplicate
        # progress callbacks in _on_message.
        self.progress_percentage = None
        self.loop = GLib.MainLoop()

    def transcode(self, src, dst, **kwargs):
        '''
        Transcode a video file into a 'medium'-sized version.

        :param src: path of the source video file
        :param dst: path the WebM output is written to

        Recognized keyword arguments:
        - dimensions: (width, height) tuple bounding the output size
        - vp8_quality: vp8enc quality setting
        - vp8_threads: encoder thread count (0 means use CPU_COUNT)
        - vorbis_quality: vorbisenc quality setting
        - progress_callback: callable invoked with an int percentage

        Blocks running the GLib main loop until EOS or error.
        '''
        self.source_path = src
        self.destination_path = dst

        # vp8enc options
        self.destination_dimensions = kwargs.get('dimensions', (640, 640))
        self.vp8_quality = kwargs.get('vp8_quality', 8)
        # Number of threads used by vp8enc:
        # number of real cores - 1 as per recommendation on
        # <http://www.webmproject.org/tools/encoder-parameters/#6-multi-threaded-encode-and-decode>
        self.vp8_threads = kwargs.get('vp8_threads', CPU_COUNT - 1)

        # 0 means auto-detect, but dict.get() only falls back to CPU_COUNT
        # if value is None, this will correct our incompatibility with
        # dict.get()
        # This will also correct cases where there's only 1 CPU core, see
        # original self.vp8_threads assignment above.
        if self.vp8_threads == 0:
            self.vp8_threads = CPU_COUNT

        # vorbisenc options
        self.vorbis_quality = kwargs.get('vorbis_quality', 0.3)

        self._progress_callback = kwargs.get('progress_callback') or None

        if not type(self.destination_dimensions) == tuple:
            raise Exception('dimensions must be tuple: (width, height)')

        self._setup_pipeline()
        # Stream discovery; _link_elements and __setup_videoscale_capsfilter
        # both depend on self.data.
        self.data = discover(self.source_path)
        self._link_elements()
        self.__setup_videoscale_capsfilter()
        self.pipeline.set_state(Gst.State.PLAYING)
        _log.info('Transcoding...')
        _log.debug('Initializing MainLoop()')
        self.loop.run()

    def _setup_pipeline(self):
        '''
        Create the pipeline and all of its elements.

        Nothing is linked here; static links happen in _link_elements and
        decodebin's dynamic pads are linked in _on_dynamic_pad.
        '''
        _log.debug('Setting up transcoding pipeline')
        # Create the pipeline bin.
        self.pipeline = Gst.Pipeline.new('VideoTranscoderPipeline')

        # Create all GStreamer elements, starting with
        # filesrc & decoder
        self.filesrc = Gst.ElementFactory.make('filesrc', 'filesrc')
        self.filesrc.set_property('location', self.source_path)
        self.pipeline.add(self.filesrc)

        self.decoder = Gst.ElementFactory.make('decodebin', 'decoder')
        self.decoder.connect('pad-added', self._on_dynamic_pad)
        self.pipeline.add(self.decoder)

        # Video elements
        self.videoqueue = Gst.ElementFactory.make('queue', 'videoqueue')
        self.pipeline.add(self.videoqueue)

        self.videorate = Gst.ElementFactory.make('videorate', 'videorate')
        self.pipeline.add(self.videorate)

        self.videoconvert = Gst.ElementFactory.make('videoconvert',
                                                    'videoconvert')
        self.pipeline.add(self.videoconvert)

        self.videoscale = Gst.ElementFactory.make('videoscale', 'videoscale')
        self.pipeline.add(self.videoscale)

        self.capsfilter = Gst.ElementFactory.make('capsfilter', 'capsfilter')
        self.pipeline.add(self.capsfilter)

        self.vp8enc = Gst.ElementFactory.make('vp8enc', 'vp8enc')
        self.vp8enc.set_property('threads', self.vp8_threads)
        self.pipeline.add(self.vp8enc)

        # Audio elements
        self.audioqueue = Gst.ElementFactory.make('queue', 'audioqueue')
        self.pipeline.add(self.audioqueue)

        self.audiorate = Gst.ElementFactory.make('audiorate', 'audiorate')
        # Large tolerance so audiorate absorbs small timestamp jitter
        # instead of inserting/dropping samples (value in nanoseconds).
        self.audiorate.set_property('tolerance', 80000000)
        self.pipeline.add(self.audiorate)

        self.audioconvert = Gst.ElementFactory.make('audioconvert', 'audioconvert')
        self.pipeline.add(self.audioconvert)
        self.audiocapsfilter = Gst.ElementFactory.make('capsfilter',
                                                       'audiocapsfilter')
        audiocaps = Gst.Caps.new_empty()
        audiocaps_struct = Gst.Structure.new_empty('audio/x-raw')
        audiocaps.append_structure(audiocaps_struct)
        self.audiocapsfilter.set_property('caps', audiocaps)
        self.pipeline.add(self.audiocapsfilter)

        self.vorbisenc = Gst.ElementFactory.make('vorbisenc', 'vorbisenc')
        self.vorbisenc.set_property('quality', self.vorbis_quality)
        self.pipeline.add(self.vorbisenc)

        # WebMmux & filesink
        self.webmmux = Gst.ElementFactory.make('webmmux', 'webmmux')
        self.pipeline.add(self.webmmux)

        self.filesink = Gst.ElementFactory.make('filesink', 'filesink')
        self.filesink.set_property('location', self.destination_path)
        self.pipeline.add(self.filesink)

        # Progressreport
        self.progressreport = Gst.ElementFactory.make(
            'progressreport', 'progressreport')
        # Update every second
        self.progressreport.set_property('update-freq', 1)
        self.progressreport.set_property('silent', True)
        self.pipeline.add(self.progressreport)

    def _link_elements(self):
        '''
        Link all the elements

        This code depends on data from the discoverer and is called
        from __discovered
        '''
        _log.debug('linking elements')
        # Link the filesrc element to the decoder. The decoder then emits
        # 'new-decoded-pad' which links decoded src pads to either a video
        # or audio sink
        self.filesrc.link(self.decoder)
        # link the rest
        self.videoqueue.link(self.videorate)
        self.videorate.link(self.videoconvert)
        self.videoconvert.link(self.videoscale)
        self.videoscale.link(self.capsfilter)
        self.capsfilter.link(self.vp8enc)
        self.vp8enc.link(self.webmmux)

        # Only wire the audio branch if the source actually has audio.
        if self.data.get_audio_streams():
            self.audioqueue.link(self.audiorate)
            self.audiorate.link(self.audioconvert)
            self.audioconvert.link(self.audiocapsfilter)
            self.audiocapsfilter.link(self.vorbisenc)
            self.vorbisenc.link(self.webmmux)
        self.webmmux.link(self.progressreport)
        self.progressreport.link(self.filesink)

        # Setup the message bus and connect _on_message to the pipeline
        self._setup_bus()

    def _on_dynamic_pad(self, dbin, pad):
        '''
        Callback called when ``decodebin`` has a pad that we can connect to
        '''
        # Intersect the capabilities of the video sink and the pad src
        # Then check if they have no common capabilities.
        if (self.videorate.get_static_pad('sink').get_pad_template()
                .get_caps().intersect(pad.query_caps()).is_empty()):
            # It is NOT a video src pad.
            _log.debug('linking audio to the pad dynamically')
            pad.link(self.audioqueue.get_static_pad('sink'))
        else:
            # It IS a video src pad.
            _log.debug('linking video to the pad dynamically')
            pad.link(self.videoqueue.get_static_pad('sink'))

    def _setup_bus(self):
        '''Watch the pipeline's bus and route messages to _on_message'''
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self._on_message)

    def __setup_videoscale_capsfilter(self):
        '''
        Sets up the output format (width, height) for the video
        '''
        caps_struct = Gst.Structure.new_empty('video/x-raw')
        caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
        caps_struct.set_value('framerate', Gst.Fraction(30, 1))
        video_info = self.data.get_video_streams()[0]
        # Constrain only the dominant dimension; videoscale keeps the
        # aspect ratio for the other one.
        if video_info.get_height() > video_info.get_width():
            # portrait
            caps_struct.set_value('height', self.destination_dimensions[1])
        else:
            # landscape
            caps_struct.set_value('width', self.destination_dimensions[0])
        caps = Gst.Caps.new_empty()
        caps.append_structure(caps_struct)
        self.capsfilter.set_property('caps', caps)

    def _on_message(self, bus, message):
        '''Bus handler: progress reporting, EOS completion and errors'''
        _log.debug((bus, message, message.type))
        if message.type == Gst.MessageType.EOS:
            # Transcode finished: record metadata of the result, then stop.
            self.dst_data = discover(self.destination_path)
            self.__stop()
            _log.info('Done')
        elif message.type == Gst.MessageType.ELEMENT:
            if message.has_name('progress'):
                structure = message.get_structure()
                # Update progress state if it has changed
                (success, percent) = structure.get_int('percent')
                if self.progress_percentage != percent and success:
                    self.progress_percentage = percent
                    if self._progress_callback:
                        self._progress_callback(percent)
                    _log.info('{percent}% done...'.format(percent=percent))
        elif message.type == Gst.MessageType.ERROR:
            _log.error('Got error: {0}'.format(message.parse_error()))
            # dst_data None signals failure to the caller.
            self.dst_data = None
            self.__stop()

    def __stop(self):
        '''Tear down the pipeline and schedule the main loop to quit'''
        _log.debug(self.loop)

        if hasattr(self, 'pipeline'):
            # Stop executing the pipeline
            self.pipeline.set_state(Gst.State.NULL)

        # This kills the loop, mercifully
        GLib.idle_add(self.__stop_mainloop)

    def __stop_mainloop(self):
        '''
        Wrapper for GLib.MainLoop.quit()

        This wrapper makes us able to see if self.loop.quit has been called
        '''
        _log.info('Terminating MainLoop')

        self.loop.quit()
26729e02 JW |
385 | |
386 | ||
if __name__ == '__main__':
    # Run at lowest priority; transcoding is a background batch job.
    os.nice(19)
    from optparse import OptionParser

    parser = OptionParser(
        usage='%prog [-v] -a [ video | thumbnail | discover ] SRC [ DEST ]')

    parser.add_option('-a', '--action',
                      dest='action',
                      help='One of "video", "discover" or "thumbnail"')

    parser.add_option('-v',
                      dest='verbose',
                      action='store_true',
                      help='Output debug information')

    parser.add_option('-q',
                      dest='quiet',
                      action='store_true',
                      help='Dear program, please be quiet unless *error*')

    parser.add_option('-w', '--width',
                      type=int,
                      default=180)

    (options, args) = parser.parse_args()

    if options.verbose:
        _log.setLevel(logging.DEBUG)
    else:
        _log.setLevel(logging.INFO)

    if options.quiet:
        _log.setLevel(logging.ERROR)

    _log.debug(args)

    # All actions except 'discover' need exactly SRC and DEST.
    if not len(args) == 2 and not options.action == 'discover':
        parser.print_help()
        sys.exit()

    transcoder = VideoTranscoder()

    if options.action == 'thumbnail':
        args.append(options.width)
        # Fixed: previously called VideoThumbnailerMarkII, a class that no
        # longer exists in this module (NameError); capture_thumb takes the
        # same (src, dest, width) arguments.
        capture_thumb(*args)
    elif options.action == 'video':
        def cb(data):
            print('I\'m a callback!')
        transcoder.transcode(*args, progress_callback=cb)
    elif options.action == 'discover':
        # Fixed: VideoTranscoder has no discover() method; use the
        # module-level discover() imported from media_types.tools.
        print(discover(*args))