simplify an if and fix string formatting
[mediagoblin.git] / mediagoblin / media_types / video / processing.py
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import os.path
import logging
import datetime
import celery

import six

from celery import group
from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
    FilenameBuilder, BaseProcessingFail,
    ProgressCallback, MediaProcessor,
    ProcessingManager, request_from_args,
    get_process_filename, store_public,
    copy_original, get_entry_and_processing_manager)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.media_types import MissingComponents

from . import transcoders
from .util import skip_transcode, ACCEPTED_RESOLUTIONS

_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)

MEDIA_TYPE = 'mediagoblin.media_types.video'


class VideoTranscodingFail(BaseProcessingFail):
    '''
    Error raised if video transcoding fails
    '''
    general_message = _(u'Video transcoding failed')


def sniffer(media_file):
    '''New-style sniffer, used in the two-step check; the file must have a .name'''
    _log.info('Sniffing {0}'.format(MEDIA_TYPE))
    try:
        data = transcoders.discover(media_file.name)
    except Exception as e:
        # this is usually GLib.GError, but we don't really care which one
        _log.warning(u'GStreamer: {0}'.format(six.text_type(e)))
        raise MissingComponents(u'GStreamer: {0}'.format(six.text_type(e)))
    _log.debug('Discovered: {0}'.format(data))

    if not data.get_video_streams():
        raise MissingComponents('No video streams found in this video')

    if data.get_result() != 0:  # it's 0 on success
        try:
            missing = data.get_misc().get_string('name')
            _log.warning('GStreamer: missing {0}'.format(missing))
        except AttributeError:
            # AttributeError happens here on gstreamer >1.4, when get_misc
            # returns None. There is a special function to get info about the
            # missing plugin. This info should be logged for the admin and
            # shown to the user in a short and friendly form.
            details = data.get_missing_elements_installer_details()
            _log.warning('GStreamer: missing: {0}'.format(', '.join(details)))
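            # The installer detail strings are pipe-separated; assuming the
            # usual 'gstreamer|version|app|description|caps' layout, fields 3
            # and 4 carry a human-readable name and the concrete capability,
            # e.g. "Vorbis audio decoder (decoder-audio/x-vorbis)".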
            missing = u', '.join([u'{0} ({1})'.format(*d.split('|')[3:])
                                  for d in details])
        raise MissingComponents(u'{0} is missing'.format(missing))

    return MEDIA_TYPE


EXCLUDED_EXTS = ["nef", "svg"]


def sniff_handler(media_file, filename):
    name, ext = os.path.splitext(filename)
    clean_ext = ext.lower()[1:]

    if clean_ext in EXCLUDED_EXTS:
        # We don't handle this filetype, though gstreamer might think we can
        _log.info('Refused to process {0} due to excluded extension'.format(filename))
        return None

    try:
        return sniffer(media_file)
    except Exception:
        _log.error('Could not discover {0}'.format(filename))
        return None


def get_tags(stream_info):
    'Gets all tags and their values from the stream info'
    taglist = stream_info.get_tags()
    if not taglist:
        return {}
    tags = []
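    # Gst.TagList has no dict-like interface, so collect (tag, value) pairs
    # through foreach(); only the first value of each tag is kept.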
    taglist.foreach(
        lambda list, tag: tags.append((tag, list.get_value_index(tag, 0))))
    tags = dict(tags)

    # date/datetime should be converted from GDate/GDateTime to strings
    if 'date' in tags:
        date = tags['date']
        tags['date'] = "%s-%s-%s" % (
            date.year, date.month, date.day)

    if 'datetime' in tags:
        # TODO: handle timezone info; gst.get_time_zone_offset +
        # python's tzinfo should help
        dt = tags['datetime']
        try:
            tags['datetime'] = datetime.datetime(
                dt.get_year(), dt.get_month(), dt.get_day(), dt.get_hour(),
                dt.get_minute(), dt.get_second(),
                dt.get_microsecond()).isoformat()
        except Exception:
            tags['datetime'] = None
    for k, v in tags.copy().items():
        # only types accepted by JSON may remain; drop everything else
        if not isinstance(v, (dict, list, six.string_types, int, float, bool,
                              type(None))):
            del tags[k]
    return dict(tags)


def store_metadata(media_entry, metadata):
    """
    Store metadata from this video for this media entry.
    """
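    # The stored structure ends up looking roughly like this
    # (illustrative values):
    #   {'audio': [{'channels': 2, 'bitrate': ..., 'tags': {...}}],
    #    'video': [{'width': 1280, 'height': 720, 'videorate': [25, 1], ...}],
    #    'common': {'duration': ..., 'tags': {...}}}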
    stored_metadata = dict()
    audio_info_list = metadata.get_audio_streams()
    if audio_info_list:
        stored_metadata['audio'] = []
    for audio_info in audio_info_list:
        stored_metadata['audio'].append(
            {
                'channels': audio_info.get_channels(),
                'bitrate': audio_info.get_bitrate(),
                'depth': audio_info.get_depth(),
                'language': audio_info.get_language(),
                'sample_rate': audio_info.get_sample_rate(),
                'tags': get_tags(audio_info)
            })

    video_info_list = metadata.get_video_streams()
    if video_info_list:
        stored_metadata['video'] = []
    for video_info in video_info_list:
        stored_metadata['video'].append(
            {
                'width': video_info.get_width(),
                'height': video_info.get_height(),
                'bitrate': video_info.get_bitrate(),
                'depth': video_info.get_depth(),
                'videorate': [video_info.get_framerate_num(),
                              video_info.get_framerate_denom()],
                'tags': get_tags(video_info)
            })

    stored_metadata['common'] = {
        'duration': metadata.get_duration(),
        'tags': get_tags(metadata),
    }
    # Only save this field if there's something to save
    if stored_metadata:
        media_entry.media_data_init(orig_metadata=stored_metadata)


@celery.task()
def main_task(entry_id, resolution, medium_size, **process_info):
    """
    Main celery task to transcode the video to the default resolution
    and store the original video metadata.
    """
    _log.debug('MediaEntry processing')
    entry, manager = get_entry_and_processing_manager(entry_id)
    with CommonVideoProcessor(manager, entry) as processor:
        processor.common_setup(resolution)
        processor.transcode(medium_size=tuple(medium_size),
                            vp8_quality=process_info['vp8_quality'],
                            vp8_threads=process_info['vp8_threads'],
                            vorbis_quality=process_info['vorbis_quality'])
        processor.generate_thumb(thumb_size=process_info['thumb_size'])
        processor.store_orig_metadata()
    # Mark the entry's state as processed
    entry.state = u'processed'
    entry.save()
    _log.info(u'MediaEntry ID {0} is processed (transcoded to default'
              ' resolution): {1}'.format(entry.id, medium_size))
    _log.debug('MediaEntry processed')


@celery.task()
def complementary_task(entry_id, resolution, medium_size, **process_info):
    """
    Side celery task to transcode the video to other resolutions
    """
    entry, manager = get_entry_and_processing_manager(entry_id)
    with CommonVideoProcessor(manager, entry) as processor:
        processor.common_setup(resolution)
        processor.transcode(medium_size=tuple(medium_size),
                            vp8_quality=process_info['vp8_quality'],
                            vp8_threads=process_info['vp8_threads'],
                            vorbis_quality=process_info['vorbis_quality'])
    _log.info(u'MediaEntry ID {0} is transcoded to {1}'.format(
        entry.id, medium_size))


@celery.task()
def processing_cleanup(entry_id):
    _log.debug('Entered processing_cleanup')
    entry, manager = get_entry_and_processing_manager(entry_id)
    with CommonVideoProcessor(manager, entry) as processor:
        # no need to specify a resolution here
        processor.common_setup()
        processor.copy_original()
        processor.keep_best()
        processor.delete_queue_file()
        _log.debug('Deleted queue_file')


class CommonVideoProcessor(MediaProcessor):
    """
    Provides a base for various video processing steps
    """
    acceptable_files = ['original', 'best_quality', 'webm_144p', 'webm_360p',
                        'webm_480p', 'webm_720p', 'webm_1080p', 'webm_video']

    def common_setup(self, resolution=None):
        self.video_config = mgg \
            .global_config['plugins'][MEDIA_TYPE]

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False
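        # entry.media_files keys follow the 'webm_<resolution>' naming used in
        # acceptable_files above (e.g. 'webm_480p'); when no resolution is
        # given, the generic 'webm_video' key is used instead.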
        if resolution:
            self.curr_file = 'webm_' + str(resolution)
            self.part_filename = (self.name_builder.fill('{basename}.' +
                                  str(resolution) + '.webm'))
        else:
            self.curr_file = 'webm_video'
            self.part_filename = self.name_builder.fill('{basename}.medium.webm')


    def copy_original(self):
        # If we didn't transcode, then we need to keep the original
        self.did_transcode = False
        for each_res in self.video_config['available_resolutions']:
            if 'webm_{}'.format(each_res) in self.entry.media_files:
                self.did_transcode = True
                break
        if not self.did_transcode or self.video_config['keep_original']:
            copy_original(
                self.entry, self.process_filename,
                self.name_builder.fill('{basename}{ext}'))


    def keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        best_file = None
        best_file_dim = (0, 0)
        for each_res in self.video_config['available_resolutions']:
            curr_dim = ACCEPTED_RESOLUTIONS[each_res]
            if curr_dim[0] >= best_file_dim[0] and curr_dim[1] >= best_file_dim[1]:
                best_file = each_res
                best_file_dim = curr_dim
        if not self.entry.media_files.get('best_quality'):
            # If there is no original, fall back to the largest transcoded file
            if not self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_{}'.format(best_file)):
                self.entry.media_files['best_quality'] = self.entry \
                    .media_files['webm_{}'.format(best_file)]


    def _skip_processing(self, keyname, **kwargs):
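        # Compare the settings recorded for an already-generated file against
        # the requested ones; if nothing changed, the file does not need to be
        # regenerated.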
        file_metadata = self.entry.get_file_metadata(keyname)

        if not file_metadata:
            return False
        skip = True

        if 'webm' in keyname:
            if kwargs.get('medium_size') != file_metadata.get('medium_size'):
                skip = False
            elif kwargs.get('vp8_quality') != file_metadata.get('vp8_quality'):
                skip = False
            elif kwargs.get('vp8_threads') != file_metadata.get('vp8_threads'):
                skip = False
            elif kwargs.get('vorbis_quality') != \
                    file_metadata.get('vorbis_quality'):
                skip = False
        elif keyname == 'thumb':
            if kwargs.get('thumb_size') != file_metadata.get('thumb_size'):
                skip = False

        return skip


    def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                  vorbis_quality=None):
        progress_callback = ProgressCallback(self.entry)
        tmp_dst = os.path.join(self.workbench.dir, self.part_filename)

        if not medium_size:
            medium_size = (
                mgg.global_config['media:medium']['max_width'],
                mgg.global_config['media:medium']['max_height'])
        if not vp8_quality:
            vp8_quality = self.video_config['vp8_quality']
        if not vp8_threads:
            vp8_threads = self.video_config['vp8_threads']
        if not vorbis_quality:
            vorbis_quality = self.video_config['vorbis_quality']

        file_metadata = {'medium_size': medium_size,
                         'vp8_threads': vp8_threads,
                         'vp8_quality': vp8_quality,
                         'vorbis_quality': vorbis_quality}

        if self._skip_processing(self.curr_file, **file_metadata):
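            # This variant already exists and was produced with the same
            # settings; there is nothing to do.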
            return

        metadata = transcoders.discover(self.process_filename)
        orig_dst_dimensions = (metadata.get_video_streams()[0].get_width(),
                               metadata.get_video_streams()[0].get_height())

        # Figure out whether or not we need to transcode this video or
        # if we can skip it
        if skip_transcode(metadata, medium_size):
            _log.debug('Skipping transcoding')

            # If there is an original and a transcoded file, delete the
            # transcoded one, since it must be of lower quality than the original
            if self.entry.media_files.get('original') and \
                    self.entry.media_files.get(self.curr_file):
                self.entry.media_files[self.curr_file].delete()

        else:
            _log.debug('Entered transcoder')
            video_config = (mgg.global_config['plugins']
                            ['mediagoblin.media_types.video'])
            num_res = len(video_config['available_resolutions'])
            default_res = video_config['default_resolution']
            self.transcoder.transcode(self.process_filename, tmp_dst,
                                      default_res, num_res,
                                      vp8_quality=vp8_quality,
                                      vp8_threads=vp8_threads,
                                      vorbis_quality=vorbis_quality,
                                      progress_callback=progress_callback,
                                      dimensions=tuple(medium_size))
            if self.transcoder.dst_data:
                # Push the transcoded video to public storage
                _log.debug('Saving medium...')
                store_public(self.entry, self.curr_file, tmp_dst, self.part_filename)
                _log.debug('Saved medium')

                self.entry.set_file_metadata(self.curr_file, **file_metadata)

                self.did_transcode = True

    def generate_thumb(self, thumb_size=None):
        _log.debug("Enter generate_thumb()")
        # Temporary file for the video thumbnail (cleaned up with workbench)
        tmp_thumb = os.path.join(self.workbench.dir,
                                 self.name_builder.fill(
                                     '{basename}.thumbnail.jpg'))

        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],)

        if self._skip_processing('thumb', thumb_size=thumb_size):
            return

        # We will only use the width so that the correct scale is kept
        transcoders.capture_thumb(
            self.process_filename,
            tmp_thumb,
            thumb_size[0])

        # Check whether the thumbnail was correctly created; if it was not,
        # just give up.
        if not os.path.exists(tmp_thumb):
            return

        # Push the thumbnail to public storage
        _log.debug('Saving thumbnail...')
        store_public(self.entry, 'thumb', tmp_thumb,
                     self.name_builder.fill('{basename}.thumbnail.jpg'))

        self.entry.set_file_metadata('thumb', thumb_size=thumb_size)

    def store_orig_metadata(self):
        # Extract metadata and keep a record of it
        metadata = transcoders.discover(self.process_filename)

        # metadata's stream info here is a DiscovererContainerInfo instance;
        # it gets split into DiscovererAudioInfo and DiscovererVideoInfo;
        # metadata itself has container-related data in tags, like video-codec
        store_metadata(self.entry, metadata)
        _log.debug("Stored original video metadata")


class InitialProcessor(CommonVideoProcessor):
    """
    Initial processing steps for new video
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['medium_size', 'vp8_quality', 'vp8_threads',
                   'vorbis_quality', 'thumb_size'])

    def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
                vorbis_quality=None, thumb_size=None, resolution=None):
        self.common_setup(resolution=resolution)
        self.store_orig_metadata()
        self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
                       vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)

        self.generate_thumb(thumb_size=thumb_size)
        self.delete_queue_file()


class Resizer(CommonVideoProcessor):
    """
    Video thumbnail resizing process steps for processed media
    """
    name = 'resize'
    description = 'Resize thumbnail'
    thumb_size = 'thumb_size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state == 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        # Needed for gmg reprocess thumbs to work
        parser.add_argument(
            'file',
            nargs='?',
            default='thumb',
            choices=['thumb'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['thumb_size', 'file'])

    def process(self, thumb_size=None, file=None):
        self.common_setup()
        self.generate_thumb(thumb_size=thumb_size)


class Transcoder(CommonVideoProcessor):
    """
    Transcoding processing steps for processed video
    """
    name = 'transcode'
    description = 'Re-transcode video'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state == 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['medium_size', 'vp8_threads', 'vp8_quality',
                   'vorbis_quality'])

    def process(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                vorbis_quality=None):
        self.common_setup()
        self.transcode(medium_size=medium_size, vp8_threads=vp8_threads,
                       vp8_quality=vp8_quality, vorbis_quality=vorbis_quality)


class VideoProcessingManager(ProcessingManager):
    def __init__(self):
        super(VideoProcessingManager, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)
        self.add_processor(Transcoder)

    def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):

        video_config = mgg.global_config['plugins'][MEDIA_TYPE]
        def_res = video_config['default_resolution']
        priority_num = len(video_config['available_resolutions']) + 1

        entry.state = u'processing'
        entry.save()

        reprocess_info = reprocess_info or {}
        if 'vp8_quality' not in reprocess_info:
            reprocess_info['vp8_quality'] = None
        if 'vorbis_quality' not in reprocess_info:
            reprocess_info['vorbis_quality'] = None
        if 'vp8_threads' not in reprocess_info:
            reprocess_info['vp8_threads'] = None
        if 'thumb_size' not in reprocess_info:
            reprocess_info['thumb_size'] = None
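        # The default resolution gets the highest Celery priority (number of
        # configured resolutions + 1); the remaining resolutions are queued
        # below with progressively lower priorities.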
        tasks_list = [main_task.signature(args=(entry.id, def_res,
                                                ACCEPTED_RESOLUTIONS[def_res]),
                                          kwargs=reprocess_info, queue='default',
                                          priority=priority_num, immutable=True)]

        for comp_res in video_config['available_resolutions']:
            if comp_res != def_res:
                priority_num -= 1
                tasks_list.append(
                    complementary_task.signature(args=(entry.id, comp_res,
                                                       ACCEPTED_RESOLUTIONS[comp_res]),
                                                 kwargs=reprocess_info, queue='default',
                                                 priority=priority_num, immutable=True)
                )

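        # All transcodes run as one Celery group; the cleanup signature is
        # handed back to the caller, which is expected to schedule it once the
        # whole group has finished.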
        transcoding_tasks = group(tasks_list)
        cleanup_task = processing_cleanup.signature(args=(entry.id,),
                                                    queue='default', immutable=True)

        return (transcoding_tasks, cleanup_task)