1 # GNU MediaGoblin -- federated, autonomous media hosting
2 # Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Affero General Public License for more details.
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import datetime
import logging
import os.path

from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
    FilenameBuilder, BaseProcessingFail,
    ProgressCallback, MediaProcessor,
    ProcessingManager, request_from_args,
    get_process_filename, store_public,
    copy_original)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _

from . import transcoders
from .util import skip_transcode
# Module-level logger; DEBUG level is forced so transcoding progress is
# always visible in the processing logs.
_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)

# Media-type identifier used as the plugin/config key for this module.
MEDIA_TYPE = 'mediagoblin.media_types.video'
class VideoTranscodingFail(BaseProcessingFail):
    '''
    Error raised if video transcoding fails
    '''
    # Lazily-translated message shown to the user when processing fails.
    general_message = _(u'Video transcoding failed')
# File extensions that must never be handled as video, even if sniffing
# would otherwise match (raw photo formats).
EXCLUDED_EXTS = ["nef", "cr2"]
def sniff_handler(media_file, filename):
    """Decide whether this file is a video this media type can handle.

    :param media_file: open file-like object with a ``.name`` attribute,
        handed to the GStreamer discoverer.
    :param filename: original filename, used only for log messages.
    :returns: ``MEDIA_TYPE`` if at least one video stream was discovered,
        otherwise ``None``.
    """
    data = transcoders.discover(media_file.name)
    _log.info('Sniffing {0}'.format(MEDIA_TYPE))
    _log.debug('Discovered: {0}'.format(data))

    # Discovery can fail entirely (corrupt/unknown container).
    if not data:
        _log.error('Could not discover {0}'.format(filename))
        return None

    if data.get_video_streams():
        return MEDIA_TYPE
    return None
def store_metadata(media_entry, metadata):
    """
    Store metadata from this video for this media entry.

    :param media_entry: entry whose ``media_data_init`` receives the
        collected metadata (as ``orig_metadata``).
    :param metadata: a GStreamer discoverer info object exposing
        ``get_audio_streams``/``get_video_streams``/``get_tags``/
        ``get_duration``.
    """
    stored_metadata = dict()
    audio_info_list = metadata.get_audio_streams()
    if audio_info_list:
        audio_info = audio_info_list[0]
        stored_metadata['audiochannels'] = audio_info.get_channels()
    # video is always there
    video_info = metadata.get_video_streams()[0]
    # Let's pull out the easy, not having to be converted ones first
    stored_metadata['videoheight'] = video_info.get_height()
    stored_metadata['videowidth'] = video_info.get_width()
    stored_metadata['videolength'] = metadata.get_duration()
    stored_metadata['mimetype'] = metadata.get_tags().get_string('mimetype')
    # We have to convert videorate into a sequence because it's a
    # special type normally..
    stored_metadata['videorate'] = [video_info.get_framerate_num(),
                                    video_info.get_framerate_denom()]

    if metadata.get_tags():
        tags_metadata = metadata.get_tags()
        # we don't use *all* of these, but we know these ones are
        # safe to store as-is.
        # get_string returns (success, value) tuple
        tags = dict(
            [(key, tags_metadata.get_string(key)[1])
             for key in [
                 "application-name", "artist", "audio-codec", "bitrate",
                 "container-format", "copyright", "encoder",
                 "encoder-version", "license", "nominal-bitrate", "title",
                 "video-codec"]
             if tags_metadata.get_string(key)[0]])
        (success, date) = tags_metadata.get_date('date')
        if success:
            tags['date'] = "%s-%s-%s" % (
                date.year, date.month, date.day)

        # TODO: handle timezone info; gst.get_time_zone_offset +
        # python's tzinfo should help
        (success, dt) = tags_metadata.get_date_time('datetime')
        if success:
            tags['datetime'] = datetime.datetime(
                dt.get_year(), dt.get_month(), dt.get_day(), dt.get_hour(),
                dt.get_minute(), dt.get_second(),
                dt.get_microsecond()).isoformat()
        stored_metadata['tags'] = tags

    # Only save this field if there's something to save
    if len(stored_metadata):
        media_entry.media_data_init(
            orig_metadata=stored_metadata)
class CommonVideoProcessor(MediaProcessor):
    """
    Provides a base for various video processing steps
    """
    # File keys, in priority order, that may serve as the processing source.
    acceptable_files = ['original', 'best_quality', 'webm_video']

    def common_setup(self):
        """Pull config, locate the source file and set up the transcoder."""
        self.video_config = mgg \
            .global_config['plugins'][MEDIA_TYPE]

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False

    def copy_original(self):
        """Keep the original file when we didn't transcode, or when the
        config says to keep it even after a transcode."""
        # If we didn't transcode, then we need to keep the original
        if not self.did_transcode or \
           (self.video_config['keep_original'] and self.did_transcode):
            copy_original(
                self.entry, self.process_filename,
                self.name_builder.fill('{basename}{ext}'))

    def _keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        if not self.entry.media_files.get('best_quality'):
            # Save the best quality file if no original?
            if not self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['best_quality'] = self.entry \
                    .media_files['webm_video']

    def _skip_processing(self, keyname, **kwargs):
        """Return True if ``keyname`` was already produced with the same
        parameters (recorded in the entry's file metadata), False otherwise."""
        file_metadata = self.entry.get_file_metadata(keyname)

        # Never processed before -- nothing to skip.
        if not file_metadata:
            return False
        skip = True

        if keyname == 'webm_video':
            if kwargs.get('medium_size') != file_metadata.get('medium_size'):
                skip = False
            elif kwargs.get('vp8_quality') != file_metadata.get('vp8_quality'):
                skip = False
            elif kwargs.get('vp8_threads') != file_metadata.get('vp8_threads'):
                skip = False
            elif kwargs.get('vorbis_quality') != \
                    file_metadata.get('vorbis_quality'):
                skip = False
        elif keyname == 'thumb':
            if kwargs.get('thumb_size') != file_metadata.get('thumb_size'):
                skip = False

        return skip

    def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                  vorbis_quality=None):
        """Transcode the source video to WebM (or skip when unnecessary),
        store the result publicly and record the dimensions.

        All parameters default to the plugin/site configuration values.
        """
        progress_callback = ProgressCallback(self.entry)
        tmp_dst = os.path.join(self.workbench.dir,
                               self.name_builder.fill('{basename}.medium.webm'))

        # Fall back to configured defaults for anything not given.
        if not medium_size:
            medium_size = (
                mgg.global_config['media:medium']['max_width'],
                mgg.global_config['media:medium']['max_height'])
        if not vp8_quality:
            vp8_quality = self.video_config['vp8_quality']
        if not vp8_threads:
            vp8_threads = self.video_config['vp8_threads']
        if not vorbis_quality:
            vorbis_quality = self.video_config['vorbis_quality']

        file_metadata = {'medium_size': medium_size,
                         'vp8_threads': vp8_threads,
                         'vp8_quality': vp8_quality,
                         'vorbis_quality': vorbis_quality}

        # Already transcoded with identical parameters -- nothing to do.
        if self._skip_processing('webm_video', **file_metadata):
            return

        # Extract metadata and keep a record of it
        metadata = transcoders.discover(self.process_filename)
        # metadata's stream info here is a DiscovererContainerInfo instance,
        # it gets split into DiscovererAudioInfo and DiscovererVideoInfo;
        # metadata itself has container-related data in tags, like video-codec
        store_metadata(self.entry, metadata)

        # Figure out whether or not we need to transcode this video or
        # if we can skip it
        if skip_transcode(metadata, medium_size):
            _log.debug('Skipping transcoding')

            dst_dimensions = metadata['videowidth'], metadata['videoheight']

            # If there is an original and transcoded, delete the transcoded
            # since it must be of lower quality then the original
            if self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['webm_video'].delete()
        else:
            self.transcoder.transcode(self.process_filename, tmp_dst,
                                      vp8_quality=vp8_quality,
                                      vp8_threads=vp8_threads,
                                      vorbis_quality=vorbis_quality,
                                      progress_callback=progress_callback,
                                      dimensions=tuple(medium_size))
            video_info = self.transcoder.dst_data.get_video_streams()[0]
            dst_dimensions = (video_info.get_width(), video_info.get_height())

            # Push transcoded video to public storage
            _log.debug('Saving medium...')
            store_public(self.entry, 'webm_video', tmp_dst,
                         self.name_builder.fill('{basename}.medium.webm'))
            _log.debug('Saved medium')

            self.entry.set_file_metadata('webm_video', **file_metadata)

            self.did_transcode = True

        # Save the width and height of the transcoded video
        self.entry.media_data_init(
            width=dst_dimensions[0],
            height=dst_dimensions[1])

    def generate_thumb(self, thumb_size=None):
        """Capture and publicly store a thumbnail image for the video.

        :param thumb_size: (max_width,) tuple; defaults to the configured
            thumbnail max width.  Only the width is used so the aspect
            ratio is preserved.
        """
        # Temporary file for the video thumbnail (cleaned up with workbench)
        tmp_thumb = os.path.join(self.workbench.dir,
                                 self.name_builder.fill(
                                     '{basename}.thumbnail.jpg'))

        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],)

        # Already generated at this size -- nothing to do.
        if self._skip_processing('thumb', thumb_size=thumb_size):
            return

        # We will only use the width so that the correct scale is kept
        transcoders.capture_thumb(
            self.process_filename,
            tmp_thumb,
            thumb_size[0])

        # Checking if the thumbnail was correctly created. If it was not,
        # just give up rather than storing a missing file.
        if not os.path.exists (tmp_thumb):
            return

        # Push the thumbnail to public storage
        _log.debug('Saving thumbnail...')
        store_public(self.entry, 'thumb', tmp_thumb,
                     self.name_builder.fill('{basename}.thumbnail.jpg'))

        self.entry.set_file_metadata('thumb', thumb_size=thumb_size)
class InitialProcessor(CommonVideoProcessor):
    """
    Initial processing steps for new video
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """Eligible while the entry has not yet been processed successfully."""
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    @classmethod
    def generate_parser(cls):
        """Build the argument parser for `gmg reprocess` of this step."""
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        return parser

    @classmethod
    def args_to_request(cls, args):
        """Translate parsed CLI args into a processing request dict."""
        return request_from_args(
            args, ['medium_size', 'vp8_quality', 'vp8_threads',
                   'vorbis_quality', 'thumb_size'])

    def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
                vorbis_quality=None, thumb_size=None):
        """Run the full initial pipeline: transcode, keep originals/best,
        thumbnail, then remove the queue file."""
        self.common_setup()

        self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
                       vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)

        self.copy_original()
        self._keep_best()
        self.generate_thumb(thumb_size=thumb_size)
        self.delete_queue_file()
class Resizer(CommonVideoProcessor):
    """
    Video thumbnail resizing process steps for processed media
    """
    name = 'resize'
    description = 'Resize thumbnail'
    thumb_size = 'thumb_size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """Only already-processed entries can be resized."""
        if not state:
            state = entry.state
        # Equality, not substring membership: `state in 'processed'` would
        # also accept any substring of the word (e.g. 'pro').
        return state == 'processed'

    @classmethod
    def generate_parser(cls):
        """Build the argument parser for `gmg reprocess` of this step."""
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        # Needed for gmg reprocess thumbs to work
        parser.add_argument(
            'file',
            nargs='?',
            default='thumb',
            help='')

        return parser

    @classmethod
    def args_to_request(cls, args):
        """Translate parsed CLI args into a processing request dict."""
        return request_from_args(
            args, ['thumb_size', 'file'])

    def process(self, thumb_size=None, file=None):
        """Regenerate the thumbnail at the requested size."""
        self.common_setup()
        self.generate_thumb(thumb_size=thumb_size)
class Transcoder(CommonVideoProcessor):
    """
    Transcoding processing steps for processed video
    """
    name = 'transcode'
    description = 'Re-transcode video'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """Only already-processed entries can be re-transcoded."""
        if not state:
            state = entry.state
        # Equality, not substring membership: `state in 'processed'` would
        # also accept any substring of the word (e.g. 'pro').
        return state == 'processed'

    @classmethod
    def generate_parser(cls):
        """Build the argument parser for `gmg reprocess` of this step."""
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        return parser

    @classmethod
    def args_to_request(cls, args):
        """Translate parsed CLI args into a processing request dict."""
        return request_from_args(
            args, ['medium_size', 'vp8_threads', 'vp8_quality',
                   'vorbis_quality'])

    def process(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                vorbis_quality=None):
        """Re-run the transcode step with (optionally) new parameters."""
        self.common_setup()
        self.transcode(medium_size=medium_size, vp8_threads=vp8_threads,
                       vp8_quality=vp8_quality, vorbis_quality=vorbis_quality)
class VideoProcessingManager(ProcessingManager):
    """Registers all video processors with the processing framework."""

    def __init__(self):
        super(VideoProcessingManager, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)
        self.add_processor(Transcoder)