1 # GNU MediaGoblin -- federated, autonomous media hosting
2 # Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Affero General Public License for more details.
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os.path
import logging
import datetime

from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
    FilenameBuilder, BaseProcessingFail,
    ProgressCallback, MediaProcessor,
    ProcessingManager, request_from_args,
    get_process_filename, store_public,
    copy_original)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _

from . import transcoders
from .util import skip_transcode
# Identifier under which this media type registers itself.
MEDIA_TYPE = 'mediagoblin.media_types.video'

# Module-level logger, forced to DEBUG so transcoding progress shows up.
_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)
class VideoTranscodingFail(BaseProcessingFail):
    '''
    Error raised if video transcoding fails
    '''
    # User-facing, translatable failure message.
    general_message = _(u'Video transcoding failed')
# Extensions we refuse to sniff even though gstreamer may claim them
# (raw camera formats belong to other media types).
EXCLUDED_EXTS = ["nef", "cr2"]


def sniff_handler(media_file, filename):
    """
    Decide whether this file is a video we can handle.

    Returns MEDIA_TYPE when gstreamer discovery reports a video,
    otherwise None.
    """
    name, ext = os.path.splitext(filename)
    clean_ext = ext.lower()[1:]

    if clean_ext in EXCLUDED_EXTS:
        # We don't handle this filetype, though gstreamer might think we can
        return None

    transcoder = transcoders.VideoTranscoder()
    data = transcoder.discover(media_file.name)

    _log.info('Sniffing {0}'.format(MEDIA_TYPE))
    _log.debug('Discovered: {0}'.format(data))

    if not data:
        _log.error('Could not discover {0}'.format(filename))
        return None

    if data['is_video'] is True:
        return MEDIA_TYPE

    return None
def store_metadata(media_entry, metadata):
    """
    Store metadata from this video for this media entry.

    :param media_entry: entry whose media_data is initialized with the
        extracted metadata
    :param metadata: discovery result; exposes get_audio_streams() /
        get_video_streams() as well as mapping-style access
        (``"key" in metadata``, ``metadata["key"]``) -- assumed hybrid
        API, confirm against transcoders.VideoTranscoder.discover()
    """
    stored_metadata = dict()
    audio_info_list = metadata.get_audio_streams()
    if audio_info_list:
        audio_info = audio_info_list[0]
        stored_metadata['audiochannels'] = audio_info.get_channels()
    # video is always there; indexing [0] raises if no video stream,
    # which is presumably intentional -- TODO confirm
    video_info = metadata.get_video_streams()[0]
    # Let's pull out the easy, not having to be converted ones first.
    # BUGFIX: use update() -- the previous plain reassignment of
    # stored_metadata threw away the 'audiochannels' value stored above.
    stored_metadata.update(dict(
        [(key, metadata[key])
         for key in [
             "videoheight", "videolength", "videowidth",
             "audiorate", "audiolength", "audiochannels", "audiowidth",
             "mimetype"]
         if key in metadata]))

    # We have to convert videorate into a sequence because it's a
    # special type normally..
    if "videorate" in metadata:
        videorate = metadata["videorate"]
        stored_metadata["videorate"] = [videorate.num, videorate.denom]

    # Also make a whitelist conversion of the tags.
    if "tags" in metadata:
        tags_metadata = metadata['tags']

        # we don't use *all* of these, but we know these ones are
        # safe...
        tags = dict(
            [(key, tags_metadata[key])
             for key in [
                 "application-name", "artist", "audio-codec", "bitrate",
                 "container-format", "copyright", "encoder",
                 "encoder-version", "license", "nominal-bitrate", "title",
                 "video-codec"]
             if key in tags_metadata])
        if 'date' in tags_metadata:
            date = tags_metadata['date']
            tags['date'] = "%s-%s-%s" % (
                date.year, date.month, date.day)

        # TODO: handle timezone info; gst.get_time_zone_offset +
        # python's tzinfo should help
        if 'datetime' in tags_metadata:
            dt = tags_metadata['datetime']
            tags['datetime'] = datetime.datetime(
                dt.get_year(), dt.get_month(), dt.get_day(), dt.get_hour(),
                dt.get_minute(), dt.get_second(),
                dt.get_microsecond()).isoformat()

        stored_metadata['tags'] = tags

    # Only save this field if there's something to save
    if len(stored_metadata):
        media_entry.media_data_init(
            orig_metadata=stored_metadata)
class CommonVideoProcessor(MediaProcessor):
    """
    Provides a base for various video processing steps
    """
    # Keys into entry.media_files that may serve as the processing source,
    # in order of preference.
    acceptable_files = ['original', 'best_quality', 'webm_video']

    def common_setup(self):
        """Fetch plugin config, pull the working file and set up a transcoder."""
        self.video_config = mgg \
            .global_config['plugins'][MEDIA_TYPE]

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False

    def copy_original(self):
        # If we didn't transcode, then we need to keep the original
        if not self.did_transcode or \
           (self.video_config['keep_original'] and self.did_transcode):
            copy_original(
                self.entry, self.process_filename,
                self.name_builder.fill('{basename}{ext}'))

    def _keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        if not self.entry.media_files.get('best_quality'):
            # Save the best quality file if no original?
            if not self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['best_quality'] = self.entry \
                    .media_files['webm_video']

    def _skip_processing(self, keyname, **kwargs):
        """
        Return True when the stored file metadata for `keyname` matches
        the requested parameters, i.e. the step already ran with the
        same settings and may be skipped.
        """
        file_metadata = self.entry.get_file_metadata(keyname)

        if not file_metadata:
            return False
        skip = True

        if keyname == 'webm_video':
            if kwargs.get('medium_size') != file_metadata.get('medium_size'):
                skip = False
            elif kwargs.get('vp8_quality') != file_metadata.get('vp8_quality'):
                skip = False
            elif kwargs.get('vp8_threads') != file_metadata.get('vp8_threads'):
                skip = False
            elif kwargs.get('vorbis_quality') != \
                    file_metadata.get('vorbis_quality'):
                skip = False
        elif keyname == 'thumb':
            if kwargs.get('thumb_size') != file_metadata.get('thumb_size'):
                skip = False

        return skip

    def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                  vorbis_quality=None):
        """
        Transcode the working file to webm, unless the stored metadata
        shows an identical run already happened or skip_transcode()
        says the source is fine as-is.  Falsy parameters fall back to
        the site/plugin configuration.
        """
        progress_callback = ProgressCallback(self.entry)
        tmp_dst = os.path.join(self.workbench.dir,
                               self.name_builder.fill('{basename}.medium.webm'))

        if not medium_size:
            medium_size = (
                mgg.global_config['media:medium']['max_width'],
                mgg.global_config['media:medium']['max_height'])
        if not vp8_quality:
            vp8_quality = self.video_config['vp8_quality']
        if not vp8_threads:
            vp8_threads = self.video_config['vp8_threads']
        if not vorbis_quality:
            vorbis_quality = self.video_config['vorbis_quality']

        file_metadata = {'medium_size': medium_size,
                         'vp8_threads': vp8_threads,
                         'vp8_quality': vp8_quality,
                         'vorbis_quality': vorbis_quality}

        if self._skip_processing('webm_video', **file_metadata):
            return

        # Extract metadata and keep a record of it
        metadata = self.transcoder.discover(self.process_filename)
        store_metadata(self.entry, metadata)

        # Figure out whether or not we need to transcode this video or
        # if we can skip it
        if skip_transcode(metadata, medium_size):
            _log.debug('Skipping transcoding')

            dst_dimensions = metadata['videowidth'], metadata['videoheight']

            # If there is an original and transcoded, delete the transcoded
            # since it must be of lower quality then the original
            if self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['webm_video'].delete()

        else:
            self.transcoder.transcode(self.process_filename, tmp_dst,
                                      vp8_quality=vp8_quality,
                                      vp8_threads=vp8_threads,
                                      vorbis_quality=vorbis_quality,
                                      progress_callback=progress_callback,
                                      dimensions=tuple(medium_size))

            dst_dimensions = self.transcoder.dst_data.videowidth,\
                self.transcoder.dst_data.videoheight

            self._keep_best()

            # Push transcoded video to public storage
            _log.debug('Saving medium...')
            store_public(self.entry, 'webm_video', tmp_dst,
                         self.name_builder.fill('{basename}.medium.webm'))
            _log.debug('Saved medium')

            self.entry.set_file_metadata('webm_video', **file_metadata)

            self.did_transcode = True

        # Save the width and height of the transcoded video
        self.entry.media_data_init(
            width=dst_dimensions[0],
            height=dst_dimensions[1])

    def generate_thumb(self, thumb_size=None):
        """
        Capture a thumbnail jpeg for the video, unless an identical
        thumbnail was already generated for this entry.
        """
        # Temporary file for the video thumbnail (cleaned up with workbench)
        tmp_thumb = os.path.join(self.workbench.dir,
                                 self.name_builder.fill(
                                     '{basename}.thumbnail.jpg'))

        if not thumb_size:
            # NOTE(review): one-element tuple -- only the width is used below,
            # but stored thumb_size metadata then differs in shape from an
            # explicitly passed (w, h) pair; confirm against callers.
            thumb_size = (mgg.global_config['media:thumb']['max_width'],)

        if self._skip_processing('thumb', thumb_size=thumb_size):
            return

        # We will only use the width so that the correct scale is kept
        transcoders.capture_thumb(
            self.process_filename,
            tmp_thumb,
            thumb_size[0])

        # Checking if the thumbnail was correctly created. If it was not,
        # stop here rather than storing a missing file.
        if not os.path.exists(tmp_thumb):
            return

        # Push the thumbnail to public storage
        _log.debug('Saving thumbnail...')
        store_public(self.entry, 'thumb', tmp_thumb,
                     self.name_builder.fill('{basename}.thumbnail.jpg'))

        self.entry.set_file_metadata('thumb', thumb_size=thumb_size)
class InitialProcessor(CommonVideoProcessor):
    """
    Initial processing steps for new video
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        # Eligible only before a successful run (fresh or failed entries).
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    @classmethod
    def generate_parser(cls):
        # CLI parser for `gmg reprocess run ... initial`.
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        return parser

    @classmethod
    def args_to_request(cls, args):
        # Map parsed CLI args onto a processing request dict.
        return request_from_args(
            args, ['medium_size', 'vp8_quality', 'vp8_threads',
                   'vorbis_quality', 'thumb_size'])

    def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
                vorbis_quality=None, thumb_size=None):
        # Full first-run pipeline: transcode, keep originals/best file,
        # thumbnail, then drop the queued upload.
        self.common_setup()

        self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
                       vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)

        self.copy_original()
        self._keep_best()
        self.generate_thumb(thumb_size=thumb_size)
        self.delete_queue_file()
class Resizer(CommonVideoProcessor):
    """
    Video thumbnail resizing process steps for processed media
    """
    name = 'resize'
    description = 'Resize thumbnail'
    thumb_size = 'thumb_size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        # Only already-processed media can be re-thumbnailed.
        if not state:
            state = entry.state
        # BUGFIX: was `state in 'processed'` -- substring membership on a
        # string, which also accepted values like 'es' or 'process'.
        return state == 'processed'

    @classmethod
    def generate_parser(cls):
        # CLI parser for `gmg reprocess run ... resize`.
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        # Needed for gmg reprocess thumbs to work
        parser.add_argument(
            'file',
            nargs='?',
            default='thumb',
            help='')

        return parser

    @classmethod
    def args_to_request(cls, args):
        # Map parsed CLI args onto a processing request dict.
        return request_from_args(
            args, ['thumb_size', 'file'])

    def process(self, thumb_size=None, file=None):
        # Regenerate the thumbnail; `file` exists only for CLI symmetry.
        self.common_setup()
        self.generate_thumb(thumb_size=thumb_size)
class Transcoder(CommonVideoProcessor):
    """
    Transcoding processing steps for processed video
    """
    name = 'transcode'
    description = 'Re-transcode video'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        # Only already-processed media can be re-transcoded.
        if not state:
            state = entry.state
        # BUGFIX: was `state in 'processed'` -- substring membership on a
        # string, which also accepted values like 'es' or 'process'.
        return state == 'processed'

    @classmethod
    def generate_parser(cls):
        # CLI parser for `gmg reprocess run ... transcode`.
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        return parser

    @classmethod
    def args_to_request(cls, args):
        # Map parsed CLI args onto a processing request dict.
        return request_from_args(
            args, ['medium_size', 'vp8_threads', 'vp8_quality',
                   'vorbis_quality'])

    def process(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                vorbis_quality=None):
        # Re-run the webm transcode with (possibly) new settings.
        self.common_setup()
        self.transcode(medium_size=medium_size, vp8_threads=vp8_threads,
                       vp8_quality=vp8_quality, vorbis_quality=vorbis_quality)
class VideoProcessingManager(ProcessingManager):
    # Registers every video processing step with the manager.
    def __init__(self):
        super(VideoProcessingManager, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)
        self.add_processor(Transcoder)