If there is an original video file and we skip transcoding, delete the webm_640 file
from mediagoblin.processing import (
create_pub_filepath, FilenameBuilder,
MediaProcessor, ProcessingManager,
- get_orig_filename, copy_original,
+ get_process_filename, copy_original,
store_public, request_from_args)
from mediagoblin.media_types.ascii import asciitoimage
"""
Provides a base for various ascii processing steps
"""
+ acceptable_files = ['original', 'unicode']
+
def common_setup(self):
self.ascii_config = mgg.global_config[
'media_type:mediagoblin.media_types.ascii']
self.workbench.dir, 'convirsions')
os.mkdir(self.conversions_subdir)
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self.charset = None
def copy_original(self):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def _detect_charset(self, orig_file):
orig_file.seek(0)
def store_unicode_file(self):
- with file(self.orig_filename, 'rb') as orig_file:
+ with file(self.process_filename, 'rb') as orig_file:
self._detect_charset(orig_file)
unicode_filepath = create_pub_filepath(self.entry,
'ascii-portable.txt')
self.entry.media_files['unicode'] = unicode_filepath
def generate_thumb(self, font=None, thumb_size=None):
- with file(self.orig_filename, 'rb') as orig_file:
+ with file(self.process_filename, 'rb') as orig_file:
# If no font kwarg, check config
if not font:
font = self.ascii_config.get('thumbnail_font', None)
from mediagoblin.processing import (
BadMediaFail, FilenameBuilder,
ProgressCallback, MediaProcessor, ProcessingManager,
- request_from_args, get_orig_filename,
+ request_from_args, get_process_filename,
store_public, copy_original)
from mediagoblin.media_types.audio.transcoders import (
"""
Provides a base for various audio processing steps
"""
+ acceptable_files = ['original', 'webm_audio']
def common_setup(self):
"""
self.audio_config = mgg \
.global_config['media_type:mediagoblin.media_types.audio']
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self.transcoder = AudioTranscoder()
self.thumbnailer = AudioThumbnailer()
def copy_original(self):
if self.audio_config['keep_original']:
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def transcode(self, quality=None):
'{basename}{ext}'))
self.transcoder.transcode(
- self.orig_filename,
+ self.process_filename,
webm_audio_tmp,
quality=quality,
progress_callback=progress_callback)
_log.info('Creating OGG source for spectrogram')
self.transcoder.transcode(
- self.orig_filename,
+ self.process_filename,
wav_tmp,
mux_string='vorbisenc quality={0} ! oggmux'.format(
self.audio_config['quality']))
from mediagoblin.processing import (
BadMediaFail, FilenameBuilder,
MediaProcessor, ProcessingManager,
- request_from_args, get_orig_filename,
+ request_from_args, get_process_filename,
store_public, copy_original)
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
"""
Provides a base for various media processing steps
"""
+    # list of acceptable file keys in order of preference for reprocessing
+ acceptable_files = ['original', 'medium']
+
def common_setup(self):
"""
Set up the workbench directory and pull down the original file
self.workbench.dir, 'convirsions')
os.mkdir(self.conversions_subdir)
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
# Exif extraction
- self.exif_tags = extract_exif(self.orig_filename)
+ self.exif_tags = extract_exif(self.process_filename)
def generate_medium_if_applicable(self, size=None, quality=None,
filter=None):
if not filter:
filter = self.image_config['resize_filter']
- resize_tool(self.entry, False, 'medium', self.orig_filename,
+ resize_tool(self.entry, False, 'medium', self.process_filename,
self.name_builder.fill('{basename}.medium{ext}'),
self.conversions_subdir, self.exif_tags, quality,
filter, size)
if not filter:
filter = self.image_config['resize_filter']
- resize_tool(self.entry, True, 'thumb', self.orig_filename,
+ resize_tool(self.entry, True, 'thumb', self.process_filename,
self.name_builder.fill('{basename}.thumbnail{ext}'),
self.conversions_subdir, self.exif_tags, quality,
filter, size)
def copy_original(self):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def extract_metadata(self):
from mediagoblin.processing import (
FilenameBuilder, BadMediaFail,
MediaProcessor, ProcessingManager,
- request_from_args, get_orig_filename,
+ request_from_args, get_process_filename,
store_public, copy_original)
from mediagoblin.tools.translate import fake_ugettext_passthrough as _
"""
Provides a base for various pdf processing steps
"""
+ acceptable_files = ['original', 'pdf']
+
def common_setup(self):
"""
Set up common pdf processing steps
"""
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self._set_pdf_filename()
def _set_pdf_filename(self):
if self.name_builder.ext == 'pdf':
- self.pdf_filename = self.orig_filename
+ self.pdf_filename = self.process_filename
elif self.entry.media_files.get('pdf'):
self.pdf_filename = self.workbench.localized_file(
mgg.public_store, self.entry.media_files['pdf'])
def copy_original(self):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def generate_thumb(self, thumb_size=None):
"""
Store the pdf. If the file is not a pdf, make it a pdf
"""
- tmp_pdf = self.orig_filename
+ tmp_pdf = self.process_filename
unoconv = where('unoconv')
Popen(executable=unoconv,
- args=[unoconv, '-v', '-f', 'pdf', self.orig_filename]).wait()
+ args=[unoconv, '-v', '-f', 'pdf', self.process_filename]).wait()
if not os.path.exists(tmp_pdf):
_log.debug('unoconv failed to convert file to pdf')
from mediagoblin.processing import (
FilenameBuilder, MediaProcessor,
ProcessingManager, request_from_args,
- get_orig_filename, store_public,
+ get_process_filename, store_public,
copy_original)
from mediagoblin.media_types.stl import model_loader
"""
Provides a common base for various stl processing steps
"""
+ acceptable_files = ['original']
def common_setup(self):
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self._set_ext()
self._set_model()
Attempt to parse the model file and divine some useful
information about it.
"""
- with open(self.orig_filename, 'rb') as model_file:
+ with open(self.process_filename, 'rb') as model_file:
self.model = model_loader.auto_detect(model_file, self.ext)
def _set_greatest(self):
def copy_original(self):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def _snap(self, keyname, name, camera, size, project="ORTHO"):
filename = self.name_builder.fill(name)
workbench_path = self.workbench.joinpath(filename)
shot = {
- "model_path": self.orig_filename,
+ "model_path": self.process_filename,
"model_ext": self.ext,
"camera_coord": camera,
"camera_focus": self.model.average,
FilenameBuilder, BaseProcessingFail,
ProgressCallback, MediaProcessor,
ProcessingManager, request_from_args,
- get_orig_filename, store_public,
+ get_process_filename, store_public,
copy_original)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
"""
Provides a base for various video processing steps
"""
+ acceptable_files = ['original', 'webm_640']
def common_setup(self):
self.video_config = mgg \
.global_config['media_type:mediagoblin.media_types.video']
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self.transcoder = transcoders.VideoTranscoder()
self.did_transcode = False
if not self.did_transcode or \
(self.video_config['keep_original'] and self.did_transcode):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
vorbis_quality = self.video_config['vorbis_quality']
# Extract metadata and keep a record of it
- metadata = self.transcoder.discover(self.orig_filename)
+ metadata = self.transcoder.discover(self.process_filename)
store_metadata(self.entry, metadata)
# Figure out whether or not we need to transcode this video or
dst_dimensions = metadata['videowidth'], metadata['videoheight']
+ # If there is an original and transcoded, delete the transcoded
+        # since it must be of lower quality than the original
+ if self.entry.media_files.get('original') and \
+ self.entry.media_files.get('webm_640'):
+ self.entry.media_files['webm_640'].delete()
+
else:
- self.transcoder.transcode(self.orig_filename, tmp_dst,
+ self.transcoder.transcode(self.process_filename, tmp_dst,
vp8_quality=vp8_quality,
vp8_threads=vp8_threads,
vorbis_quality=vorbis_quality,
mgg.global_config['media:thumb']['max_height'])
transcoders.VideoThumbnailerMarkII(
- self.orig_filename,
+ self.process_filename,
tmp_thumb,
thumb_size[0],
thumb_size[1])
u'fail_metadata': {}})
-def get_orig_filename(entry, workbench):
+def get_process_filename(entry, workbench, acceptable_files):
    """
-    Get the a filename for the original, on local storage
+    Try to get the queued file if available, otherwise return the first
+    file in acceptable_files that the entry has, localized to the
+    workbench.
-    If the media entry has a queued_media_file, use that, otherwise
-    use the original.
-
-    In the future, this will return the highest quality file available
-    if neither the original or queued file are available by checking
-    some ordered list of preferred keys.
+    If no acceptable file is found, raise ProcessFileNotFound.
    """
+    # Initialize so the not-found check below raises ProcessFileNotFound
+    # instead of UnboundLocalError when nothing matches
+    filepath = None
    if entry.queued_media_file:
-        orig_filepath = entry.queued_media_file
+        filepath = entry.queued_media_file
        storage = mgg.queue_store
    else:
-        orig_filepath = entry.media_files['original']
-        storage = mgg.public_store
+        # Take the first key present on the entry, in order of preference
+        for keyname in acceptable_files:
+            if entry.media_files.get(keyname):
+                filepath = entry.media_files[keyname]
+                storage = mgg.public_store
+                break
+
+    if not filepath:
+        raise ProcessFileNotFound()
-    orig_filename = workbench.localized_file(
-        storage, orig_filepath,
+    filename = workbench.localized_file(
+        storage, filepath,
        'source')
-    return orig_filename
+    # The localized file may still be missing from storage; fail explicitly
+    if not os.path.exists(filename):
+        raise ProcessFileNotFound()
+
+    return filename
def store_public(entry, keyname, local_file, target_name=None,
Error that should be raised when copying to public store fails
"""
general_message = _('Copying to public storage failed.')
+
+
+class ProcessFileNotFound(BaseProcessingFail):
+    """
+    Error that should be raised when no acceptable file (queued file or
+    any key listed in a processor's acceptable_files) can be found for
+    processing.
+    """
+    general_message = _(u'An acceptable processing file was not found')