import json
import logging
-from os.path import splitext
-from werkzeug.exceptions import BadRequest, Forbidden
+from werkzeug.exceptions import BadRequest
from werkzeug.wrappers import Response
+from mediagoblin.tools.translate import pass_to_ugettext as _
from mediagoblin.tools.response import json_response
from mediagoblin.decorators import require_active_login
from mediagoblin.meddleware.csrf import csrf_exempt
-from mediagoblin.media_types import sniff_media
+from mediagoblin.media_types import \
+    InvalidFileType, FileTypeNotSupported
from mediagoblin.plugins.api.tools import api_auth, get_entry_serializable
-from mediagoblin.submit.lib import check_file_field, prepare_queue_task, \
-    run_process_media, new_upload_entry
+from mediagoblin.submit.lib import \
+    check_file_field, submit_media, get_upload_file_limits, \
+    FileUploadLimit, UserUploadLimit, UserPastUploadLimit
_log = logging.getLogger(__name__)
        _log.debug('File field not found')
        raise BadRequest()
-    media_file = request.files['file']
-
-    media_type, media_manager = sniff_media(media_file)
-
-    entry = new_upload_entry(request.user)
-    entry.media_type = unicode(media_type)
-    entry.title = unicode(request.form.get('title')
-                          or splitext(media_file.filename)[0])
-
-    entry.description = unicode(request.form.get('description'))
-    entry.license = unicode(request.form.get('license', ''))
-
-    entry.generate_slug()
-
-    # queue appropriately
-    queue_file = prepare_queue_task(request.app, entry, media_file.filename)
-
-    with queue_file:
-        queue_file.write(request.files['file'].stream.read())
-
-    # Save now so we have this data before kicking off processing
-    entry.save()
-
-    if request.form.get('callback_url'):
-        metadata = request.db.ProcessingMetaData()
-        metadata.media_entry = entry
-        metadata.callback_url = unicode(request.form['callback_url'])
-        metadata.save()
-
-    # Pass off to processing
-    #
-    # (... don't change entry after this point to avoid race
-    # conditions with changes to the document via processing code)
-    feed_url = request.urlgen(
-        'mediagoblin.user_pages.atom_feed',
-        qualified=True, user=request.user.username)
-    run_process_media(entry, feed_url)
-
-    return json_response(get_entry_serializable(entry, request.urlgen))
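+    # Look up this user's upload limit and the per-file size cap so
+    # submit_media can enforce them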
+    upload_limit, max_file_size = get_upload_file_limits(request.user)
+
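+    # The optional callback_url is stored alongside the entry so the API
+    # client can be notified once processing of this submission finishes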
+    callback_url = request.form.get('callback_url')
+    if callback_url:
+        callback_url = unicode(callback_url)
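+
+    # submit_media() in mediagoblin.submit.lib now handles the sniffing,
+    # queueing, saving and kick-off of processing that used to live here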
+    try:
+        entry = submit_media(
+            mg_app=request.app, user=request.user,
+            submitted_file=request.files['file'],
+            filename=request.files['file'].filename,
+            title=unicode(request.form.get('title')),
+            description=unicode(request.form.get('description')),
+            license=unicode(request.form.get('license', '')),
+            upload_limit=upload_limit, max_file_size=max_file_size,
+            callback_url=callback_url)
+
+        return json_response(get_entry_serializable(entry, request.urlgen))
+
+    # Handle upload limit issues
+    except FileUploadLimit:
+        raise BadRequest(
+            _(u'Sorry, the file size is too big.'))
+    except UserUploadLimit:
+        raise BadRequest(
+            _('Sorry, uploading this file will put you over your'
+              ' upload limit.'))
+    except UserPastUploadLimit:
+        raise BadRequest(
+            _('Sorry, you have reached your upload limit.'))
+
+    # Catch the errors raised by mediagoblin.media_types for files we can't
+    # handle and report them to the client; anything else propagates.
+    except (InvalidFileType, FileTypeNotSupported) as e:
+        raise BadRequest(unicode(e))
@api_auth
from mediagoblin import mg_globals
from mediagoblin.tools.text import convert_to_tag_list_of_dicts
-from mediagoblin.db.models import MediaEntry
+from mediagoblin.db.models import MediaEntry, ProcessingMetaData
from mediagoblin.processing import mark_entry_failed
from mediagoblin.processing.task import ProcessMedia
from mediagoblin.notifications import add_comment_subscription
                 title=None, description=None,
                 license=None, tags_string=u"",
                 upload_limit=None, max_file_size=None,
+                 callback_url=None,
                 # If provided we'll do the feed_url update, otherwise ignore
-                 urlgen=None):
+                 urlgen=None,):
"""
Args:
- mg_app: The MediaGoblinApp instantiated for this process
with this entry
- upload_limit: size in megabytes that's the per-user upload limit
- max_file_size: maximum size each file can be that's uploaded
+ - callback_url: possible post-hook to call after submission
- urlgen: if provided, used to do the feed_url update
"""
    if upload_limit and user.uploaded >= upload_limit:
    # Save now so we have this data before kicking off processing
    entry.save()
+    # Post-submission hooks: record the optional callback_url and, further
+    # down, do the feed_url update when a urlgen is provided
+    if callback_url:
+        metadata = ProcessingMetaData()
+        metadata.media_entry = entry
+        metadata.callback_url = callback_url
+        metadata.save()
+
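+    # feed_url points at this user's Atom feed; it's passed to the processing
+    # machinery so feed subscribers can be notified of the new entry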
    if urlgen:
        feed_url = urlgen(
            'mediagoblin.user_pages.atom_feed',