# GNU MediaGoblin -- federated, autonomous media hosting
-# Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
+# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import datetime, uuid
-
-from mongokit import Document
-
-from mediagoblin import util
-from mediagoblin.auth import lib as auth_lib
-from mediagoblin import mg_globals
-from mediagoblin.db import migrations
-from mediagoblin.db.util import ASCENDING, DESCENDING, ObjectId
-from mediagoblin.util import Pagination
-from mediagoblin.util import DISPLAY_IMAGE_FETCHING_ORDER
-from mediagoblin.tools import url
-
-###################
-# Custom validators
-###################
+"""
+TODO: indexes on foreign keys, where useful.
+"""
+
+import logging
+import datetime
+import sys
+
+from sqlalchemy import Column, Integer, Unicode, UnicodeText, DateTime, \
+ Boolean, ForeignKey, UniqueConstraint, PrimaryKeyConstraint, \
+ SmallInteger
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy.orm.collections import attribute_mapped_collection
+from sqlalchemy.sql.expression import desc
+from sqlalchemy.ext.associationproxy import association_proxy
+from sqlalchemy.util import memoized_property
+
+from mediagoblin.db.extratypes import PathTupleWithSlashes, JSONEncoded
+from mediagoblin.db.base import Base, DictReadAttrProxy, Session
+from mediagoblin.db.mixin import UserMixin, MediaEntryMixin, MediaCommentMixin, CollectionMixin, CollectionItemMixin
+from mediagoblin.tools.files import delete_media_files
+
+# It's actually kind of annoying how sqlalchemy-migrate does this, if
+# I understand it right, but whatever. Anyway, don't remove this :P
+#
+# We could do migration calls more manually instead of relying on
+# this import-based meddling...
+from migrate import changeset
-########
-# Models
-########
+_log = logging.getLogger(__name__)
-class User(Document):
+class User(Base, UserMixin):
"""
- A user of MediaGoblin.
-
- Structure:
- - username: The username of this user, should be unique to this instance.
- - email: Email address of this user
- - created: When the user was created
- - plugin_data: a mapping of extra plugin information for this User.
- Nothing uses this yet as we don't have plugins, but someday we
- might... :)
- - pw_hash: Hashed version of user's password.
- - email_verified: Whether or not the user has verified their email or not.
- Most parts of the site are disabled for users who haven't yet.
- - status: whether or not the user is active, etc. Currently only has two
- values, 'needs_email_verification' or 'active'. (In the future, maybe
- we'll change this to a boolean with a key of 'active' and have a
- separate field for a reason the user's been disabled if that's
- appropriate... email_verified is already separate, after all.)
- - verification_key: If the user is awaiting email verification, the user
- will have to provide this key (which will be encoded in the presented
- URL) in order to confirm their email as active.
- - is_admin: Whether or not this user is an administrator or not.
- - url: this user's personal webpage/website, if appropriate.
- - bio: biography of this user (plaintext, in markdown)
- - bio_html: biography of the user converted to proper HTML.
+ TODO: We should consider moving some rarely used fields
+ into some sort of "shadow" table.
"""
- __collection__ = 'users'
-
- structure = {
- 'username': unicode,
- 'email': unicode,
- 'created': datetime.datetime,
- 'plugin_data': dict, # plugins can dump stuff here.
- 'pw_hash': unicode,
- 'email_verified': bool,
- 'status': unicode,
- 'verification_key': unicode,
- 'is_admin': bool,
- 'url' : unicode,
- 'bio' : unicode, # May contain markdown
- 'bio_html': unicode, # May contain plaintext, or HTML
- 'fp_verification_key': unicode, # forgotten password verification key
- 'fp_token_expire': datetime.datetime
- }
-
- required_fields = ['username', 'created', 'pw_hash', 'email']
-
- default_values = {
- 'created': datetime.datetime.utcnow,
- 'email_verified': False,
- 'status': u'needs_email_verification',
- 'verification_key': lambda: unicode(uuid.uuid4()),
- 'is_admin': False}
-
- def check_login(self, password):
- """
- See if a user can login with this password
- """
- return auth_lib.bcrypt_check_password(
- password, self['pw_hash'])
-
-
-class MediaEntry(Document):
+ __tablename__ = "core__users"
+
+ id = Column(Integer, primary_key=True)
+ username = Column(Unicode, nullable=False, unique=True)
+ email = Column(Unicode, nullable=False)
+ created = Column(DateTime, nullable=False, default=datetime.datetime.now)
+ pw_hash = Column(Unicode, nullable=False)
+ email_verified = Column(Boolean, default=False)
+ status = Column(Unicode, default=u"needs_email_verification", nullable=False)
+    # Intended to be nullable=False, but migrations would not work for it
+    # set to nullable=True implicitly.
+ wants_comment_notification = Column(Boolean, default=True)
+ verification_key = Column(Unicode)
+ is_admin = Column(Boolean, default=False, nullable=False)
+ url = Column(Unicode)
+ bio = Column(UnicodeText) # ??
+ fp_verification_key = Column(Unicode)
+ fp_token_expire = Column(DateTime)
+
+ ## TODO
+ # plugin data would be in a separate model
+
+ def __repr__(self):
+ return '<{0} #{1} {2} {3} "{4}">'.format(
+ self.__class__.__name__,
+ self.id,
+ 'verified' if self.email_verified else 'non-verified',
+ 'admin' if self.is_admin else 'user',
+ self.username)
+
+ def delete(self, **kwargs):
+ """Deletes a User and all related entries/comments/files/..."""
+ # Delete this user's Collections and all contained CollectionItems
+ for collection in self.collections:
+ collection.delete(commit=False)
+
+ media_entries = MediaEntry.query.filter(MediaEntry.uploader == self.id)
+ for media in media_entries:
+ # TODO: Make sure that "MediaEntry.delete()" also deletes
+ # all related files/Comments
+ media.delete(del_orphan_tags=False, commit=False)
+
+ # Delete now unused tags
+ # TODO: import here due to cyclic imports!!! This cries for refactoring
+ from mediagoblin.db.util import clean_orphan_tags
+ clean_orphan_tags(commit=False)
+
+ # Delete user, pass through commit=False/True in kwargs
+ super(User, self).delete(**kwargs)
+ _log.info('Deleted user "{0}" account'.format(self.username))
+
+
+class MediaEntry(Base, MediaEntryMixin):
"""
- Record of a piece of media.
-
- Structure:
- - uploader: A reference to a User who uploaded this.
-
- - title: Title of this work
-
- - slug: A normalized "slug" which can be used as part of a URL to retrieve
- this work, such as 'my-works-name-in-slug-form' may be viewable by
- 'http://mg.example.org/u/username/m/my-works-name-in-slug-form/'
- Note that since URLs are constructed this way, slugs must be unique
- per-uploader. (An index is provided to enforce that but code should be
- written on the python side to ensure this as well.)
-
- - created: Date and time of when this piece of work was uploaded.
-
- - description: Uploader-set description of this work. This can be marked
- up with MarkDown for slight fanciness (links, boldness, italics,
- paragraphs...)
-
- - description_html: Rendered version of the description, run through
- Markdown and cleaned with our cleaning tool.
-
- - media_type: What type of media is this? Currently we only support
- 'image' ;)
-
- - media_data: Extra information that's media-format-dependent.
- For example, images might contain some EXIF data that's not appropriate
- to other formats. You might store it like:
-
- mediaentry['media_data']['exif'] = {
- 'manufacturer': 'CASIO',
- 'model': 'QV-4000',
- 'exposure_time': .659}
-
- Alternately for video you might store:
-
- # play length in seconds
- mediaentry['media_data']['play_length'] = 340
-
- ... so what's appropriate here really depends on the media type.
-
- - plugin_data: a mapping of extra plugin information for this User.
- Nothing uses this yet as we don't have plugins, but someday we
- might... :)
+ TODO: Consider fetching the media_files using join
+ """
+ __tablename__ = "core__media_entries"
+
+ id = Column(Integer, primary_key=True)
+ uploader = Column(Integer, ForeignKey(User.id), nullable=False, index=True)
+ title = Column(Unicode, nullable=False)
+ slug = Column(Unicode)
+ created = Column(DateTime, nullable=False, default=datetime.datetime.now,
+ index=True)
+ description = Column(UnicodeText) # ??
+ media_type = Column(Unicode, nullable=False)
+ state = Column(Unicode, default=u'unprocessed', nullable=False)
+ # or use sqlalchemy.types.Enum?
+ license = Column(Unicode)
+ collected = Column(Integer, default=0)
+
+ fail_error = Column(Unicode)
+ fail_metadata = Column(JSONEncoded)
+
+ transcoding_progress = Column(SmallInteger)
+
+ queued_media_file = Column(PathTupleWithSlashes)
+
+ queued_task_id = Column(Unicode)
+
+ __table_args__ = (
+ UniqueConstraint('uploader', 'slug'),
+ {})
+
+ get_uploader = relationship(User)
+
+ media_files_helper = relationship("MediaFile",
+ collection_class=attribute_mapped_collection("name"),
+ cascade="all, delete-orphan"
+ )
+ media_files = association_proxy('media_files_helper', 'file_path',
+ creator=lambda k, v: MediaFile(name=k, file_path=v)
+ )
+
+ attachment_files_helper = relationship("MediaAttachmentFile",
+ order_by="MediaAttachmentFile.created"
+ )
+ attachment_files = association_proxy("attachment_files_helper", "dict_view",
+ creator=lambda v: MediaAttachmentFile(
+ name=v["name"], filepath=v["filepath"])
+ )
+
+ tags_helper = relationship("MediaTag",
+ cascade="all, delete-orphan" # should be automatically deleted
+ )
+ tags = association_proxy("tags_helper", "dict_view",
+ creator=lambda v: MediaTag(name=v["name"], slug=v["slug"])
+ )
+
+ collections_helper = relationship("CollectionItem",
+ cascade="all, delete-orphan"
+ )
+ collections = association_proxy("collections_helper", "in_collection")
+
+ ## TODO
+ # media_data
+ # fail_error
+
+ def get_comments(self, ascending=False):
+ order_col = MediaComment.created
+ if not ascending:
+ order_col = desc(order_col)
+ return MediaComment.query.filter_by(
+ media_entry=self.id).order_by(order_col)
- - tags: A list of tags. Each tag is stored as a dictionary that has a key
- for the actual name and the normalized name-as-slug, so ultimately this
- looks like:
- [{'name': 'Gully Gardens',
- 'slug': 'gully-gardens'},
- {'name': 'Castle Adventure Time?!",
- 'slug': 'castle-adventure-time'}]
+ def url_to_prev(self, urlgen):
+ """get the next 'newer' entry by this user"""
+ media = MediaEntry.query.filter(
+ (MediaEntry.uploader == self.uploader)
+ & (MediaEntry.state == u'processed')
+ & (MediaEntry.id > self.id)).order_by(MediaEntry.id).first()
- - state: What's the state of this file? Active, inactive, disabled, etc...
- But really for now there are only two states:
- "unprocessed": uploaded but needs to go through processing for display
- "processed": processed and able to be displayed
+ if media is not None:
+ return media.url_for_self(urlgen)
- - queued_media_file: storage interface style filepath describing a file
- queued for processing. This is stored in the mg_globals.queue_store
- storage system.
+ def url_to_next(self, urlgen):
+ """get the next 'older' entry by this user"""
+ media = MediaEntry.query.filter(
+ (MediaEntry.uploader == self.uploader)
+ & (MediaEntry.state == u'processed')
+ & (MediaEntry.id < self.id)).order_by(desc(MediaEntry.id)).first()
- - queued_task_id: celery task id. Use this to fetch the task state.
+ if media is not None:
+ return media.url_for_self(urlgen)
- - media_files: Files relevant to this that have actually been processed
- and are available for various types of display. Stored like:
- {'thumb': ['dir1', 'dir2', 'pic.png'}
+ #@memoized_property
+ @property
+ def media_data(self):
+ session = Session()
- - attachment_files: A list of "attachment" files, ones that aren't
- critical to this piece of media but may be usefully relevant to people
- viewing the work. (currently unused.)
+ return session.query(self.media_data_table).filter_by(
+ media_entry=self.id).first()
- - fail_error: path to the exception raised
- - fail_metadata:
- """
- __collection__ = 'media_entries'
-
- structure = {
- 'uploader': ObjectId,
- 'title': unicode,
- 'slug': unicode,
- 'created': datetime.datetime,
- 'description': unicode, # May contain markdown/up
- 'description_html': unicode, # May contain plaintext, or HTML
- 'media_type': unicode,
- 'media_data': dict, # extra data relevant to this media_type
- 'plugin_data': dict, # plugins can dump stuff here.
- 'tags': [dict],
- 'state': unicode,
-
- # For now let's assume there can only be one main file queued
- # at a time
- 'queued_media_file': [unicode],
- 'queued_task_id': unicode,
-
- # A dictionary of logical names to filepaths
- 'media_files': dict,
-
- # The following should be lists of lists, in appropriate file
- # record form
- 'attachment_files': list,
-
- # If things go badly in processing things, we'll store that
- # data here
- 'fail_error': unicode,
- 'fail_metadata': dict}
-
- required_fields = [
- 'uploader', 'created', 'media_type', 'slug']
-
- default_values = {
- 'created': datetime.datetime.utcnow,
- 'state': u'unprocessed'}
-
- def get_comments(self):
- return self.db.MediaComment.find({
- 'media_entry': self['_id']}).sort('created', DESCENDING)
-
- def get_display_media(self, media_map, fetch_order=DISPLAY_IMAGE_FETCHING_ORDER):
+ def media_data_init(self, **kwargs):
"""
- Find the best media for display.
-
- Args:
- - media_map: a dict like
- {u'image_size': [u'dir1', u'dir2', u'image.jpg']}
- - fetch_order: the order we should try fetching images in
-
- Returns:
- (media_size, media_path)
+ Initialize or update the contents of a media entry's media_data row
"""
- media_sizes = media_map.keys()
+ session = Session()
+
+ media_data = session.query(self.media_data_table).filter_by(
+ media_entry=self.id).first()
+
+ # No media data, so actually add a new one
+ if media_data is None:
+ media_data = self.media_data_table(
+ media_entry=self.id,
+ **kwargs)
+ session.add(media_data)
+ # Update old media data
+ else:
+ for field, value in kwargs.iteritems():
+ setattr(media_data, field, value)
+
+ @memoized_property
+ def media_data_table(self):
+        # memoized via the @memoized_property decorator above
+ models_module = self.media_type + '.models'
+ __import__(models_module)
+ return sys.modules[models_module].DATA_MODEL
+
+ def __repr__(self):
+ safe_title = self.title.encode('ascii', 'replace')
+
+ return '<{classname} {id}: {title}>'.format(
+ classname=self.__class__.__name__,
+ id=self.id,
+ title=safe_title)
+
+ def delete(self, del_orphan_tags=True, **kwargs):
+ """Delete MediaEntry and all related files/attachments/comments
+
+ This will *not* automatically delete unused collections, which
+ can remain empty...
+
+ :param del_orphan_tags: True/false if we delete unused Tags too
+ :param commit: True/False if this should end the db transaction"""
+ # User's CollectionItems are automatically deleted via "cascade".
+ # Delete all the associated comments
+ for comment in self.get_comments():
+ comment.delete(commit=False)
+
+ # Delete all related files/attachments
+ try:
+ delete_media_files(self)
+ except OSError, error:
+ # Returns list of files we failed to delete
+ _log.error('No such files from the user "{1}" to delete: '
+ '{0}'.format(str(error), self.get_uploader))
+ _log.info('Deleted Media entry id "{0}"'.format(self.id))
+ # Related MediaTag's are automatically cleaned, but we might
+ # want to clean out unused Tag's too.
+ if del_orphan_tags:
+ # TODO: Import here due to cyclic imports!!!
+ # This cries for refactoring
+ from mediagoblin.db.util import clean_orphan_tags
+ clean_orphan_tags(commit=False)
+ # pass through commit=False/True in kwargs
+ super(MediaEntry, self).delete(**kwargs)
+
+
+class FileKeynames(Base):
+ """
+    Keywords for various places;
+    currently the MediaFile keys.
+ """
+ __tablename__ = "core__file_keynames"
+ id = Column(Integer, primary_key=True)
+ name = Column(Unicode, unique=True)
- for media_size in DISPLAY_IMAGE_FETCHING_ORDER:
- if media_size in media_sizes:
- return media_map[media_size]
+ def __repr__(self):
+ return "<FileKeyname %r: %r>" % (self.id, self.name)
- def main_mediafile(self):
- pass
+ @classmethod
+ def find_or_new(cls, name):
+ t = cls.query.filter_by(name=name).first()
+ if t is not None:
+ return t
+ return cls(name=name)
- def generate_slug(self):
- self['slug'] = url.slugify(self['title'])
- duplicate = mg_globals.database.media_entries.find_one(
- {'slug': self['slug']})
+class MediaFile(Base):
+ """
+ TODO: Highly consider moving "name" into a new table.
+ TODO: Consider preloading said table in software
+ """
+ __tablename__ = "core__mediafiles"
- if duplicate:
- self['slug'] = "%s-%s" % (self['_id'], self['slug'])
+ media_entry = Column(
+ Integer, ForeignKey(MediaEntry.id),
+ nullable=False)
+ name_id = Column(SmallInteger, ForeignKey(FileKeynames.id), nullable=False)
+ file_path = Column(PathTupleWithSlashes)
- def url_for_self(self, urlgen):
- """
- Generate an appropriate url for ourselves
+ __table_args__ = (
+ PrimaryKeyConstraint('media_entry', 'name_id'),
+ {})
- Use a slug if we have one, else use our '_id'.
- """
- uploader = self.uploader()
+ def __repr__(self):
+ return "<MediaFile %s: %r>" % (self.name, self.file_path)
- if self.get('slug'):
- return urlgen(
- 'mediagoblin.user_pages.media_home',
- user=uploader['username'],
- media=self['slug'])
- else:
- return urlgen(
- 'mediagoblin.user_pages.media_home',
- user=uploader['username'],
- media=unicode(self['_id']))
+ name_helper = relationship(FileKeynames, lazy="joined", innerjoin=True)
+ name = association_proxy('name_helper', 'name',
+ creator=FileKeynames.find_or_new
+ )
- def url_to_prev(self, urlgen):
- """
- Provide a url to the previous entry from this user, if there is one
- """
- cursor = self.db.MediaEntry.find({'_id' : {"$gt": self['_id']},
- 'uploader': self['uploader'],
- 'state': 'processed'}).sort(
- '_id', ASCENDING).limit(1)
- if cursor.count():
- return urlgen('mediagoblin.user_pages.media_home',
- user=self.uploader()['username'],
- media=unicode(cursor[0]['slug']))
- def url_to_next(self, urlgen):
- """
- Provide a url to the next entry from this user, if there is one
- """
- cursor = self.db.MediaEntry.find({'_id' : {"$lt": self['_id']},
- 'uploader': self['uploader'],
- 'state': 'processed'}).sort(
- '_id', DESCENDING).limit(1)
+class MediaAttachmentFile(Base):
+ __tablename__ = "core__attachment_files"
+
+ id = Column(Integer, primary_key=True)
+ media_entry = Column(
+ Integer, ForeignKey(MediaEntry.id),
+ nullable=False)
+ name = Column(Unicode, nullable=False)
+ filepath = Column(PathTupleWithSlashes)
+ created = Column(DateTime, nullable=False, default=datetime.datetime.now)
- if cursor.count():
- return urlgen('mediagoblin.user_pages.media_home',
- user=self.uploader()['username'],
- media=unicode(cursor[0]['slug']))
-
- def uploader(self):
- return self.db.User.find_one({'_id': self['uploader']})
-
- def get_fail_exception(self):
- """
- Get the exception that's appropriate for this error
- """
- if self['fail_error']:
- return util.import_component(self['fail_error'])
+ @property
+ def dict_view(self):
+ """A dict like view on this object"""
+ return DictReadAttrProxy(self)
+
+
+class Tag(Base):
+ __tablename__ = "core__tags"
+
+ id = Column(Integer, primary_key=True)
+ slug = Column(Unicode, nullable=False, unique=True)
+
+ def __repr__(self):
+ return "<Tag %r: %r>" % (self.id, self.slug)
+
+ @classmethod
+ def find_or_new(cls, slug):
+ t = cls.query.filter_by(slug=slug).first()
+ if t is not None:
+ return t
+ return cls(slug=slug)
+
+
+class MediaTag(Base):
+ __tablename__ = "core__media_tags"
+
+ id = Column(Integer, primary_key=True)
+ media_entry = Column(
+ Integer, ForeignKey(MediaEntry.id),
+ nullable=False, index=True)
+ tag = Column(Integer, ForeignKey(Tag.id), nullable=False, index=True)
+ name = Column(Unicode)
+ # created = Column(DateTime, nullable=False, default=datetime.datetime.now)
+
+ __table_args__ = (
+ UniqueConstraint('tag', 'media_entry'),
+ {})
+
+ tag_helper = relationship(Tag)
+ slug = association_proxy('tag_helper', 'slug',
+ creator=Tag.find_or_new
+ )
+
+ def __init__(self, name=None, slug=None):
+ Base.__init__(self)
+ if name is not None:
+ self.name = name
+ if slug is not None:
+ self.tag_helper = Tag.find_or_new(slug)
+
+ @property
+ def dict_view(self):
+ """A dict like view on this object"""
+ return DictReadAttrProxy(self)
+
+
+class MediaComment(Base, MediaCommentMixin):
+ __tablename__ = "core__media_comments"
+
+ id = Column(Integer, primary_key=True)
+ media_entry = Column(
+ Integer, ForeignKey(MediaEntry.id), nullable=False, index=True)
+ author = Column(Integer, ForeignKey(User.id), nullable=False)
+ created = Column(DateTime, nullable=False, default=datetime.datetime.now)
+ content = Column(UnicodeText, nullable=False)
+
+ get_author = relationship(User)
+
+
+class Collection(Base, CollectionMixin):
+ """An 'album' or 'set' of media by a user.
+
+ On deletion, contained CollectionItems get automatically reaped via
+ SQL cascade"""
+ __tablename__ = "core__collections"
+ id = Column(Integer, primary_key=True)
+ title = Column(Unicode, nullable=False)
+ slug = Column(Unicode)
+ created = Column(DateTime, nullable=False, default=datetime.datetime.now,
+ index=True)
+ description = Column(UnicodeText)
+ creator = Column(Integer, ForeignKey(User.id), nullable=False)
+ # TODO: No of items in Collection. Badly named, can we migrate to num_items?
+ items = Column(Integer, default=0)
-class MediaComment(Document):
- """
- A comment on a MediaEntry.
-
- Structure:
- - media_entry: The media entry this comment is attached to
- - author: user who posted this comment
- - created: when the comment was created
- - content: plaintext (but markdown'able) version of the comment's content.
- - content_html: the actual html-rendered version of the comment displayed.
- Run through Markdown and the HTML cleaner.
- """
+ get_creator = relationship(User, backref="collections")
- __collection__ = 'media_comments'
+ def get_collection_items(self, ascending=False):
+ #TODO, is this still needed with self.collection_items being available?
+ order_col = CollectionItem.position
+ if not ascending:
+ order_col = desc(order_col)
+ return CollectionItem.query.filter_by(
+ collection=self.id).order_by(order_col)
+
+
+class CollectionItem(Base, CollectionItemMixin):
+ __tablename__ = "core__collection_items"
+
+ id = Column(Integer, primary_key=True)
+ media_entry = Column(
+ Integer, ForeignKey(MediaEntry.id), nullable=False, index=True)
+ collection = Column(Integer, ForeignKey(Collection.id), nullable=False)
+ note = Column(UnicodeText, nullable=True)
+ added = Column(DateTime, nullable=False, default=datetime.datetime.now)
+ position = Column(Integer)
+ in_collection = relationship("Collection",
+ backref=backref(
+ "collection_items",
+ cascade="all, delete-orphan"))
+
+ get_media_entry = relationship(MediaEntry)
+
+ __table_args__ = (
+ UniqueConstraint('collection', 'media_entry'),
+ {})
+
+ @property
+ def dict_view(self):
+ """A dict like view on this object"""
+ return DictReadAttrProxy(self)
+
+
+class ProcessingMetaData(Base):
+ __tablename__ = 'core__processing_metadata'
+
+ id = Column(Integer, primary_key=True)
+ media_entry_id = Column(Integer, ForeignKey(MediaEntry.id), nullable=False,
+ index=True)
+ media_entry = relationship(MediaEntry,
+ backref=backref('processing_metadata',
+ cascade='all, delete-orphan'))
+ callback_url = Column(Unicode)
+
+ @property
+ def dict_view(self):
+ """A dict like view on this object"""
+ return DictReadAttrProxy(self)
+
+
+MODELS = [
+ User, MediaEntry, Tag, MediaTag, MediaComment, Collection, CollectionItem, MediaFile, FileKeynames,
+ MediaAttachmentFile, ProcessingMetaData]
- structure = {
- 'media_entry': ObjectId,
- 'author': ObjectId,
- 'created': datetime.datetime,
- 'content': unicode,
- 'content_html': unicode}
- required_fields = [
- 'media_entry', 'author', 'created', 'content']
+######################################################
+# Special, migrations-tracking table
+#
+# Not listed in MODELS because this is special and not
+# really migrated, but used for migrations (for now)
+######################################################
- default_values = {
- 'created': datetime.datetime.utcnow}
+class MigrationData(Base):
+ __tablename__ = "core__migrations"
- def media_entry(self):
- return self.db.MediaEntry.find_one({'_id': self['media_entry']})
+ name = Column(Unicode, primary_key=True)
+ version = Column(Integer, nullable=False, default=0)
- def author(self):
- return self.db.User.find_one({'_id': self['author']})
+######################################################
-REGISTER_MODELS = [
- MediaEntry,
- User,
- MediaComment]
+def show_table_init(engine_uri):
+ if engine_uri is None:
+ engine_uri = 'sqlite:///:memory:'
+ from sqlalchemy import create_engine
+ engine = create_engine(engine_uri, echo=True)
+ Base.metadata.create_all(engine)
-def register_models(connection):
- """
- Register all models in REGISTER_MODELS with this connection.
- """
- connection.register(REGISTER_MODELS)
+if __name__ == '__main__':
+ from sys import argv
+ print repr(argv)
+ if len(argv) == 2:
+ uri = argv[1]
+ else:
+ uri = None
+ show_table_init(uri)