# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011 Free Software Foundation, Inc
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import datetime, uuid

from mongokit import Document

from mediagoblin import util
from mediagoblin.auth import lib as auth_lib
from mediagoblin import mg_globals
from mediagoblin.db import migrations
from mediagoblin.db.util import ASCENDING, DESCENDING, ObjectId
from mediagoblin.util import Pagination
from mediagoblin.util import DISPLAY_IMAGE_FETCHING_ORDER


###################
# Custom validators
###################

########
# Models
########


class User(Document):
    """
    A user of MediaGoblin.

    Structure:
     - username: The username of this user; it should be unique to this
       instance.
     - email: Email address of this user
     - created: When the user was created
     - plugin_data: a mapping of extra plugin information for this User.
       Nothing uses this yet as we don't have plugins, but someday we
       might... :)
     - pw_hash: Hashed version of user's password.
     - email_verified: Whether or not the user has verified their email.
       Most parts of the site are disabled for users who haven't yet.
     - status: whether or not the user is active, etc. Currently only has two
       values, 'needs_email_verification' or 'active'. (In the future, maybe
       we'll change this to a boolean with a key of 'active' and have a
       separate field for a reason the user's been disabled if that's
       appropriate... email_verified is already separate, after all.)
     - verification_key: If the user is awaiting email verification, the user
       will have to provide this key (which will be encoded in the presented
       URL) in order to confirm their email as active.
     - is_admin: Whether or not this user is an administrator.
     - url: this user's personal webpage/website, if appropriate.
     - bio: biography of this user (plaintext, in markdown)
     - bio_html: biography of the user converted to proper HTML.
    """
    __collection__ = 'users'

    structure = {
        'username': unicode,
        'email': unicode,
        'created': datetime.datetime,
        'plugin_data': dict, # plugins can dump stuff here.
        'pw_hash': unicode,
        'email_verified': bool,
        'status': unicode,
        'verification_key': unicode,
        'is_admin': bool,
        'url': unicode,
        'bio': unicode, # May contain markdown
        'bio_html': unicode, # May contain plaintext, or HTML
        }

    required_fields = ['username', 'created', 'pw_hash', 'email']

    default_values = {
        'created': datetime.datetime.utcnow,
        'email_verified': False,
        'status': u'needs_email_verification',
        'verification_key': lambda: unicode(uuid.uuid4()),
        'is_admin': False}

    def check_login(self, password):
        """
        See if a user can login with this password
        """
        return auth_lib.bcrypt_check_password(
            password, self['pw_hash'])
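
    # Illustrative sketch (added commentary, not part of the original
    # module): roughly how this model is used once register_models() below
    # has been run against a mongokit connection. The `db` name is
    # hypothetical, and bcrypt_gen_password_hash() is assumed to be the
    # companion helper to bcrypt_check_password() in mediagoblin.auth.lib.
    #
    #     user = db.User()
    #     user['username'] = u'alice'
    #     user['email'] = u'alice@example.org'
    #     user['pw_hash'] = auth_lib.bcrypt_gen_password_hash(u'secret')
    #     user.save()                  # created/status/etc. get defaults
    #     user.check_login(u'secret')  # -> True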


class MediaEntry(Document):
    """
    Record of a piece of media.

    Structure:
     - uploader: A reference to a User who uploaded this.

     - title: Title of this work

     - slug: A normalized "slug" which can be used as part of a URL to
       retrieve this work. For example, 'my-works-name-in-slug-form' may be
       viewable at
       'http://mg.example.org/u/username/m/my-works-name-in-slug-form/'.
       Note that since URLs are constructed this way, slugs must be unique
       per-uploader. (An index is provided to enforce that, but code should
       be written on the Python side to ensure this as well.)

     - created: Date and time of when this piece of work was uploaded.

     - description: Uploader-set description of this work. This can be marked
       up with Markdown for slight fanciness (links, boldness, italics,
       paragraphs...)

     - description_html: Rendered version of the description, run through
       Markdown and cleaned with our cleaning tool.

     - media_type: What type of media is this? Currently we only support
       'image' ;)

     - media_data: Extra information that's media-format-dependent.
       For example, images might contain some EXIF data that's not
       appropriate to other formats. You might store it like:

         mediaentry['media_data']['exif'] = {
             'manufacturer': 'CASIO',
             'model': 'QV-4000',
             'exposure_time': .659}

       Alternately for video you might store:

         # play length in seconds
         mediaentry['media_data']['play_length'] = 340

       ... so what's appropriate here really depends on the media type.

     - plugin_data: a mapping of extra plugin information for this media
       entry. Nothing uses this yet as we don't have plugins, but someday we
       might... :)

     - tags: A list of tags. Each tag is stored as a dictionary that has a
       key for the actual name and the normalized name-as-slug, so ultimately
       this looks like:
         [{'name': 'Gully Gardens',
           'slug': 'gully-gardens'},
          {'name': 'Castle Adventure Time?!',
           'slug': 'castle-adventure-time'}]

     - state: What's the state of this file? Active, inactive, disabled,
       etc... But really for now there are only two states:
         "unprocessed": uploaded but needs to go through processing for
           display
         "processed": processed and able to be displayed

     - queued_media_file: storage interface style filepath describing a file
       queued for processing. This is stored in the mg_globals.queue_store
       storage system.

     - queued_task_id: celery task id. Use this to fetch the task state.

     - media_files: Files relevant to this that have actually been processed
       and are available for various types of display. Stored like:
         {'thumb': ['dir1', 'dir2', 'pic.png']}

     - attachment_files: A list of "attachment" files, ones that aren't
       critical to this piece of media but may be usefully relevant to people
       viewing the work. (Currently unused.)
    """
    __collection__ = 'media_entries'

    structure = {
        'uploader': ObjectId,
        'title': unicode,
        'slug': unicode,
        'created': datetime.datetime,
        'description': unicode, # May contain markdown/up
        'description_html': unicode, # May contain plaintext, or HTML
        'media_type': unicode,
        'media_data': dict, # extra data relevant to this media_type
        'plugin_data': dict, # plugins can dump stuff here.
        'tags': [dict],
        'state': unicode,

        # For now let's assume there can only be one main file queued
        # at a time
        'queued_media_file': [unicode],
        'queued_task_id': unicode,

        # A dictionary of logical names to filepaths
        'media_files': dict,

        # The following should be lists of lists, in appropriate file
        # record form
        'attachment_files': list}

    required_fields = [
        'uploader', 'created', 'media_type', 'slug']

    default_values = {
        'created': datetime.datetime.utcnow,
        'state': u'unprocessed'}

    def get_comments(self):
        """
        Return a cursor over this entry's comments, newest first.
        """
        return self.db.MediaComment.find({
                'media_entry': self['_id']}).sort('created', DESCENDING)

    def get_display_media(self, media_map,
                          fetch_order=DISPLAY_IMAGE_FETCHING_ORDER):
        """
        Find the best media for display.

        Args:
        - media_map: a dict like
          {u'image_size': [u'dir1', u'dir2', u'image.jpg']}
        - fetch_order: the order we should try fetching images in

        Returns:
          The filepath (in storage interface form) of the most preferred
          size present in media_map, or None if no size in fetch_order is
          available.
        """
        media_sizes = media_map.keys()

        for media_size in fetch_order:
            if media_size in media_sizes:
                return media_map[media_size]
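
    # Illustrative sketch (added commentary, not part of the original
    # module): given media_files like
    #     {u'thumb': [u'dir1', u'dir2', u'thumb.jpg'],
    #      u'main': [u'dir1', u'dir2', u'pic.jpg']}
    # get_display_media() walks fetch_order and returns the first filepath
    # whose size name is present, e.g. the u'main' filepath if u'main'
    # precedes u'thumb' in DISPLAY_IMAGE_FETCHING_ORDER. The caller
    # (typically a template) then hands that filepath to a storage interface
    # to build a URL.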

    def main_mediafile(self):
        pass

    def generate_slug(self):
        self['slug'] = util.slugify(self['title'])

        duplicate = mg_globals.database.media_entries.find_one(
            {'slug': self['slug']})

        if duplicate:
            self['slug'] = "%s-%s" % (self['_id'], self['slug'])
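
    # Note (added commentary, not in the original): the duplicate check above
    # looks across *all* media entries, not just this uploader's, so any
    # title collision results in a disambiguated slug of the form
    # "<this entry's _id>-<slug>".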

    def url_for_self(self, urlgen):
        """
        Generate an appropriate url for ourselves

        Use a slug if we have one, else use our '_id'.
        """
        uploader = self.uploader()

        if self.get('slug'):
            return urlgen(
                'mediagoblin.user_pages.media_home',
                user=uploader['username'],
                media=self['slug'])
        else:
            return urlgen(
                'mediagoblin.user_pages.media_home',
                user=uploader['username'],
                media=unicode(self['_id']))

    def url_to_prev(self, urlgen):
        """
        Provide a url to the previous entry from this user, if there is one.

        "Previous" here means the next newer processed entry, i.e. the one
        with the next higher '_id'.
        """
        cursor = self.db.MediaEntry.find({'_id': {"$gt": self['_id']},
                                          'uploader': self['uploader'],
                                          'state': 'processed'}).sort(
                                              '_id', ASCENDING).limit(1)
        if cursor.count():
            return urlgen('mediagoblin.user_pages.media_home',
                          user=self.uploader()['username'],
                          media=unicode(cursor[0]['slug']))

    def url_to_next(self, urlgen):
        """
        Provide a url to the next entry from this user, if there is one.

        "Next" here means the next older processed entry, i.e. the one with
        the next lower '_id'.
        """
        cursor = self.db.MediaEntry.find({'_id': {"$lt": self['_id']},
                                          'uploader': self['uploader'],
                                          'state': 'processed'}).sort(
                                              '_id', DESCENDING).limit(1)

        if cursor.count():
            return urlgen('mediagoblin.user_pages.media_home',
                          user=self.uploader()['username'],
                          media=unicode(cursor[0]['slug']))

    def uploader(self):
        return self.db.User.find_one({'_id': self['uploader']})
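
    # Illustrative sketch (added commentary, not part of the original
    # module): a rough lifecycle for an entry, assuming a registered mongokit
    # connection `db` and an existing `user` document (both names are
    # hypothetical here).
    #
    #     entry = db.MediaEntry()
    #     entry['uploader'] = user['_id']
    #     entry['title'] = u'A walk in Gully Gardens'
    #     entry['media_type'] = u'image'
    #     entry.generate_slug()
    #     entry.save()               # state defaults to u'unprocessed'
    #     # ... processing then fills in entry['media_files'] and sets
    #     # entry['state'] = u'processed' so the entry can be displayed.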


class MediaComment(Document):
    """
    A comment on a MediaEntry.

    Structure:
     - media_entry: The media entry this comment is attached to
     - author: user who posted this comment
     - created: when the comment was created
     - content: plaintext (but markdown'able) version of the comment's content.
     - content_html: the actual html-rendered version of the comment displayed.
       Run through Markdown and the HTML cleaner.
    """

    __collection__ = 'media_comments'

    structure = {
        'media_entry': ObjectId,
        'author': ObjectId,
        'created': datetime.datetime,
        'content': unicode,
        'content_html': unicode}

    required_fields = [
        'media_entry', 'author', 'created', 'content']

    default_values = {
        'created': datetime.datetime.utcnow}

    def media_entry(self):
        return self.db.MediaEntry.find_one({'_id': self['media_entry']})

    def author(self):
        return self.db.User.find_one({'_id': self['author']})
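
    # Illustrative sketch (added commentary, not part of the original
    # module): attaching a comment to an entry, again assuming a registered
    # connection `db` plus existing `user` and `entry` documents
    # (hypothetical names).
    #
    #     comment = db.MediaComment()
    #     comment['media_entry'] = entry['_id']
    #     comment['author'] = user['_id']
    #     comment['content'] = u'Lovely gardens!'
    #     comment.save()
    #     entry.get_comments()       # now includes this comment, newest first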


REGISTER_MODELS = [
    MediaEntry,
    User,
    MediaComment]


def register_models(connection):
    """
    Register all models in REGISTER_MODELS with this connection.
    """
    connection.register(REGISTER_MODELS)

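# Illustrative sketch (added commentary, not part of the original module):
# how registration is typically wired up. The connection construction and
# database name below are assumptions about mongokit usage, not something
# this file defines.
#
#     import mongokit
#     connection = mongokit.Connection('localhost', 27017)
#     register_models(connection)
#     db = connection['mediagoblin']
#     user = db.User.find_one({'username': u'alice'})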