Merge remote branch 'remotes/gullydwarf-cfdv/f360_tagging' into mergetags
[mediagoblin.git] / mediagoblin / db / models.py
1 # GNU MediaGoblin -- federated, autonomous media hosting
2 # Copyright (C) 2011 Free Software Foundation, Inc
3 #
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Affero General Public License for more details.
13 #
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import datetime, uuid
18
19 from mongokit import Document
20
21 from mediagoblin import util
22 from mediagoblin.auth import lib as auth_lib
23 from mediagoblin import mg_globals
24 from mediagoblin.db import migrations
25 from mediagoblin.db.util import ASCENDING, DESCENDING, ObjectId
26 from mediagoblin.util import Pagination
27 from mediagoblin.util import DISPLAY_IMAGE_FETCHING_ORDER
28
29
30 ###################
31 # Custom validators
32 ###################
33
34 ########
35 # Models
36 ########
37
38
class User(Document):
    """
    Account record for a single MediaGoblin user.

    Fields:
     - username: unique (per-instance) login name for this user
     - email: the user's email address
     - created: timestamp of account creation
     - plugin_data: per-plugin scratch space for this User.  Nothing
       uses this yet as we don't have plugins, but someday we might... :)
     - pw_hash: bcrypt hash of the user's password
     - email_verified: True once the user has confirmed their email
       address; most of the site is disabled for users who haven't yet.
     - status: account state.  Currently only two values are used:
       u'needs_email_verification' or u'active'.  (In the future this
       may become a boolean 'active' plus a separate disabled-reason
       field... email_verified is already separate, after all.)
     - verification_key: random key the user must echo back (encoded in
       the emailed URL) to confirm their address as active
     - is_admin: True if this user is an administrator
     - url: the user's personal webpage/website, if appropriate
     - bio: biography of this user (plaintext, in markdown)
     - bio_html: the bio rendered to proper HTML
    """
    __collection__ = 'users'

    structure = {
        'username': unicode,
        'email': unicode,
        'created': datetime.datetime,
        'plugin_data': dict,          # plugins can dump stuff here.
        'pw_hash': unicode,
        'email_verified': bool,
        'status': unicode,
        'verification_key': unicode,
        'is_admin': bool,
        'url': unicode,
        'bio': unicode,               # may contain markdown
        'bio_html': unicode,          # may contain plaintext, or HTML
        }

    required_fields = ['username', 'created', 'pw_hash', 'email']

    default_values = {
        'created': datetime.datetime.utcnow,
        'email_verified': False,
        'status': u'needs_email_verification',
        'verification_key': lambda: unicode(uuid.uuid4()),
        'is_admin': False}

    def check_login(self, password):
        """
        Check whether ``password`` matches this user's stored hash.
        """
        stored_hash = self['pw_hash']
        return auth_lib.bcrypt_check_password(password, stored_hash)
98
99
class MediaEntry(Document):
    """
    Record of a piece of media.

    Structure:
     - uploader: A reference to a User who uploaded this.

     - title: Title of this work

     - slug: A normalized "slug" which can be used as part of a URL to retrieve
         this work, such as 'my-works-name-in-slug-form' may be viewable by
         'http://mg.example.org/u/username/m/my-works-name-in-slug-form/'
         Note that since URLs are constructed this way, slugs must be unique
         per-uploader.  (An index is provided to enforce that but code should be
         written on the python side to ensure this as well.)

     - created: Date and time of when this piece of work was uploaded.

     - description: Uploader-set description of this work.  This can be marked
         up with MarkDown for slight fanciness (links, boldness, italics,
         paragraphs...)

     - description_html: Rendered version of the description, run through
         Markdown and cleaned with our cleaning tool.

     - media_type: What type of media is this?  Currently we only support
         'image' ;)

     - media_data: Extra information that's media-format-dependent.
         For example, images might contain some EXIF data that's not appropriate
         to other formats.  You might store it like:

           mediaentry['media_data']['exif'] = {
               'manufacturer': 'CASIO',
               'model': 'QV-4000',
               'exposure_time': .659}

         Alternately for video you might store:

           # play length in seconds
           mediaentry['media_data']['play_length'] = 340

         ... so what's appropriate here really depends on the media type.

     - plugin_data: a mapping of extra plugin information for this User.
         Nothing uses this yet as we don't have plugins, but someday we
         might... :)

     - tags: A list of tags.  Each tag is stored as a dictionary that has a key
         for the actual name and the normalized name-as-slug, so ultimately this
         looks like:
           [{'name': 'Gully Gardens',
             'slug': 'gully-gardens'},
            {'name': 'Castle Adventure Time?!',
             'slug': 'castle-adventure-time'}]

     - state: What's the state of this file?  Active, inactive, disabled, etc...
         But really for now there are only two states:
           "unprocessed": uploaded but needs to go through processing for display
           "processed": processed and able to be displayed

     - queued_media_file: storage interface style filepath describing a file
         queued for processing.  This is stored in the mg_globals.queue_store
         storage system.

     - media_files: Files relevant to this that have actually been processed
         and are available for various types of display.  Stored like:
           {'thumb': ['dir1', 'dir2', 'pic.png']}

     - attachment_files: A list of "attachment" files, ones that aren't
         critical to this piece of media but may be usefully relevant to people
         viewing the work.  (currently unused.)

     - thumbnail_file: Deprecated... we should remove this ;)
    """
    __collection__ = 'media_entries'

    structure = {
        'uploader': ObjectId,
        'title': unicode,
        'slug': unicode,
        'created': datetime.datetime,
        'description': unicode, # May contain markdown/up
        'description_html': unicode, # May contain plaintext, or HTML
        'media_type': unicode,
        'media_data': dict, # extra data relevant to this media_type
        'plugin_data': dict, # plugins can dump stuff here.
        'tags': [dict],
        'state': unicode,

        # For now let's assume there can only be one main file queued
        # at a time
        'queued_media_file': [unicode],

        # A dictionary of logical names to filepaths
        'media_files': dict,

        # The following should be lists of lists, in appropriate file
        # record form
        'attachment_files': list,

        # This one should just be a single file record
        'thumbnail_file': [unicode]}

    required_fields = [
        'uploader', 'created', 'media_type', 'slug']

    default_values = {
        'created': datetime.datetime.utcnow,
        'state': u'unprocessed'}

    def get_comments(self):
        """
        Return a cursor over this entry's comments, newest first.
        """
        return self.db.MediaComment.find({
                'media_entry': self['_id']}).sort('created', DESCENDING)

    def get_display_media(self, media_map,
                          fetch_order=DISPLAY_IMAGE_FETCHING_ORDER):
        """
        Find the best media for display.

        Args:
         - media_map: a dict like
           {u'image_size': [u'dir1', u'dir2', u'image.jpg']}
         - fetch_order: the order we should try fetching images in

        Returns:
          The filepath (the media_map value) for the first size in
          fetch_order that's present in media_map, or None if none match.
        """
        media_sizes = media_map.keys()

        # Bugfix: this previously looped over the module-level
        # DISPLAY_IMAGE_FETCHING_ORDER, silently ignoring the
        # fetch_order argument.  Respect the argument so callers can
        # supply their own preference order; the default value keeps
        # the old behavior for existing callers.
        for media_size in fetch_order:
            if media_size in media_sizes:
                return media_map[media_size]

    def main_mediafile(self):
        # Not implemented yet.
        pass

    def generate_slug(self):
        """
        Set self['slug'] from the title, de-duplicating if necessary.

        NOTE(review): the duplicate check below is *global* (slug only),
        although the class docstring only requires slugs to be unique
        per-uploader -- confirm intent before tightening the query to
        also filter on 'uploader'.
        """
        self['slug'] = util.slugify(self['title'])

        duplicate = mg_globals.database.media_entries.find_one(
            {'slug': self['slug']})

        if duplicate:
            # Prefix with our _id to make the slug unique.
            self['slug'] = "%s-%s" % (self['_id'], self['slug'])

    def url_for_self(self, urlgen):
        """
        Generate an appropriate url for ourselves.

        Use a slug if we have one, else use our '_id'.
        """
        uploader = self.uploader()

        if self.get('slug'):
            return urlgen(
                'mediagoblin.user_pages.media_home',
                user=uploader['username'],
                media=self['slug'])
        else:
            return urlgen(
                'mediagoblin.user_pages.media_home',
                user=uploader['username'],
                media=unicode(self['_id']))

    def url_to_prev(self, urlgen):
        """
        Provide a url to the previous entry from this user, if there is one.
        """
        # "Previous" = the next-higher _id from the same uploader that
        # has finished processing.
        cursor = self.db.MediaEntry.find({'_id' : {"$gt": self['_id']},
                                          'uploader': self['uploader'],
                                          'state': 'processed'}).sort(
                                              '_id', ASCENDING).limit(1)
        if cursor.count():
            # NOTE(review): unlike url_for_self, this assumes the
            # neighboring entry has a slug -- confirm entries always do.
            return urlgen('mediagoblin.user_pages.media_home',
                          user=self.uploader()['username'],
                          media=unicode(cursor[0]['slug']))

    def url_to_next(self, urlgen):
        """
        Provide a url to the next entry from this user, if there is one.
        """
        cursor = self.db.MediaEntry.find({'_id' : {"$lt": self['_id']},
                                          'uploader': self['uploader'],
                                          'state': 'processed'}).sort(
                                              '_id', DESCENDING).limit(1)

        if cursor.count():
            # NOTE(review): assumes the neighboring entry has a slug.
            return urlgen('mediagoblin.user_pages.media_home',
                          user=self.uploader()['username'],
                          media=unicode(cursor[0]['slug']))

    def uploader(self):
        """Fetch the User document who uploaded this entry."""
        return self.db.User.find_one({'_id': self['uploader']})
293
294
class MediaComment(Document):
    """
    A single comment attached to a MediaEntry.

    Fields:
     - media_entry: the media entry this comment is attached to
     - author: the User who posted this comment
     - created: timestamp of when the comment was created
     - content: comment body as plaintext (but markdown'able) source
     - content_html: the body run through Markdown and the HTML cleaner;
       this is the actual html-rendered version displayed
    """

    __collection__ = 'media_comments'

    structure = {
        'media_entry': ObjectId,
        'author': ObjectId,
        'created': datetime.datetime,
        'content': unicode,
        'content_html': unicode}

    required_fields = [
        'media_entry', 'author', 'created', 'content']

    default_values = {
        'created': datetime.datetime.utcnow}

    def media_entry(self):
        """Fetch the MediaEntry this comment belongs to."""
        entry_id = self['media_entry']
        return self.db.MediaEntry.find_one({'_id': entry_id})

    def author(self):
        """Fetch the User who wrote this comment."""
        author_id = self['author']
        return self.db.User.find_one({'_id': author_id})
328
329
# All models that register_models() will attach to a connection.
REGISTER_MODELS = [
    MediaEntry,
    User,
    MediaComment]
334
335
def register_models(connection):
    """
    Hook every model listed in REGISTER_MODELS up to ``connection``.
    """
    connection.register(REGISTER_MODELS)
341