| 1 | # GNU MediaGoblin -- federated, autonomous media hosting |
| 2 | # Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS. |
| 3 | # |
| 4 | # This program is free software: you can redistribute it and/or modify |
| 5 | # it under the terms of the GNU Affero General Public License as published by |
| 6 | # the Free Software Foundation, either version 3 of the License, or |
| 7 | # (at your option) any later version. |
| 8 | # |
| 9 | # This program is distributed in the hope that it will be useful, |
| 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | # GNU Affero General Public License for more details. |
| 13 | # |
| 14 | # You should have received a copy of the GNU Affero General Public License |
| 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
| 16 | |
| 17 | import copy |
| 18 | from math import ceil, floor |
| 19 | from itertools import count |
| 20 | from werkzeug.datastructures import MultiDict |
| 21 | |
| 22 | from six.moves import range, urllib, zip |
| 23 | |
| 24 | PAGINATION_DEFAULT_PER_PAGE = 30 |
| 25 | |
| 26 | |
class Pagination(object):
    """
    Pagination class for database queries.

    Initialization through
    __init__(self, page, cursor, per_page=PAGINATION_DEFAULT_PER_PAGE,
    jump_to_id=False), get actual data slice through __call__().
    """

    def __init__(self, page, cursor, per_page=PAGINATION_DEFAULT_PER_PAGE,
                 jump_to_id=False):
        """
        Initializes Pagination

        Args:
         - page: requested page (1-based)
         - cursor: db cursor (must support count(), slice() and iteration)
         - per_page: number of objects per page
         - jump_to_id: object id, sets the page to the page containing the
           object with id == jump_to_id.

        NOTE(review): a falsy jump_to_id (e.g. an id of 0) is treated the
        same as "no jump" -- confirm object ids can never be falsy.
        """
        self.page = page
        self.per_page = per_page
        self.cursor = cursor
        self.total_count = self.cursor.count()
        self.active_id = None

        if jump_to_id:
            # Work on a copy so the original cursor is left intact for
            # the later __call__() slice.
            cursor = copy.copy(self.cursor)

            # Iterate lazily and stop at the first match; the previous
            # list(zip(cursor, count(0))) materialized the entire result
            # set in memory before searching.
            for increment, doc in enumerate(cursor):
                if doc.id == jump_to_id:
                    # 0-based position // per_page is the 0-based page
                    # index; +1 converts to the 1-based page number.
                    self.page = 1 + increment // self.per_page

                    self.active_id = jump_to_id
                    break

    def __call__(self):
        """
        Returns slice of objects for the requested page
        """
        # TODO, return None for out of index so templates can
        # distinguish between empty galleries and out-of-bound pages???
        return self.cursor.slice(
            (self.page - 1) * self.per_page,
            self.page * self.per_page)

    @property
    def pages(self):
        """Total number of pages (0 when there are no objects)."""
        return int(ceil(self.total_count / float(self.per_page)))

    @property
    def has_prev(self):
        """True when a page before the current one exists."""
        return self.page > 1

    @property
    def has_next(self):
        """True when a page after the current one exists."""
        return self.page < self.pages

    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """
        Yields the page numbers to show in a pagination widget.

        Pages near the edges and near the current page are yielded;
        None is yielded once for each gap (rendered as an ellipsis).
        """
        last = 0
        for num in range(1, self.pages + 1):
            if num <= left_edge or \
               (num > self.page - left_current - 1 and \
                num < self.page + right_current) or \
               num > self.pages - right_edge:
                if last + 1 != num:
                    # Non-adjacent to the previously yielded page:
                    # mark the gap.
                    yield None
                yield num
                last = num

    def get_page_url_explicit(self, base_url, get_params, page_no):
        """
        Get a page url by adding a page= parameter to the base url
        """
        if isinstance(get_params, MultiDict):
            new_get_params = get_params.to_dict()
        else:
            new_get_params = dict(get_params) or {}

        # Override any existing page parameter with the requested page.
        new_get_params['page'] = page_no
        return "%s?%s" % (
            base_url, urllib.parse.urlencode(new_get_params))

    def get_page_url(self, request, page_no):
        """
        Get a new page url based of the request, and the new page number.

        This is a nice wrapper around get_page_url_explicit()
        """
        return self.get_page_url_explicit(
            request.full_path, request.GET, page_no)