*docstrings* -> *docs/* -> *manual/*
+----
+
+#### Version `0.4.1` (2013-08-):
+
+* __new__: `__getitem__()` in `diaspy.models.Post`,
+* __new__: `json()` method in `diaspy.streams.Generic` adds the possibility to export streams to JSON,
+* __new__: `full()` method in `diaspy.streams.Generic` will try to fetch the full stream (containing all posts),
+* __new__: `setEmail()` method in `diaspy.settings.Settings`,
+* __new__: `setLanguage()` method in `diaspy.settings.Settings`,
+* __new__: `downloadPhotos()` method in `diaspy.settings.Settings` (see the usage sketch below),
+
+* __fix__: fixed bugs in the regular expressions used by `diaspy` internals
+ (HTML tag removal, so notifications render more cleanly).
+
+
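Below is a minimal usage sketch of the new `diaspy.settings.Settings` methods. It is illustrative only: the pod URL and credentials are placeholders, and the language code passed to `setLanguage()` is an assumption, since its argument is not documented here.

```python
from diaspy.connection import Connection
from diaspy.settings import Settings

c = Connection(pod='https://pod.example.com', username='alice', password='secret')  # placeholders
c.login()

settings = Settings(c)
settings.setEmail('alice@example.com')  # change the account's email address
settings.setLanguage('en')              # argument assumed to be a language code
saved = settings.downloadPhotos()       # returns the number of photos downloaded
print('downloaded {0} photos'.format(saved))
```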
----
#### Version `0.4.0` (2013-08-20):
import json
+import os
import re
-import urllib
+import urllib.error
+import urllib.request
+import warnings
-from diaspy import errors
+from diaspy import errors, streams
class Settings():
self._connection = connection
def downloadxml(self):
+ """Returns downloaded XML.
+ """
request = self._connection.get('user/export')
return request.text
+ def downloadPhotos(self, size='large', path='.', _critical=False, _stream=None):
+ """Downloads photos into the current working directory.
+ Sizes are: large, medium, small.
+ Filename is: {photo_guid}.{extension}
+
+ Normally, this method will catch urllib-generated errors and
+ just issue warnings about photos that couldn't be downloaded.
+ However, with _critical param set to True errors will become
+ critical - the will be reraised in finally block.
+
+ :param size: size of the photos to download - large, medium or small
+ :type size: str
+ :param path: path to download (defaults to current working directory
+ :type path: str
+ :param _stream: diaspy.streams.Generic-like object (only for testing)
+ :param _critical: if True urllib errors will be reraised after generating a warning (may be removed)
+
+ :returns: integer, number of photos downloaded
+ """
+ photos = 0
+ if _stream is not None: stream = _stream
+ else: stream = streams.Activity
+ stream = stream(self._connection)
+ stream.full()
+ for i, post in enumerate(stream):
+ if post['photos']:
+ for n, photo in enumerate(post['photos']):
+ name = '{0}.{1}'.format(photo['guid'], photo['sizes'][size].split('.')[-1])
+ filename = os.path.join(path, name)
+ try:
+     urllib.request.urlretrieve(url=photo['sizes'][size], filename=filename)
+     photos += 1
+ except (urllib.error.HTTPError, urllib.error.URLError) as e:
+     warnings.warn('downloading image {0} from post {1}: {2}'.format(photo['guid'], post['guid'], e))
+     # reraise only when the caller asked for errors to be critical
+     if _critical: raise
+ return photos
+
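A minimal sketch of calling the method defined above, with placeholder pod, credentials and target directory:

```python
import os

from diaspy.connection import Connection
from diaspy.settings import Settings

c = Connection(pod='https://pod.example.com', username='alice', password='secret')  # placeholders
c.login()

settings = Settings(c)
os.makedirs('photos', exist_ok=True)  # example target directory
# By default failed downloads only produce warnings;
# pass _critical=True to have the errors reraised instead.
saved = settings.downloadPhotos(size='medium', path='photos')
print('{0} photos saved'.format(saved))
```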
def setEmail(self, email):
"""Changes user's email.
"""
new_stream = self._obtain(max_time=max_time)
self._expand(new_stream)
+ def full(self):
+ """Fetches full stream - containing all posts.
+ WARNING: this can be a **VERY** time consuming function on slow connections of massive streams.
+
+ :returns: integer, lenght of the stream
+ """
+ oldstream = self.copy()
+ self.more()
+ while len(oldstream) != len(self):
+ oldstream = self.copy()
+ self.more()
+ return len(self)
+
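A minimal sketch of fetching a complete stream with `full()`, again with placeholder pod and credentials:

```python
from diaspy.connection import Connection
from diaspy import streams

c = Connection(pod='https://pod.example.com', username='alice', password='secret')  # placeholders
c.login()

stream = streams.Activity(c)
total = stream.full()  # keeps calling more() until the stream stops growing; may be slow
print('fetched {0} posts'.format(total))
```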
def copy(self):
"""Returns copy (list of posts) of current stream.
"""
return [p for p in self._stream]
+ def json(self, comments=False):
+ """Returns JSON encoded string containing stream's data.
+
+ :param comments: to include comments or not to include 'em, that is the question this param holds answer to
+ :type comments: bool
+ """
+ stream = [post for post in self._stream]
+ if comments:
+ for i, post in enumerate(stream):
+ post._fetchcomments()
+ post_comments = [c.data for c in post.comments]
+ post['interactions']['comments'] = post_comments
+ stream[i] = post
+ stream = [post.data for post in stream]
+ return json.dumps(stream)
+
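A minimal sketch of the JSON export built on top of the methods above; the output filename is an example:

```python
from diaspy.connection import Connection
from diaspy import streams

c = Connection(pod='https://pod.example.com', username='alice', password='secret')  # placeholders
c.login()

stream = streams.Activity(c)
stream.full()                      # optional: fetch all posts before exporting
dump = stream.json(comments=True)  # also fetches and embeds each post's comments
with open('activity-backup.json', 'w') as f:  # example filename
    f.write(dump)
```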
class Outer(Generic):
"""Object used by diaspy.models.User to represent