1 # GNU MediaGoblin -- federated, autonomous media hosting
2 # Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Affero General Public License for more details.
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import shutil
import subprocess
import tarfile
import tempfile
from contextlib import closing

from mediagoblin import mg_globals
from mediagoblin.db.open import setup_connection_and_db_from_config
from mediagoblin.init import setup_storage, setup_global_and_app_config
from mediagoblin.init.config import read_mediagoblin_config
from mediagoblin.storage.filestorage import BasicFileStorage
# Module-level logger for the import/export admin commands.
_log = logging.getLogger('gmg.import_export')
# Show progress messages (INFO and up) by default.
_log.setLevel(logging.INFO)
def import_export_parse_setup(subparser):
    """
    Register the command-line arguments shared by env_import/env_export.

    :param subparser: argparse (sub)parser to populate.
    """
    # Positional: the tar archive to import from / export to.
    # TODO: Add default
    subparser.add_argument(
        'tar_file')
    subparser.add_argument(
        '--mongodump_path', default='mongodump',
        help='mongodump binary')
    subparser.add_argument(
        '--mongorestore_path', default='mongorestore',
        help='mongorestore binary')
    # Defaults to None so callers can fall back to tempfile.mkdtemp().
    subparser.add_argument(
        '--cache_path', default=None,
        help='Temporary directory where files will be temporarily dumped')
def _import_media(db, args):
    '''
    Import media files from the extracted cache into the public store.

    Must be called after _import_database()

    :param db: pymongo database handle.
    :param args: parsed command-line args; _setup_paths() must already
        have populated args._cache_path.
    '''
    _log.info('-> Importing media...')

    media_cache = BasicFileStorage(
        args._cache_path['media'])

    # TODO: Add import of queue files
    queue_cache = BasicFileStorage(
        args._cache_path['queue'])

    for entry in db.media_entries.find():
        for name, path in entry['media_files'].items():
            _log.info('Importing: {0} - {1}'.format(
                entry['title'], name))
            # Copy every media file from the dumped cache storage into
            # the live public store.
            media_file = mg_globals.public_store.get_file(path, mode='wb')
            media_file.write(
                media_cache.get_file(path, mode='rb').read())

    _log.info('...Media imported')
def _import_database(db, args):
    '''
    Restore mongo database from ___.bson files

    :param db: pymongo database handle (supplies the database name).
    :param args: parsed command-line args with mongorestore_path and
        _cache_path set.
    '''
    _log.info('-> Importing database...')

    p = subprocess.Popen([
        args.mongorestore_path, '-d', db.name,
        os.path.join(args._cache_path['database'], db.name)])

    # Block until mongorestore finishes; the media import that follows
    # depends on the restored database.
    p.wait()

    _log.info('...Database imported')
def env_import(args):
    '''
    Restore mongo database and media files from a tar archive

    :param args: parsed command-line args (tar_file, cache_path,
        conf_file, mongorestore_path).
    '''
    if not args.cache_path:
        args.cache_path = tempfile.mkdtemp()

    global_config, app_config = setup_global_and_app_config(args.conf_file)

    # Creates mg_globals.public_store and mg_globals.queue_store
    setup_storage()

    connection, db = setup_connection_and_db_from_config(
        app_config, use_pymongo=True)

    # Unpack the archive into the cache directory; stream mode since the
    # archive was written with 'w|gz'.
    with closing(tarfile.open(args.tar_file, mode='r|gz')) as tf:
        tf.extractall(args.cache_path)

    # The archive contents live under a 'mediagoblin-data' top directory.
    args.cache_path = os.path.join(
        args.cache_path, 'mediagoblin-data')
    args = _setup_paths(args)

    # Import database from extracted data
    _import_database(db, args)

    # Import media files into the public store
    _import_media(db, args)

    # Remove the temporary extraction directory
    _clean(args)
131 def _setup_paths(args
):
133 Populate ``args`` variable with cache subpaths
135 args
._cache
_path
= dict()
139 'database': 'database'}
141 for key
, val
in PATH_MAP
.items():
142 args
._cache
_path
[key
] = os
.path
.join(args
.cache_path
, val
)
def _create_archive(args):
    '''
    Create the tar archive

    Packs args.cache_path into a gzipped tar at args.tar_file under a
    'mediagoblin-data/' top-level directory.
    '''
    _log.info('-> Compressing to archive')

    # Stream-write a gzipped tar; closing() guarantees the archive is
    # flushed and finalized even if tf.add raises.
    tf = tarfile.open(
        args.tar_file,
        mode='w|gz')

    with closing(tf):
        tf.add(args.cache_path, 'mediagoblin-data/')

    _log.info('...Archiving done')
165 Remove cache directory
167 shutil
.rmtree(args
.cache_path
)
170 def _export_check(args
):
172 Run security checks for export command
174 if os
.path
.exists(args
.tar_file
):
175 overwrite
= raw_input(
176 'The output file already exists. '
177 'Are you **SURE** you want to overwrite it? '
179 if not overwrite
== 'yes':
def _export_database(db, args):
    '''
    Dump the mongo database to bson files in the cache directory.

    :param db: pymongo database handle (supplies the database name).
    :param args: parsed command-line args with mongodump_path and
        _cache_path set.
    '''
    _log.info('-> Exporting database...')

    p = subprocess.Popen([
        args.mongodump_path, '-d', db.name,
        '-o', args._cache_path['database']])

    # Block until mongodump finishes so the archive step sees a
    # complete dump.
    p.wait()

    _log.info('...Database exported')
def _export_media(db, args):
    '''
    Copy every media file from the public store into the cache directory.

    :param db: pymongo database handle.
    :param args: parsed command-line args; _setup_paths() must already
        have populated args._cache_path.
    '''
    _log.info('-> Exporting media...')

    media_cache = BasicFileStorage(
        args._cache_path['media'])

    # TODO: Add export of queue files
    queue_cache = BasicFileStorage(
        args._cache_path['queue'])

    for entry in db.media_entries.find():
        for name, path in entry['media_files'].items():
            _log.info('Exporting {0} - {1}'.format(
                entry['title'], name))
            # Copy from the live public store into the cache storage
            # that will be archived.
            mc_file = media_cache.get_file(path, mode='wb')
            mc_file.write(
                mg_globals.public_store.get_file(path, mode='rb').read())

    _log.info('...Media exported')
def env_export(args):
    '''
    Export database and media files to a tar archive

    :param args: parsed command-line args (tar_file, cache_path,
        conf_file, mongodump_path).
    '''
    if args.cache_path:
        # Refuse to clobber a pre-existing directory: _clean() deletes
        # the whole tree at the end.
        if os.path.exists(args.cache_path):
            _log.error('The cache directory must not exist before you run this script')
            _log.error('Cache directory: {0}'.format(args.cache_path))
            return False
    else:
        args.cache_path = tempfile.mkdtemp()

    args = _setup_paths(args)

    if not _export_check(args):
        _log.error('Checks did not pass, exiting')
        return False

    global_config, app_config = setup_global_and_app_config(args.conf_file)

    # Creates mg_globals.public_store and mg_globals.queue_store,
    # which _export_media reads from.
    setup_storage()

    connection, db = setup_connection_and_db_from_config(
        app_config, use_pymongo=True)

    _export_database(db, args)

    _export_media(db, args)

    _create_archive(args)

    # Remove the temporary dump directory
    _clean(args)