1 # GNU MediaGoblin -- federated, autonomous media hosting
2 # Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Affero General Public License for more details.
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path
import shutil
import subprocess
import tarfile
import tempfile
from contextlib import closing

from mediagoblin import mg_globals
from mediagoblin.db.open import setup_connection_and_db_from_config
from mediagoblin.init import setup_storage, setup_global_and_app_config
from mediagoblin.init.config import read_mediagoblin_config
from mediagoblin.storage import BasicFileStorage
def import_export_parse_setup(subparser):
    """
    Attach the shared import/export command-line options to ``subparser``.

    Both the import and the export commands use the same set of
    arguments, so they share this setup helper.
    """
    # Positional: the archive to read from (import) or write to (export).
    # NOTE(review): reconstructed from args.tar_file usage below — confirm.
    subparser.add_argument(
        'tar_file')
    subparser.add_argument(
        '-cf', '--conf_file', default='mediagoblin.ini',
        help='Config file used to set up environment')
    subparser.add_argument(
        '--mongodump_path', default='mongodump',
        help='mongodump binary')
    subparser.add_argument(
        '--mongorestore_path', default='mongorestore',
        help='mongorestore binary')
    # NOTE(review): reconstructed from args.cache_path usage below — confirm.
    subparser.add_argument(
        '--cache_path',
        help='Temporary directory where files will be temporarily dumped')
def _import_media(db, args):
    """
    Import media files from the extracted cache into the public store.

    Copies every file referenced by each media entry's ``media_files``
    mapping out of the cache and into ``mg_globals.public_store``.

    Must be called after _import_database()
    """
    print("\n== Importing media ==\n")

    media_cache = BasicFileStorage(
        args._cache_path['media'])

    # TODO: Add import of queue files
    queue_cache = BasicFileStorage(
        args._cache_path['queue'])

    for entry in db.media_entries.find():
        for path in entry['media_files'].values():
            media_file = mg_globals.public_store.get_file(path, mode='wb')
            # Copy the cached file's bytes into the live public store.
            media_file.write(
                media_cache.get_file(path, mode='rb').read())

    print("\n== Media imported ==\n")
75 def _import_database(db
, args
):
77 Restore mongo database from ___.bson files
79 print "\n== Importing database ==\n"
81 p
= subprocess
.Popen([
82 args
.mongorestore_path
,
84 os
.path
.join(args
._cache
_path
['database'], db
.name
)])
88 print "\n== Database imported ==\n"
def env_import(args):
    """
    Restore mongo database and media files from a tar archive

    Extracts the archive into a cache directory (a fresh temp dir when
    none was given), then restores the database and media from it.
    """
    if not args.cache_path:
        args.cache_path = tempfile.mkdtemp()

    setup_global_and_app_config(args.conf_file)

    # Creates mg_globals.public_store and mg_globals.queue_store
    setup_storage()

    config, validation_result = read_mediagoblin_config(args.conf_file)
    connection, db = setup_connection_and_db_from_config(
        config['mediagoblin'], use_pymongo=True)

    # NOTE(review): open call reconstructed — the archive is written with
    # mode='w|gz' in _create_archive, so read it as a gzipped stream.
    tf = tarfile.open(
        args.tar_file,
        mode='r|gz')
    tf.extractall(args.cache_path)

    # The archive stores everything under a 'mediagoblin-data' prefix.
    args.cache_path = os.path.join(
        args.cache_path, 'mediagoblin-data')
    args = _setup_paths(args)

    # Import database from extracted data
    _import_database(db, args)

    # Import media from extracted data
    _import_media(db, args)

    # NOTE(review): reconstructed cleanup of the temp cache — confirm.
    _clean(args)
125 def _setup_paths(args
):
127 Populate ``args`` variable with cache subpaths
129 args
._cache
_path
= dict()
133 'database': 'database'}
135 for key
, val
in PATH_MAP
.items():
136 args
._cache
_path
[key
] = os
.path
.join(args
.cache_path
, val
)
141 def _create_archive(args
):
143 Create the tar archive
145 print "\n== Compressing to archive ==\n"
152 tf
.add(args
.cache_path
, 'mediagoblin-data/')
154 print "\n== Archiving done ==\n"
159 Remove cache directory
161 shutil
.rmtree(args
.cache_path
)
164 def _export_check(args
):
166 Run security checks for export command
168 if os
.path
.exists(args
.tar_file
):
169 overwrite
= raw_input(
170 'The output file already exists. '
171 'Are you **SURE** you want to overwrite it? '
173 if not overwrite
== 'yes':
181 def _export_database(db
, args
):
182 print "\n== Exporting database ==\n"
184 command
= '{mongodump_path} -d {database} -o {mongodump_cache}'.format(
185 mongodump_path
=args
.mongodump_path
,
187 mongodump_cache
=args
._cache
_path
['database'])
189 p
= subprocess
.Popen([
192 '-o', args
._cache
_path
['database']])
196 print "\n== Database exported ==\n"
def _export_media(db, args):
    """
    Copy every media file out of the public store into the export cache.

    Mirrors _import_media: iterates all media entries and writes each
    referenced file into the 'media' cache directory.
    """
    print("\n== Exporting media ==\n")

    media_cache = BasicFileStorage(
        args._cache_path['media'])

    # TODO: Add export of queue files
    queue_cache = BasicFileStorage(
        args._cache_path['queue'])

    for entry in db.media_entries.find():
        for path in entry['media_files'].values():
            mc_file = media_cache.get_file(path, mode='wb')
            # Copy the live public-store file's bytes into the cache.
            mc_file.write(
                mg_globals.public_store.get_file(path, mode='rb').read())

    print("\n== Media exported ==\n")
def env_export(args):
    """
    Export database and media files to a tar archive

    Dumps the database and media into a cache directory, compresses it
    into ``args.tar_file``, and removes the cache.  Returns False and
    exits early when preconditions fail.
    """
    if args.cache_path:
        # A user-supplied cache dir must not already exist: the export
        # would otherwise mix in stale files.
        if os.path.exists(args.cache_path):
            print('The cache directory must not exist before you run this script')
            print('Cache directory: {0}'.format(args.cache_path))
            return False
    else:
        args.cache_path = tempfile.mkdtemp()

    args = _setup_paths(args)

    if not _export_check(args):
        print("\n== Checks did not pass, exiting ==\n")
        return False

    setup_global_and_app_config(args.conf_file)

    # Creates mg_globals.public_store and mg_globals.queue_store
    setup_storage()

    config, validation_result = read_mediagoblin_config(args.conf_file)
    connection, db = setup_connection_and_db_from_config(
        config['mediagoblin'], use_pymongo=True)

    _export_database(db, args)

    _export_media(db, args)

    _create_archive(args)

    # NOTE(review): reconstructed cleanup of the temp cache — confirm.
    _clean(args)