#!/usr/bin/env python
#
# -*- mode:python; sh-basic-offset:4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim:set tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8:
#
# Copyright (c) 2012, Jorge A Gallegos <kad@blegh.net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
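"""Restore `tablesnap` backups from an Amazon S3 bucket.

This is the companion script to `tablesnap`: it downloads previously
uploaded file-sets from S3 into a local directory. Run it with -h for the
full option list; a minimal invocation (hypothetical bucket and paths)
looks like:

    tableslurp mybucket /var/lib/cassandra/data/myks/mytable /tmp/restore

with AWS credentials taken from AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
in the environment.
"""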
import argparse
import calendar
import grp
import json
import logging
import os
import pwd
import re
import socket
import sys
import threading
from datetime import datetime
from Queue import Queue

import boto
import boto.s3
from dateutil import parser

log = logging.getLogger('tableslurp')
stderr = logging.StreamHandler()
stderr.setFormatter(logging.Formatter(
    '%(name)s [%(asctime)s] %(levelname)s %(message)s'))
log.addHandler(stderr)
if os.environ.get('TDEBUG', False):
    log.setLevel(logging.DEBUG)
else:
    log.setLevel(logging.INFO)

# Cassandra commitlog segments are named CommitLog-<version>-<id>.log,
# where <id> is derived from a millisecond timestamp.
clog_regex = re.compile(r'CommitLog-\d+-(\d+)\.log')


def parse_clog(clog):
    """Return the numeric segment id of a commitlog file name."""
    return int(clog_regex.search(clog).group(1))

def get_bucket(region, key, secret, token, bucket_name):
    # Unsure if boto is thread-safe, so reconnect every time. boto accepts
    # security_token=None, so a single call covers both cases.
    log.debug('Connecting to s3')
    conn = boto.s3.connect_to_region(region,
        aws_access_key_id=key,
        aws_secret_access_key=secret,
        security_token=token)
    bucket = conn.get_bucket(bucket_name)
    log.debug('Connected to s3')
    return bucket

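# tablesnap stores, next to every uploaded file, a small
# `<file>-listdir.json` manifest whose body maps the source directory to
# the list of files that were in it at upload time, e.g. (hypothetical
# names):
#   {"/var/lib/cassandra/data/ks/tbl": ["mc-1-big-Data.db", ...]}
# The helpers below locate the newest such manifest per table and turn it
# into a list of files to download.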
def find_latest_listdirjson_for_every_table(bucket, prefix):
    latest_listdirjsons = {}
    # Get all keyspace folders, e.g. <prefix>/<ks>
    keyspace_dirs = [_.name for _ in
                     bucket.list(prefix='%s/' % prefix, delimiter='/')]
    for keyspace_dir in keyspace_dirs:
        # Get all table folders for every keyspace folder,
        # e.g. <prefix>/<ks>/<tbl>
        table_dirs = [_.name for _ in
                      bucket.list(prefix=keyspace_dir, delimiter='/')]
        for table_dir in table_dirs:
            # Get all listdir.json files for every table folder, e.g.
            # <prefix>/<ks>/<tbl>/<sstable>-Data.db-listdir.json
            keys = [_ for _ in bucket.list(prefix=table_dir, delimiter='/')
                    if _.name.endswith('-listdir.json')]
            if keys:
                keys.sort(key=lambda l: parser.parse(l.last_modified))
                key = keys.pop()
                table = table_dir.replace('%s/' % prefix, '').rstrip('/')
                latest_listdirjsons[table] = key
    return latest_listdirjsons

def build_recursive_fileset(bucket, prefix):
    table_to_latest_listdirjson_mapping = \
        find_latest_listdirjson_for_every_table(bucket, prefix)
    table_to_files_mapping = {}
    for table, listdirjson in table_to_latest_listdirjson_mapping.items():
        json_data = json.loads(listdirjson.get_contents_as_string())
        # There should only ever be one latest listdir.json and hence one
        # set of (latest) SSTables per table
        sstables = json_data.itervalues().next()
        table_to_files_mapping[table] = sstables
        log.info('Found %s with %d files', table, len(sstables))
    return table_to_files_mapping

def build_clog_fileset(bucket, prefix, clog_prefix):
    fileset = []
    table_to_latest_listdirjson_mapping = \
        find_latest_listdirjson_for_every_table(bucket, prefix)
    listdirjsons = table_to_latest_listdirjson_mapping.values()
    listdirjsons.sort(key=lambda l: parser.parse(l.last_modified),
                      reverse=True)
    oldest_key = listdirjsons.pop()
    # last_modified is a UTC timestamp, so convert it with timegm rather
    # than the non-portable, local-time strftime('%s')
    oldest_dt = datetime.strptime(oldest_key.last_modified,
                                  '%Y-%m-%dT%H:%M:%S.%fZ')
    oldest_timestamp_ms = calendar.timegm(oldest_dt.timetuple()) * 1000
    clogs = [_.name for _ in bucket.list(prefix='%s/' % clog_prefix)]
    clogs.sort(reverse=True)
    for clog in clogs:
        if parse_clog(clog) > oldest_timestamp_ms:
            fileset.append(clog.replace('%s/' % clog_prefix, '').rstrip('/'))
        else:
            break
    return fileset

def build_single_fileset(bucket, prefix, origin, target_file):
    key = None
    # If you want to restore one file-set in particular
    if target_file:
        key = bucket.get_key('%s/%s-listdir.json' % (prefix, target_file))
    else:
        prefix_part_count = prefix.count('/') + 1
        keys = [_ for _ in bucket.list(prefix='%s/' % (prefix,))
                if _.name.endswith('-listdir.json')
                and _.name.count('/') == prefix_part_count]
        if keys:
            keys.sort(key=lambda l: parser.parse(l.last_modified))
            key = keys.pop()
    if not key:
        raise LookupError('Cannot find anything to restore from %s:%s/%s' %
                          (bucket.name, prefix, target_file or ''))
    json_data = json.loads(key.get_contents_as_string())
    fileset = json_data[origin]
    log.info('Fileset contains %d files to download', len(fileset))
    return fileset

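# Each queued download is wrapped in a DownloadCounter so that failed
# transfers can be re-queued; _worker() gives up on a file after five
# attempts.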
class DownloadCounter(object):

    def __init__(self, filename):
        self.filename = filename
        self.attemptcount = 0

    def increment(self):
        self.attemptcount += 1

class DownloadHandler(object):

    def __init__(self, args=None, target=None, prefix=None, fileset=None):
        self.target = target
        self.origin = args.origin[0]
        self.preserve = args.preserve
        self.key = args.aws_key
        self.secret = args.aws_secret
        self.region = args.aws_region
        self.token = args.token
        self.bucket_name = args.bucket[0]
        self.num_threads = args.threads
        self.force = args.force
        self.name = args.name or socket.getfqdn()
        self.prefix = prefix
        self.fileset = fileset or []
        # Per-instance queue and thread bookkeeping
        self.queue = Queue()
        self.threads = {}
        # It may be a bit sub-optimal, but I'd rather fail sooner than later
        (owner, group) = self._check_metadata()
        if not self.preserve:
            owner = args.owner
            group = args.group
        try:
            self.owner = pwd.getpwnam(owner).pw_uid
            self.group = grp.getgrnam(group).gr_gid
        except Exception as e:
            log.error(e)
            raise OSError('User/Group pair %s:%s does not exist' %
                          (owner, group,))

    def _get_bucket(self):
        return get_bucket(self.region, self.key, self.secret, self.token,
                          self.bucket_name)
    def _check_metadata(self):
        bucket = self._get_bucket()
        k = None
        for fileset in self.fileset:
            k = bucket.get_key('%s/%s' % (self.prefix, fileset))
            if k is not None:
                break
        # The librato branch introduced this
        owner = None
        group = None
        if k is None:
            log.warning('Cannot fetch metadata information')
            return (owner, group)
        meta = k.get_metadata('stat')
        log.debug('Metadata is %s' % (meta,))
        if meta:
            try:
                json_data = json.loads(meta)
                owner = json_data['user']
                group = json_data['group']
            except TypeError as te:
                log.debug(te)
                log.warning('Could not parse stat metadata for %s' %
                            (k.name,))
            except KeyError as ke:
                log.debug(ke)
                log.warning('Incomplete stat metadata for %s, will ignore' %
                            (k.name,))
        return (owner, group)
    def _test_permissions(self):
        log.info('Will now try to test writing to the target dir %s' %
                 (self.target,))
        try:
            if not os.path.isdir(self.target):
                log.debug('Creating directory %s' % (self.target,))
                os.makedirs(self.target)
            log.debug('Changing owner:group for %s to %s:%s' %
                      (self.target, self.owner, self.group,))
            os.chown(self.target, self.owner, self.group)
        except Exception as e:
            log.debug(e)
            log.exception('Could not prepare target dir %s' % (self.target,))
        log.info('Will write to %s' % (self.target,))
    def _worker(self, idx, queue):
        log.info('Thread #%d processing items' % (idx,))
        bucket = self._get_bucket()
        while not queue.empty():
            queueddownload = queue.get()
            fname = queueddownload.filename
            keypath = '%s/%s' % (self.prefix, fname,)
            destfile = os.path.join(self.target, os.path.basename(fname))
            log.debug('Checking if we need to download %s to %s' %
                      (keypath, destfile,))
            if queueddownload.attemptcount < 5:
                download = False
                # Retry downloading until we succeed
                try:
                    key = bucket.get_key(keypath)
                    log.debug('Key object is %s' % key)
                    if os.path.isfile(destfile):
                        stat = os.stat(destfile)
                        if self.force:
                            download = True
                        elif stat.st_size != key.size:
                            log.info('%s and %s sizes differ, will '
                                     're-download' % (key.name, destfile,))
                            download = True
                    else:
                        download = True
                    if download and key:
                        log.info('Downloading %s from %s to %s' %
                                 (key.name, bucket.name, destfile))
                        key.get_contents_to_filename(destfile)
                        log.debug('Changing owner:group for %s to %s:%s' %
                                  (destfile, self.owner, self.group,))
                        os.chown(destfile, self.owner, self.group)
                except Exception as e:
                    log.debug(e)
                    log.exception('Failed to download `%s`, retrying' %
                                  (fname,))
                    # We could not download, try again
                    queueddownload.increment()
                    queue.put(queueddownload)
            else:
                log.info('Tried to download %s too many times. Giving up' %
                         fname)
            # Pop the task regardless of state. If it failed, we have put
            # it back already
            queue.task_done()
        log.info('Thread #%d finished processing' % (idx,))
    def run(self):
        self._test_permissions()
        log.info('Running')
        # Queue up the filesets
        for filename in self.fileset:
            log.info('Pushing file %s onto queue' % filename)
            self.queue.put(DownloadCounter(filename))
        # Launch the worker threads
        for idx in range(0, self.num_threads):
            t = threading.Thread(target=self._worker,
                                 kwargs={'idx': idx, 'queue': self.queue})
            t.setDaemon(True)
            self.threads[idx] = t
            t.start()
        # Wait for everything to finish downloading
        self.queue.join()
        log.info('My job is done.')

def main():
    p = pwd.getpwuid(os.getuid())
    owner = p.pw_name
    group = [_.gr_name for _ in grp.getgrall() if _.gr_gid == p.pw_gid][0]
    ap = argparse.ArgumentParser(
        description='This is the companion script to the `tablesnap` '
        'program, which you can use to restore files from an Amazon S3 '
        'bucket to any local directory you have write permission on. '
        'While the code is straightforward, the program assumes the files '
        'you are restoring were previously backed up with `tablesnap`.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('-k', '--aws-key',
        default=os.environ.get('AWS_ACCESS_KEY_ID'),
        help='Amazon S3 Key (default from AWS_ACCESS_KEY_ID in environment)')
    ap.add_argument('-s', '--aws-secret',
        default=os.environ.get('AWS_SECRET_ACCESS_KEY'),
        help='Amazon S3 Secret (default from AWS_SECRET_ACCESS_KEY in '
        'environment)')
    ap.add_argument('--token',
        default=os.environ.get('AWS_SECURITY_TOKEN'),
        help='Amazon S3 Token (default from AWS_SECURITY_TOKEN in '
        'environment)')
    ap.add_argument('--aws-region',
        default='us-east-1',
        choices=[region.name for region in boto.s3.regions()],
        help='AWS region to connect to.')
    ap.add_argument('-p', '--preserve', default=False, action='store_true',
        help='Preserve the permissions (if they exist) from the source. '
        'This overrides -o and -g')
    ap.add_argument('-o', '--owner', default=owner,
        help='After download, chown files to this user.')
    ap.add_argument('-g', '--group', default=group,
        help='After download, chgrp files to this group.')
    ap.add_argument('-t', '--threads', type=int, default=4,
        help='Split the download between this many threads')
    include_group = ap.add_mutually_exclusive_group()
    include_group.add_argument('-f', '--file',
        help='If specified, download the file-set this file belongs to '
        'instead of the latest one available.')
    include_group.add_argument('-r', '--recursive', default=False,
        action='store_true',
        help='Recursively download the files at the given origin (path), '
        'using the latest ones available.')
    include_group.add_argument('--commitlogs',
        help='Download commitlogs, up to the most recent batch possible. '
        'If your bucket structure is <fqdn or name>/var/lib/cassandra/'
        'commitlog, provide /var/lib/cassandra/commitlog.')
    ap.add_argument('--force', default=False, action='store_true',
        help='Force download of files even if they already exist')
    ap.add_argument('-n', '--name', default=socket.getfqdn(),
        help='Use this name instead of the FQDN to prefix the bucket dir')
    ap.add_argument('bucket', nargs=1,
        help='S3 bucket to download files from')
    ap.add_argument('origin', nargs=1,
        help='Path inside the bucket to the directory you want to download '
        'files from. E.g. if your bucket structure is '
        '<fqdn or name>/var/lib/cassandra/data/<ks>/<tbl>/<files>, provide '
        '/var/lib/cassandra/data with --recursive or --commitlogs; '
        'otherwise provide /var/lib/cassandra/data/<ks>/<tbl> to restore '
        'just one table.')
    ap.add_argument('target', nargs=1,
        help='Path in the local FS where files should be downloaded to')
    args = ap.parse_args()
    prefix = '%s:%s' % (args.name, args.origin[0])
    bucket = get_bucket(args.aws_region, args.aws_key, args.aws_secret,
                        args.token, args.bucket[0])
    log.info('Building fileset')
    if args.recursive:
        table_to_files_mapping = build_recursive_fileset(bucket, prefix)
        for table, fileset in table_to_files_mapping.iteritems():
            dh = DownloadHandler(args,
                                 target=os.path.join(args.target[0], table),
                                 prefix=os.path.join(prefix, table),
                                 fileset=fileset)
            dh.run()
    elif args.commitlogs:
        clog_prefix = '%s:%s' % (args.name, args.commitlogs)
        fileset = build_clog_fileset(bucket, prefix, clog_prefix)
        dh = DownloadHandler(args, target=args.target[0],
                             prefix=clog_prefix, fileset=fileset)
        dh.run()
    else:
        fileset = build_single_fileset(bucket, prefix, args.origin[0],
                                       args.file)
        dh = DownloadHandler(args, target=args.target[0], prefix=prefix,
                             fileset=fileset)
        dh.run()


if __name__ == '__main__':
    sys.exit(main())