support restore operation on s3 keys

Konstantinos Koukopoulos 2014-03-26 19:15:08 +02:00
parent c9d5dffa24
commit 8ada1d7829
3 changed files with 59 additions and 2 deletions

moto/s3/models.py

@@ -20,6 +20,7 @@ class FakeKey(object):
         self.last_modified = datetime.datetime.now()
         self._storage_class = storage
         self._metadata = {}
+        self._expiry = None
 
     def copy(self, new_name = None):
         r = copy.deepcopy(self)
@@ -40,6 +41,9 @@ class FakeKey(object):
         self.value += value
         self.last_modified = datetime.datetime.now()
 
+    def restore(self, days):
+        self._expiry = datetime.datetime.now() + datetime.timedelta(days)
+
     @property
     def etag(self):
         value_md5 = hashlib.md5()
@@ -69,7 +73,9 @@ class FakeKey(object):
         if self._storage_class is not None:
             if self._storage_class != 'STANDARD':
                 r['x-amz-storage-class'] = self._storage_class
-
+        if self._expiry is not None:
+            r['x-amz-restore'] = 'ongoing-request="false", expiry-date={}'.format(
+                self.expiry_date)
         return r
 
     @property
@@ -83,6 +89,11 @@ class FakeKey(object):
         else:
             return 'STANDARD'
 
+    @property
+    def expiry_date(self):
+        if self._expiry is not None:
+            return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")
+
 
 class FakeMultipart(object):
     def __init__(self, key_name):
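For reference, a standalone sketch of the header value the new code emits, mirroring restore() and the expiry_date property above with only the stdlib (the frozen date matches the tests further down):

import datetime

# Mirrors FakeKey.restore(days) and FakeKey.expiry_date from the diff:
# timedelta(1) treats the bare argument as a number of days.
now = datetime.datetime(2012, 1, 1, 12, 0, 0)
expiry = now + datetime.timedelta(1)
header = 'ongoing-request="false", expiry-date={}'.format(
    expiry.strftime("%a, %d %b %Y %H:%M:%S GMT"))
print(header)  # ongoing-request="false", expiry-date=Mon, 02 Jan 2012 12:00:00 GMT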

moto/s3/responses.py

@@ -6,6 +6,7 @@ from jinja2 import Template
 from .exceptions import BucketAlreadyExists
 from .models import s3_backend
 from .utils import bucket_name_from_url
+from xml.dom import minidom
 
 
 def parse_key_name(pth):
@@ -261,8 +262,17 @@ class ResponseObject(object):
                )
                template = Template(S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR)
                return 400, headers, template.render()
+            elif parsed_url.query == 'restore':
+                es = minidom.parseString(body).getElementsByTagName('Days')
+                days = es[0].childNodes[0].wholeText
+                key = self.backend.get_key(bucket_name, key_name)
+                r = 202
+                if key.expiry_date is not None:
+                    r = 200
+                key.restore(int(days))
+                return r, headers, ""
            else:
-                raise NotImplementedError("Method POST had only been implemented for multipart uploads so far")
+                raise NotImplementedError("Method POST had only been implemented for multipart uploads and restore operations, so far")
        else:
            raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))

tests/test_s3/test_s3.py

@@ -355,3 +355,39 @@ def test_copy_key_reduced_redundancy():
     keys = dict([(k.name, k) for k in bucket])
     keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY")
     keys['the-key'].storage_class.should.equal("STANDARD")
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3
+def test_restore_key():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+    list(bucket)[0].ongoing_restore.should.be.none
+    key.restore(1)
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
+    key.ongoing_restore.should.be.false
+    key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
+    key.restore(2)
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
+    key.ongoing_restore.should.be.false
+    key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT")
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3
+def test_restore_key_headers():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+    key.restore(1, headers={'foo': 'bar'})
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
+    key.ongoing_restore.should.be.false
+    key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
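Both tests pin the clock with freezegun so the expiry-date assertions can compare exact GMT strings; a minimal illustration of what freeze_time does:

import datetime
from freezegun import freeze_time

# freeze_time pins datetime.datetime.now(), which FakeKey.restore() calls,
# so restoring for one day always expires at "Mon, 02 Jan 2012 12:00:00 GMT".
with freeze_time("2012-01-01 12:00:00"):
    print(datetime.datetime.now())  # 2012-01-01 12:00:00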