Added basic implementation of key/bucket versioning
parent 50d2608478
commit 5409d99ca2
@@ -20,3 +20,4 @@ Moto is written by Steve Pulec with contributions from:
 * [Chris St. Pierre](https://github.com/stpierre)
 * [Frank Mata](https://github.com/matafc)
 * [Clint Ecker](https://github.com/clintecker)
+* [Richard Eames](https://github.com/Naddiseo)
@@ -14,7 +14,8 @@ UPLOAD_PART_MIN_SIZE = 5242880
 
 
 class FakeKey(object):
-    def __init__(self, name, value, storage="STANDARD", etag=None):
+    def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0):
         self.name = name
         self.value = value
         self.last_modified = datetime.datetime.now()
@@ -22,6 +23,8 @@ class FakeKey(object):
         self._metadata = {}
         self._expiry = None
         self._etag = etag
+        self._version_id = version_id
+        self._is_versioned = is_versioned
 
     def copy(self, new_name=None):
         r = copy.deepcopy(self)
@@ -42,6 +45,10 @@ class FakeKey(object):
         self.value += value
         self.last_modified = datetime.datetime.now()
         self._etag = None # must recalculate etag
+        if self._is_versioned:
+            self._version_id += 1
+        else:
+            self._is_versioned = 0
 
     def restore(self, days):
         self._expiry = datetime.datetime.now() + datetime.timedelta(days)
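A minimal sketch of the behaviour this hunk adds. The enclosing method is not shown in the hunk context; the call below assumes it is FakeKey.append_to_value in moto's S3 models module, so treat the names as illustrative:

from moto.s3.models import FakeKey

# Assumption: append_to_value is the method containing the lines changed above.
key = FakeKey(name='the-key', value='v1', is_versioned=True, version_id=0)
key.append_to_value(' more')      # versioned key: the counter is bumped
assert key._version_id == 1

plain = FakeKey(name='other-key', value='v1')
plain.append_to_value(' more')    # unversioned key: the counter stays at 0
assert plain._version_id == 0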
@@ -79,6 +86,10 @@ class FakeKey(object):
         if self._expiry is not None:
             rhdr = 'ongoing-request="false", expiry-date="{0}"'
             r['x-amz-restore'] = rhdr.format(self.expiry_date)
+
+        if self._is_versioned:
+            r['x-amz-version-id'] = self._version_id
+
         return r
 
     @property
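The header side of the same change. The hunk does not show the name of the enclosing property; the sketch below assumes it is FakeKey.response_dict, so it is illustrative only:

from moto.s3.models import FakeKey

# Assumption: the modified lines live in the response_dict property.
key = FakeKey(name='the-key', value='v', is_versioned=True, version_id=3)
assert key.response_dict['x-amz-version-id'] == 3   # surfaced to clients as a header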
@@ -137,10 +148,16 @@ class FakeMultipart(object):
 
 
 class FakeBucket(object):
     def __init__(self, name):
         self.name = name
         self.keys = {}
         self.multiparts = {}
+        self.versioning_status = None
+
+    @property
+    def is_versioned(self):
+        return self.versioning_status == 'Enabled'
 
 
 class S3Backend(BaseBackend):
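The new property treats only an explicit 'Enabled' status as versioned, which matches S3: a bucket starts unconfigured (None) and can later be 'Suspended'. A quick sketch of the resulting behaviour (module path assumed to be moto.s3.models):

from moto.s3.models import FakeBucket

bucket = FakeBucket('foobar')
assert bucket.is_versioned is False       # versioning_status defaults to None
bucket.versioning_status = 'Enabled'
assert bucket.is_versioned is True
bucket.versioning_status = 'Suspended'    # suspended buckets stop versioning keys
assert bucket.is_versioned is False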
@@ -171,12 +188,30 @@ class S3Backend(BaseBackend):
             return self.buckets.pop(bucket_name)
         return None
 
+    def set_bucket_versioning(self, bucket_name, status):
+        self.buckets[bucket_name].versioning_status = status
+
+    def get_bucket_versioning(self, bucket_name):
+        return self.buckets[bucket_name].versioning_status
+
     def set_key(self, bucket_name, key_name, value, storage=None, etag=None):
         key_name = clean_key_name(key_name)
 
         bucket = self.buckets[bucket_name]
-        new_key = FakeKey(name=key_name, value=value,
-                          storage=storage, etag=etag)
+
+        old_key = bucket.keys.get(key_name, None)
+        if old_key is not None and bucket.is_versioned:
+            new_version_id = old_key._version_id + 1
+        else:
+            new_version_id = 0
+
+        new_key = FakeKey(
+            name=key_name,
+            value=value,
+            storage=storage,
+            etag=etag,
+            is_versioned=bucket.is_versioned,
+            version_id=new_version_id)
         bucket.keys[key_name] = new_key
 
         return new_key
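Putting the backend pieces together: when the bucket is versioned, overwriting an existing key takes the previous key's counter plus one; otherwise the id stays at 0. A minimal sketch against the backend API shown above (bucket and key names are illustrative):

from moto.s3.models import S3Backend

backend = S3Backend()
backend.create_bucket('foobar')
backend.set_bucket_versioning('foobar', 'Enabled')

first = backend.set_key('foobar', 'the-key', 'v1')    # no previous key -> version 0
second = backend.set_key('foobar', 'the-key', 'v2')   # overwrite -> version 1
assert (first._version_id, second._version_id) == (0, 1)
assert backend.get_bucket_versioning('foobar') == 'Enabled'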
@@ -48,7 +48,7 @@ class ResponseObject(object):
         elif method == 'GET':
             return self._bucket_response_get(bucket_name, querystring, headers)
         elif method == 'PUT':
-            return self._bucket_response_put(bucket_name, headers)
+            return self._bucket_response_put(request, bucket_name, querystring, headers)
         elif method == 'DELETE':
             return self._bucket_response_delete(bucket_name, headers)
         elif method == 'POST':
@@ -73,14 +73,17 @@ class ResponseObject(object):
             return 200, headers, template.render(
                 bucket_name=bucket_name,
                 uploads=multiparts)
+        elif 'versioning' in querystring:
+            versioning = self.backend.get_bucket_versioning(bucket_name)
+            template = Template(S3_BUCKET_GET_VERSIONING)
+            return 200, headers, template.render(status=versioning)
 
         bucket = self.backend.get_bucket(bucket_name)
         if bucket:
             prefix = querystring.get('prefix', [None])[0]
             delimiter = querystring.get('delimiter', [None])[0]
             result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter)
             template = Template(S3_BUCKET_GET_RESPONSE)
-            return template.render(
+            return 200, headers, template.render(
                 bucket=bucket,
                 prefix=prefix,
                 delimiter=delimiter,
@@ -90,13 +93,22 @@ class ResponseObject(object):
         else:
             return 404, headers, ""
 
-    def _bucket_response_put(self, bucket_name, headers):
+    def _bucket_response_put(self, request, bucket_name, querystring, headers):
+        if 'versioning' in querystring:
+            ver = re.search('<Status>([A-Za-z]+)</Status>', request.body)
+            if ver:
+                self.backend.set_bucket_versioning(bucket_name, ver.group(1))
+                template = Template(S3_BUCKET_VERSIONING)
+                return template.render(bucket_versioning_status=ver.group(1))
+            else:
+                return 404, headers, ""
+        else:
             try:
                 new_bucket = self.backend.create_bucket(bucket_name)
             except BucketAlreadyExists:
                 return 409, headers, ""
             template = Template(S3_BUCKET_CREATE_RESPONSE)
-            return template.render(bucket=new_bucket)
+            return 200, headers, template.render(bucket=new_bucket)
 
     def _bucket_response_delete(self, bucket_name, headers):
         removed_bucket = self.backend.delete_bucket(bucket_name)
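The PUT handler extracts the status with a regular expression rather than an XML parser, which is enough for the VersioningConfiguration documents boto sends. A sketch of the request body it expects:

import re

body = ('<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
        '<Status>Enabled</Status>'
        '</VersioningConfiguration>')
ver = re.search('<Status>([A-Za-z]+)</Status>', body)
assert ver.group(1) == 'Enabled'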
@@ -411,6 +423,21 @@ S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
   <HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
 </Error>"""
 
+S3_BUCKET_VERSIONING = """
+<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+<Status>{{ bucket_versioning_status }}</Status>
+</VersioningConfiguration>
+"""
+
+S3_BUCKET_GET_VERSIONING = """
+{% if status is none %}
+    <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
+{% else %}
+    <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+    <Status>{{ status }}</Status>
+    </VersioningConfiguration>
+{% endif %}
+"""
 S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
 <DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
 {% for k in deleted %}
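Rendering the new templates directly shows the XML a client receives; the import path below assumes they live in moto.s3.responses, the module this diff modifies:

from jinja2 import Template
from moto.s3.responses import S3_BUCKET_GET_VERSIONING, S3_BUCKET_VERSIONING

# Unconfigured bucket: status is None, so an empty element comes back.
print(Template(S3_BUCKET_GET_VERSIONING).render(status=None))
# Configured bucket: the current status is echoed.
print(Template(S3_BUCKET_GET_VERSIONING).render(status='Enabled'))
# Body returned for a successful PUT ?versioning request.
print(Template(S3_BUCKET_VERSIONING).render(bucket_versioning_status='Enabled'))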
@@ -508,3 +508,38 @@ def test_restore_key_headers():
     key.ongoing_restore.should_not.be.none
     key.ongoing_restore.should.be.false
     key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
+
+
+@mock_s3
+def test_get_versioning_status():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('foobar')
+    d = bucket.get_versioning_status()
+    d.should.be.empty
+
+    bucket.configure_versioning(versioning=True)
+    d = bucket.get_versioning_status()
+    d.shouldnt.be.empty
+    d.should.have.key('Versioning').being.equal('Enabled')
+
+    bucket.configure_versioning(versioning=False)
+    d = bucket.get_versioning_status()
+    d.should.have.key('Versioning').being.equal('Suspended')
+
+
+@mock_s3
+def test_key_version():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('foobar')
+    bucket.configure_versioning(versioning=True)
+
+    key = Key(bucket)
+    key.key = 'the-key'
+    key.version_id.should.be.none
+    key.set_contents_from_string('some string')
+    key.version_id.should.equal('0')
+    key.set_contents_from_string('some string')
+    key.version_id.should.equal('1')
+
+    key = bucket.get_key('the-key')
+    key.version_id.should.equal('1')