From 2ba64e1322c0e62b0604b04e9f5a34342e4acadb Mon Sep 17 00:00:00 2001
From: Konstantinos Koukopoulos
Date: Wed, 26 Mar 2014 16:02:14 +0200
Subject: [PATCH 1/5] support replacing key metadata when copying

---
 moto/s3/models.py        |  3 +++
 moto/s3/responses.py     | 26 +++++++++++++++++---------
 tests/test_s3/test_s3.py | 16 ++++++++++++++++
 3 files changed, 36 insertions(+), 9 deletions(-)

diff --git a/moto/s3/models.py b/moto/s3/models.py
index 2def8db00..af96f0516 100644
--- a/moto/s3/models.py
+++ b/moto/s3/models.py
@@ -22,6 +22,9 @@ class FakeKey(object):
     def set_metadata(self, key, metadata):
         self._metadata[key] = metadata
 
+    def clear_metadata(self):
+        self._metadata = {}
+
     def append_to_value(self, value):
         self.value += value
         self.last_modified = datetime.datetime.now()
diff --git a/moto/s3/responses.py b/moto/s3/responses.py
index 27aa0118d..998d1e304 100644
--- a/moto/s3/responses.py
+++ b/moto/s3/responses.py
@@ -125,6 +125,18 @@ class ResponseObject(object):
             status_code, headers, response_content = response
             return status_code, headers, response_content
 
+    def _key_set_metadata(self, request, key, replace=False):
+        meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE)
+        if replace is True:
+            key.clear_metadata()
+        for header in request.headers:
+            if isinstance(header, basestring):
+                result = meta_regex.match(header)
+                if result:
+                    meta_key = result.group(0).lower()
+                    metadata = request.headers[header]
+                    key.set_metadata(meta_key, metadata)
+
     def _key_response(self, request, full_url, headers):
         parsed_url = urlparse(full_url)
         query = parse_qs(parsed_url.query)
@@ -182,6 +194,10 @@ class ResponseObject(object):
                 # Copy key
                 src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/", 1)
                 self.backend.copy_key(src_bucket, src_key, bucket_name, key_name)
+                mdirective = request.headers.get('x-amz-metadata-directive')
+                if mdirective is not None and mdirective == 'REPLACE':
+                    new_key = self.backend.get_key(bucket_name, key_name)
+                    self._key_set_metadata(request, new_key, replace=True)
                 template = Template(S3_OBJECT_COPY_RESPONSE)
                 return template.render(key=src_key)
             streaming_request = hasattr(request, 'streaming') and request.streaming
@@ -196,16 +212,8 @@ class ResponseObject(object):
                 # Initial data
                 new_key = self.backend.set_key(bucket_name, key_name, body)
                 request.streaming = True
+                self._key_set_metadata(request, new_key)
 
-            #Metadata
-            meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE)
-            for header in request.headers:
-                if isinstance(header, basestring):
-                    result = meta_regex.match(header)
-                    if result:
-                        meta_key = result.group(0).lower()
-                        metadata = request.headers[header]
-                        new_key.set_metadata(meta_key, metadata)
             template = Template(S3_OBJECT_RESPONSE)
             headers.update(new_key.response_dict)
             return 200, headers, template.render(key=new_key)
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 66866335a..b39bd6cb8 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -143,6 +143,22 @@ def test_set_metadata():
     bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
 
 
+@mock_s3
+def test_copy_key_replace_metadata():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_metadata('md', 'Metadatastring')
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-key',
+                    metadata={'momd': 'Mometadatastring'})
+
+    bucket.get_key("new-key").get_metadata('md').should.be.none
+    bucket.get_key("new-key").get_metadata('momd').should.equal('Mometadatastring')
+
+
 @freeze_time("2012-01-01 12:00:00")
 @mock_s3
 def test_last_modified():
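
A quick sketch of how this REPLACE path is driven from boto 2.x; the function
and bucket names below are illustrative, not part of the patch. Passing
metadata= to Bucket.copy_key is what makes boto send the
x-amz-metadata-directive: REPLACE header that the new branch checks for:

    import boto
    from boto.s3.key import Key
    from moto import mock_s3

    @mock_s3
    def demo_replace_directive():
        conn = boto.connect_s3()
        bucket = conn.create_bucket('demo-bucket')
        src = Key(bucket)
        src.key = 'src-key'
        src.set_metadata('md', 'original')
        src.set_contents_from_string('payload')

        # metadata= switches boto from the default COPY directive to REPLACE,
        # so the copy carries only the new metadata and 'md' is dropped.
        bucket.copy_key('dst-key', 'demo-bucket', 'src-key',
                        metadata={'other': 'value'})
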
bucket.get_key("new-key").get_metadata('md').should.be.none + bucket.get_key("new-key").get_metadata('momd').should.equal('Mometadatastring') + + @freeze_time("2012-01-01 12:00:00") @mock_s3 def test_last_modified(): From 1f49148a649e1e00dfe884aaa26088a56b6b407d Mon Sep 17 00:00:00 2001 From: Konstantinos Koukopoulos Date: Wed, 26 Mar 2014 17:41:07 +0200 Subject: [PATCH 2/5] when copying keys create new objects using deepcopy --- moto/s3/models.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index af96f0516..76cfb6b66 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -2,6 +2,7 @@ import os import base64 import datetime import hashlib +import copy from moto.core import BaseBackend from moto.core.utils import iso_8601_datetime, rfc_1123_datetime @@ -19,6 +20,12 @@ class FakeKey(object): self.last_modified = datetime.datetime.now() self._metadata = {} + def copy(self, new_name = None): + r = copy.deepcopy(self) + if new_name is not None: + r.name = new_name + return r + def set_metadata(self, key, metadata): self._metadata[key] = metadata @@ -229,6 +236,9 @@ class S3Backend(BaseBackend): dest_key_name = clean_key_name(dest_key_name) src_bucket = self.buckets[src_bucket_name] dest_bucket = self.buckets[dest_bucket_name] - dest_bucket.keys[dest_key_name] = src_bucket.keys[src_key_name] + key = src_bucket.keys[src_key_name] + if dest_key_name != src_key_name: + key = key.copy(dest_key_name) + dest_bucket.keys[dest_key_name] = key s3_backend = S3Backend() From c9d5dffa248089e3b418c08841d67284e6ac38ba Mon Sep 17 00:00:00 2001 From: Konstantinos Koukopoulos Date: Wed, 26 Mar 2014 17:52:31 +0200 Subject: [PATCH 3/5] support reduced redundancy storage --- moto/s3/models.py | 29 ++++++++++++++++++++++++----- moto/s3/responses.py | 11 +++++++---- tests/test_s3/test_s3.py | 29 +++++++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 9 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 76cfb6b66..7b3f78779 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -14,10 +14,11 @@ UPLOAD_PART_MIN_SIZE = 5242880 class FakeKey(object): - def __init__(self, name, value): + def __init__(self, name, value, storage=None): self.name = name self.value = value self.last_modified = datetime.datetime.now() + self._storage_class = storage self._metadata = {} def copy(self, new_name = None): @@ -32,6 +33,9 @@ class FakeKey(object): def clear_metadata(self): self._metadata = {} + def set_storage_class(self, storage_class): + self._storage_class = storage_class + def append_to_value(self, value): self.value += value self.last_modified = datetime.datetime.now() @@ -58,15 +62,27 @@ class FakeKey(object): @property def response_dict(self): - return { + r = { 'etag': self.etag, 'last-modified': self.last_modified_RFC1123, } + if self._storage_class is not None: + if self._storage_class != 'STANDARD': + r['x-amz-storage-class'] = self._storage_class + + return r @property def size(self): return len(self.value) + @property + def storage_class(self): + if self._storage_class is not None: + return self._storage_class + else: + return 'STANDARD' + class FakeMultipart(object): def __init__(self, key_name): @@ -140,11 +156,12 @@ class S3Backend(BaseBackend): return self.buckets.pop(bucket_name) return None - def set_key(self, bucket_name, key_name, value): + def set_key(self, bucket_name, key_name, value, storage=None): key_name = clean_key_name(key_name) bucket = self.buckets[bucket_name] - new_key = FakeKey(name=key_name, 
From c9d5dffa248089e3b418c08841d67284e6ac38ba Mon Sep 17 00:00:00 2001
From: Konstantinos Koukopoulos
Date: Wed, 26 Mar 2014 17:52:31 +0200
Subject: [PATCH 3/5] support reduced redundancy storage

---
 moto/s3/models.py        | 29 ++++++++++++++++++++++++-----
 moto/s3/responses.py     | 11 +++++++----
 tests/test_s3/test_s3.py | 29 +++++++++++++++++++++++++++++
 3 files changed, 60 insertions(+), 9 deletions(-)

diff --git a/moto/s3/models.py b/moto/s3/models.py
index 76cfb6b66..7b3f78779 100644
--- a/moto/s3/models.py
+++ b/moto/s3/models.py
@@ -14,10 +14,11 @@ UPLOAD_PART_MIN_SIZE = 5242880
 
 
 class FakeKey(object):
-    def __init__(self, name, value):
+    def __init__(self, name, value, storage=None):
         self.name = name
         self.value = value
         self.last_modified = datetime.datetime.now()
+        self._storage_class = storage
         self._metadata = {}
 
     def copy(self, new_name = None):
@@ -32,6 +33,9 @@ class FakeKey(object):
     def clear_metadata(self):
         self._metadata = {}
 
+    def set_storage_class(self, storage_class):
+        self._storage_class = storage_class
+
     def append_to_value(self, value):
         self.value += value
         self.last_modified = datetime.datetime.now()
@@ -58,15 +62,27 @@ class FakeKey(object):
     @property
     def response_dict(self):
-        return {
+        r = {
             'etag': self.etag,
             'last-modified': self.last_modified_RFC1123,
         }
+        if self._storage_class is not None:
+            if self._storage_class != 'STANDARD':
+                r['x-amz-storage-class'] = self._storage_class
+
+        return r
 
     @property
     def size(self):
         return len(self.value)
 
+    @property
+    def storage_class(self):
+        if self._storage_class is not None:
+            return self._storage_class
+        else:
+            return 'STANDARD'
+
 
 class FakeMultipart(object):
     def __init__(self, key_name):
diff --git a/moto/s3/responses.py b/moto/s3/responses.py
index 998d1e304..a0ee1fd10 100644
--- a/moto/s3/responses.py
+++ b/moto/s3/responses.py
@@ -190,10 +190,13 @@ class ResponseObject(object):
                 headers.update(key.response_dict)
                 return 200, headers, response
 
+            storage_class = request.headers.get('x-amz-storage-class', 'STANDARD')
+
             if 'x-amz-copy-source' in request.headers:
                 # Copy key
                 src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/", 1)
-                self.backend.copy_key(src_bucket, src_key, bucket_name, key_name)
+                self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
+                                      storage=storage_class)
                 mdirective = request.headers.get('x-amz-metadata-directive')
                 if mdirective is not None and mdirective == 'REPLACE':
                     new_key = self.backend.get_key(bucket_name, key_name)
@@ -210,7 +213,8 @@ class ResponseObject(object):
                 new_key = self.backend.append_to_key(bucket_name, key_name, body)
             else:
                 # Initial data
-                new_key = self.backend.set_key(bucket_name, key_name, body)
+                new_key = self.backend.set_key(bucket_name, key_name, body,
+                                               storage=storage_class)
                 request.streaming = True
                 self._key_set_metadata(request, new_key)
@@ -292,12 +296,11 @@ S3_BUCKET_GET_RESPONSE = """
       <LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
       <ETag>{{ key.etag }}</ETag>
       <Size>{{ key.size }}</Size>
-      <StorageClass>STANDARD</StorageClass>
+      <StorageClass>{{ key.storage_class }}</StorageClass>
       <Owner>
         <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
         <DisplayName>webfile</DisplayName>
       </Owner>
-      <StorageClass>STANDARD</StorageClass>
     </Contents>
   {% endfor %}
   {% if delimiter %}
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index b39bd6cb8..e72c8e517 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -326,3 +326,32 @@ def test_bucket_key_listing_order():
     delimiter = '/'
     keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
     keys.should.equal([u'toplevel/x/'])
+
+
+@mock_s3
+def test_key_with_reduced_redundancy():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test_bucket_name')
+
+    key = Key(bucket, 'test_rr_key')
+    key.set_contents_from_string('value1', reduced_redundancy=True)
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    list(bucket)[0].storage_class.should.equal('REDUCED_REDUNDANCY')
+
+
+@mock_s3
+def test_copy_key_reduced_redundancy():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-key', storage_class='REDUCED_REDUNDANCY')
+
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    keys = dict([(k.name, k) for k in bucket])
+    keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY")
+    keys['the-key'].storage_class.should.equal("STANDARD")
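
End to end, the storage-class plumbing behaves as sketched below, in the style
of the test suite (the demo names are illustrative). FakeKey.response_dict only
emits x-amz-storage-class for non-STANDARD values, mirroring S3, and the bucket
listing template now renders each key's own class instead of a hard-coded
STANDARD:

    import boto
    from boto.s3.key import Key
    from moto import mock_s3

    @mock_s3
    def demo_storage_class():
        conn = boto.connect_s3()
        bucket = conn.create_bucket('demo-bucket')

        rr = Key(bucket, 'rr-key')
        # reduced_redundancy=True makes boto send
        # x-amz-storage-class: REDUCED_REDUNDANCY on the PUT
        rr.set_contents_from_string('value', reduced_redundancy=True)

        std = Key(bucket, 'std-key')
        std.set_contents_from_string('value')  # no header, defaults to STANDARD

        classes = dict((k.name, k.storage_class) for k in bucket)
        assert classes == {'rr-key': 'REDUCED_REDUNDANCY', 'std-key': 'STANDARD'}
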
From 8ada1d7829705d39a8ccac46022df0d165594fd2 Mon Sep 17 00:00:00 2001
From: Konstantinos Koukopoulos
Date: Wed, 26 Mar 2014 19:15:08 +0200
Subject: [PATCH 4/5] support restore operation on s3 keys

---
 moto/s3/models.py        | 13 ++++++++++++-
 moto/s3/responses.py     | 12 +++++++++++-
 tests/test_s3/test_s3.py | 36 ++++++++++++++++++++++++++++++++++++
 3 files changed, 59 insertions(+), 2 deletions(-)

diff --git a/moto/s3/models.py b/moto/s3/models.py
index 7b3f78779..28a828875 100644
--- a/moto/s3/models.py
+++ b/moto/s3/models.py
@@ -20,6 +20,7 @@ class FakeKey(object):
         self.last_modified = datetime.datetime.now()
         self._storage_class = storage
         self._metadata = {}
+        self._expiry = None
 
     def copy(self, new_name = None):
         r = copy.deepcopy(self)
@@ -40,6 +41,9 @@ class FakeKey(object):
         self.value += value
         self.last_modified = datetime.datetime.now()
 
+    def restore(self, days):
+        self._expiry = datetime.datetime.now() + datetime.timedelta(days)
+
     @property
     def etag(self):
         value_md5 = hashlib.md5()
@@ -69,7 +73,9 @@ class FakeKey(object):
         if self._storage_class is not None:
             if self._storage_class != 'STANDARD':
                 r['x-amz-storage-class'] = self._storage_class
-
+        if self._expiry is not None:
+            r['x-amz-restore'] = 'ongoing-request="false", expiry-date={}'.format(
+                self.expiry_date)
         return r
 
     @property
@@ -83,6 +89,11 @@ class FakeKey(object):
         else:
             return 'STANDARD'
 
+    @property
+    def expiry_date(self):
+        if self._expiry is not None:
+            return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")
+
 
 class FakeMultipart(object):
     def __init__(self, key_name):
diff --git a/moto/s3/responses.py b/moto/s3/responses.py
index a0ee1fd10..ed45789e0 100644
--- a/moto/s3/responses.py
+++ b/moto/s3/responses.py
@@ -6,6 +6,7 @@ from jinja2 import Template
 from .exceptions import BucketAlreadyExists
 from .models import s3_backend
 from .utils import bucket_name_from_url
+from xml.dom import minidom
 
 
 def parse_key_name(pth):
@@ -261,8 +262,17 @@ class ResponseObject(object):
                 )
                 template = Template(S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR)
                 return 400, headers, template.render()
+            elif parsed_url.query == 'restore':
+                es = minidom.parseString(body).getElementsByTagName('Days')
+                days = es[0].childNodes[0].wholeText
+                key = self.backend.get_key(bucket_name, key_name)
+                r = 202
+                if key.expiry_date is not None:
+                    r = 200
+                key.restore(int(days))
+                return r, headers, ""
             else:
-                raise NotImplementedError("Method POST had only been implemented for multipart uploads so far")
+                raise NotImplementedError("Method POST had only been implemented for multipart uploads and restore operations, so far")
         else:
             raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index e72c8e517..e32cd3992 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -355,3 +355,39 @@ def test_copy_key_reduced_redundancy():
     keys = dict([(k.name, k) for k in bucket])
     keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY")
     keys['the-key'].storage_class.should.equal("STANDARD")
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3
+def test_restore_key():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+    list(bucket)[0].ongoing_restore.should.be.none
+    key.restore(1)
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
+    key.ongoing_restore.should.be.false
+    key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
+    key.restore(2)
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
+    key.ongoing_restore.should.be.false
+    key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT")
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3
+def test_restore_key_headers():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+    key.restore(1, headers={'foo': 'bar'})
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
+    key.ongoing_restore.should.be.false
+    key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
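
For reference, this is the request shape the new POST ?restore branch consumes:
boto's key.restore(days=1) sends a small RestoreRequest document, and the
branch answers 202 on a first restore and 200 when expiry_date is already set
(loosely mirroring S3, which answers 200 for an already-restored object). A
stand-alone sketch of the minidom extraction; the xmlns value here is
illustrative:

    from xml.dom import minidom

    body = ('<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">'
            '<Days>1</Days></RestoreRequest>')

    days = (minidom.parseString(body)
            .getElementsByTagName('Days')[0]
            .childNodes[0].wholeText)
    assert int(days) == 1
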
2012 12:00:00 GMT") + key.restore(2) + key = bucket.get_key('the-key') + key.ongoing_restore.should_not.be.none + key.ongoing_restore.should.be.false + key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT") + + +@freeze_time("2012-01-01 12:00:00") +@mock_s3 +def test_restore_key_headers(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + key.restore(1, headers={'foo': 'bar'}) + key = bucket.get_key('the-key') + key.ongoing_restore.should_not.be.none + key.ongoing_restore.should.be.false + key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") From 11938dae7a020c20ce38d7ea9086c49c26eab0e2 Mon Sep 17 00:00:00 2001 From: Konstantinos Koukopoulos Date: Thu, 27 Mar 2014 10:00:50 +0200 Subject: [PATCH 5/5] fix restore header formatting --- moto/s3/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 28a828875..8d556cb91 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -74,8 +74,8 @@ class FakeKey(object): if self._storage_class != 'STANDARD': r['x-amz-storage-class'] = self._storage_class if self._expiry is not None: - r['x-amz-restore'] = 'ongoing-request="false", expiry-date={}'.format( - self.expiry_date) + rhdr = 'ongoing-request="false", expiry-date="{0}"' + r['x-amz-restore'] = rhdr.format(self.expiry_date) return r @property