commit 069c48b43a
diff --git a/moto/s3/models.py b/moto/s3/models.py
@@ -2,6 +2,7 @@ import os
 import base64
 import datetime
 import hashlib
+import copy
 
 from moto.core import BaseBackend
 from moto.core.utils import iso_8601_datetime, rfc_1123_datetime
@@ -13,19 +14,36 @@ UPLOAD_PART_MIN_SIZE = 5242880
 
 
 class FakeKey(object):
-    def __init__(self, name, value):
+    def __init__(self, name, value, storage=None):
         self.name = name
         self.value = value
         self.last_modified = datetime.datetime.now()
+        self._storage_class = storage
         self._metadata = {}
+        self._expiry = None
+
+    def copy(self, new_name = None):
+        r = copy.deepcopy(self)
+        if new_name is not None:
+            r.name = new_name
+        return r
 
     def set_metadata(self, key, metadata):
         self._metadata[key] = metadata
 
+    def clear_metadata(self):
+        self._metadata = {}
+
+    def set_storage_class(self, storage_class):
+        self._storage_class = storage_class
+
     def append_to_value(self, value):
         self.value += value
         self.last_modified = datetime.datetime.now()
 
+    def restore(self, days):
+        self._expiry = datetime.datetime.now() + datetime.timedelta(days)
+
     @property
     def etag(self):
         value_md5 = hashlib.md5()
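A quick sketch (not part of the commit) of how the reworked `FakeKey` behaves; it assumes the class is imported from `moto.s3.models` and uses made-up key names and values:

```python
# Hypothetical usage of the new FakeKey API; names and values are illustrative.
from moto.s3.models import FakeKey

key = FakeKey(name='the-key', value='some value', storage='REDUCED_REDUNDANCY')
key.set_metadata('x-amz-meta-md', 'Metadatastring')

# copy() returns a deep copy, so the clone can diverge without touching the original.
clone = key.copy(new_name='new-key')
clone.append_to_value(' extended')
assert key.value == 'some value'
assert clone.value == 'some value extended'

# restore() stamps an expiry `days` from now, surfaced later via expiry_date.
key.restore(1)
assert key.expiry_date is not None
```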
@@ -48,15 +66,34 @@ class FakeKey(object):
 
     @property
     def response_dict(self):
-        return {
+        r = {
             'etag': self.etag,
             'last-modified': self.last_modified_RFC1123,
         }
+        if self._storage_class is not None:
+            if self._storage_class != 'STANDARD':
+                r['x-amz-storage-class'] = self._storage_class
+        if self._expiry is not None:
+            rhdr = 'ongoing-request="false", expiry-date="{0}"'
+            r['x-amz-restore'] = rhdr.format(self.expiry_date)
+        return r
 
     @property
     def size(self):
         return len(self.value)
 
+    @property
+    def storage_class(self):
+        if self._storage_class is not None:
+            return self._storage_class
+        else:
+            return 'STANDARD'
+
+    @property
+    def expiry_date(self):
+        if self._expiry is not None:
+            return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")
+
 
 class FakeMultipart(object):
     def __init__(self, key_name):
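Note that the nested `if` deliberately suppresses the header for keys explicitly stored as `STANDARD`, so default uploads keep responding without an `x-amz-storage-class` header. For illustration only (the etag and dates are invented), a restored reduced-redundancy key would yield a dict shaped like this:

```python
# Illustrative response_dict contents for a REDUCED_REDUNDANCY key with a
# completed restore pending expiry; all values here are made up.
expected_headers = {
    'etag': '"5eb63bbbe01eeed093cb22bb8f5acdc3"',
    'last-modified': 'Sun, 01 Jan 2012 12:00:00 GMT',
    'x-amz-storage-class': 'REDUCED_REDUNDANCY',
    'x-amz-restore': 'ongoing-request="false", expiry-date="Mon, 02 Jan 2012 12:00:00 GMT"',
}
```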
@@ -130,11 +167,12 @@ class S3Backend(BaseBackend):
             return self.buckets.pop(bucket_name)
         return None
 
-    def set_key(self, bucket_name, key_name, value):
+    def set_key(self, bucket_name, key_name, value, storage=None):
         key_name = clean_key_name(key_name)
 
         bucket = self.buckets[bucket_name]
-        new_key = FakeKey(name=key_name, value=value)
+        new_key = FakeKey(name=key_name, value=value,
+                          storage=storage)
         bucket.keys[key_name] = new_key
 
         return new_key
@@ -221,11 +259,16 @@ class S3Backend(BaseBackend):
         bucket = self.buckets[bucket_name]
         return bucket.keys.pop(key_name)
 
-    def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name):
+    def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name, storage=None):
         src_key_name = clean_key_name(src_key_name)
         dest_key_name = clean_key_name(dest_key_name)
         src_bucket = self.buckets[src_bucket_name]
         dest_bucket = self.buckets[dest_bucket_name]
-        dest_bucket.keys[dest_key_name] = src_bucket.keys[src_key_name]
+        key = src_bucket.keys[src_key_name]
+        if dest_key_name != src_key_name:
+            key = key.copy(dest_key_name)
+        dest_bucket.keys[dest_key_name] = key
+        if storage is not None:
+            dest_bucket.keys[dest_key_name].set_storage_class(storage)
 
 s3_backend = S3Backend()
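The `key.copy(dest_key_name)` call is the important part of this hunk: the old one-liner made the destination entry an alias of the source `FakeKey`, so any later mutation of one key silently changed the other. It is also why `set_storage_class` can safely be applied to the stored copy without downgrading the source. A minimal sketch of the aliasing hazard, using a stand-in class rather than moto's own:

```python
import copy

class StubKey(object):  # stand-in for FakeKey, for illustration only
    def __init__(self, name, value):
        self.name, self.value = name, value

src_keys = {'the-key': StubKey('the-key', 'v1')}
dest_keys = {}

# Old behaviour: both buckets share one object, so writes leak across.
dest_keys['new-key'] = src_keys['the-key']
dest_keys['new-key'].value = 'v2'
assert src_keys['the-key'].value == 'v2'  # the source was mutated too

# New behaviour: a deep copy keeps the two keys independent.
dest_keys['new-key'] = copy.deepcopy(src_keys['the-key'])
dest_keys['new-key'].value = 'v3'
assert src_keys['the-key'].value == 'v2'  # the source is untouched
```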
diff --git a/moto/s3/responses.py b/moto/s3/responses.py
@@ -6,6 +6,7 @@ from jinja2 import Template
 from .exceptions import BucketAlreadyExists
 from .models import s3_backend
 from .utils import bucket_name_from_url
+from xml.dom import minidom
 
 
 def parse_key_name(pth):
@@ -128,6 +129,18 @@ class ResponseObject(object):
             status_code, headers, response_content = response
             return status_code, headers, response_content
 
+    def _key_set_metadata(self, request, key, replace=False):
+        meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE)
+        if replace is True:
+            key.clear_metadata()
+        for header in request.headers:
+            if isinstance(header, basestring):
+                result = meta_regex.match(header)
+                if result:
+                    meta_key = result.group(0).lower()
+                    metadata = request.headers[header]
+                    key.set_metadata(meta_key, metadata)
+
     def _key_response(self, request, full_url, headers):
         parsed_url = urlparse(full_url)
         query = parse_qs(parsed_url.query)
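One subtlety carried over from the inlined version this helper replaces: `result.group(0)` is the whole matched header, not the captured suffix, so metadata keys keep their `x-amz-meta-` prefix. A tiny demonstration:

```python
import re

meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE)

m = meta_regex.match('X-Amz-Meta-Owner')
assert m.group(0).lower() == 'x-amz-meta-owner'  # stored as the metadata key
assert m.group(1) == 'Owner'                     # captured suffix, unused here
```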
@@ -181,10 +194,17 @@ class ResponseObject(object):
             headers.update(key.response_dict)
             return 200, headers, response
 
+        storage_class = request.headers.get('x-amz-storage-class', 'STANDARD')
+
         if 'x-amz-copy-source' in request.headers:
             # Copy key
             src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/", 1)
-            self.backend.copy_key(src_bucket, src_key, bucket_name, key_name)
+            self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
+                                  storage=storage_class)
+            mdirective = request.headers.get('x-amz-metadata-directive')
+            if mdirective is not None and mdirective == 'REPLACE':
+                new_key = self.backend.get_key(bucket_name, key_name)
+                self._key_set_metadata(request, new_key, replace=True)
             template = Template(S3_OBJECT_COPY_RESPONSE)
             return template.render(key=src_key)
         streaming_request = hasattr(request, 'streaming') and request.streaming
@@ -197,18 +217,11 @@ class ResponseObject(object):
                 new_key = self.backend.append_to_key(bucket_name, key_name, body)
             else:
                 # Initial data
-                new_key = self.backend.set_key(bucket_name, key_name, body)
+                new_key = self.backend.set_key(bucket_name, key_name, body,
+                                               storage=storage_class)
             request.streaming = True
+            self._key_set_metadata(request, new_key)
 
-            #Metadata
-            meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE)
-            for header in request.headers:
-                if isinstance(header, basestring):
-                    result = meta_regex.match(header)
-                    if result:
-                        meta_key = result.group(0).lower()
-                        metadata = request.headers[header]
-                        new_key.set_metadata(meta_key, metadata)
             template = Template(S3_OBJECT_RESPONSE)
             headers.update(new_key.response_dict)
             return 200, headers, template.render(key=new_key)
@@ -252,8 +265,17 @@ class ResponseObject(object):
                 )
                 template = Template(S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR)
                 return 400, headers, template.render()
+            elif parsed_url.query == 'restore':
+                es = minidom.parseString(body).getElementsByTagName('Days')
+                days = es[0].childNodes[0].wholeText
+                key = self.backend.get_key(bucket_name, key_name)
+                r = 202
+                if key.expiry_date is not None:
+                    r = 200
+                key.restore(int(days))
+                return r, headers, ""
             else:
-                raise NotImplementedError("Method POST had only been implemented for multipart uploads so far")
+                raise NotImplementedError("Method POST has only been implemented for multipart uploads and restore operations so far")
         else:
             raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
 
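For context on the minidom lines: a RestoreObject POST carries a small XML body, and the handler only needs the `Days` element; it then answers 202 for a fresh restore and 200 when the key already had an expiry. A sketch with an illustrative request body (shape per the S3 restore API; the namespace is decorative here since `getElementsByTagName` matches on the literal tag name):

```python
from xml.dom import minidom

# Illustrative RestoreRequest body for POST /key?restore.
body = """<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Days>2</Days>
</RestoreRequest>"""

es = minidom.parseString(body).getElementsByTagName('Days')
days = es[0].childNodes[0].wholeText
assert int(days) == 2
```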
@@ -287,12 +309,11 @@ S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
       <LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
       <ETag>{{ key.etag }}</ETag>
       <Size>{{ key.size }}</Size>
-      <StorageClass>STANDARD</StorageClass>
+      <StorageClass>{{ key.storage_class }}</StorageClass>
       <Owner>
        <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
        <DisplayName>webfile</DisplayName>
       </Owner>
-      <StorageClass>STANDARD</StorageClass>
     </Contents>
   {% endfor %}
   {% if delimiter %}
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
@@ -143,6 +143,22 @@ def test_set_metadata():
     bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
 
 
+@mock_s3
+def test_copy_key_replace_metadata():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_metadata('md', 'Metadatastring')
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-key',
+                    metadata={'momd': 'Mometadatastring'})
+
+    bucket.get_key("new-key").get_metadata('md').should.be.none
+    bucket.get_key("new-key").get_metadata('momd').should.equal('Mometadatastring')
+
+
 @freeze_time("2012-01-01 12:00:00")
 @mock_s3
 def test_last_modified():
@@ -310,3 +326,68 @@ def test_bucket_key_listing_order():
     delimiter = '/'
     keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
     keys.should.equal([u'toplevel/x/'])
+
+
+@mock_s3
+def test_key_with_reduced_redundancy():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test_bucket_name')
+
+    key = Key(bucket, 'test_rr_key')
+    key.set_contents_from_string('value1', reduced_redundancy=True)
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    list(bucket)[0].storage_class.should.equal('REDUCED_REDUNDANCY')
+
+
+@mock_s3
+def test_copy_key_reduced_redundancy():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-key', storage_class='REDUCED_REDUNDANCY')
+
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    keys = dict([(k.name, k) for k in bucket])
+    keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY")
+    keys['the-key'].storage_class.should.equal("STANDARD")
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3
+def test_restore_key():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+    list(bucket)[0].ongoing_restore.should.be.none
+    key.restore(1)
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
+    key.ongoing_restore.should.be.false
+    key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
+    key.restore(2)
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
+    key.ongoing_restore.should.be.false
+    key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT")
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3
+def test_restore_key_headers():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+    key.restore(1, headers={'foo': 'bar'})
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
+    key.ongoing_restore.should.be.false
+    key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")