support reduced redundancy storage
parent 1f49148a64
commit c9d5dffa24
moto/s3/models.py

@@ -14,10 +14,11 @@ UPLOAD_PART_MIN_SIZE = 5242880
 class FakeKey(object):
-    def __init__(self, name, value):
+    def __init__(self, name, value, storage=None):
         self.name = name
         self.value = value
         self.last_modified = datetime.datetime.now()
+        self._storage_class = storage
         self._metadata = {}

     def copy(self, new_name = None):
@@ -32,6 +33,9 @@ class FakeKey(object):
     def clear_metadata(self):
         self._metadata = {}

+    def set_storage_class(self, storage_class):
+        self._storage_class = storage_class
+
     def append_to_value(self, value):
         self.value += value
         self.last_modified = datetime.datetime.now()
@@ -58,15 +62,27 @@ class FakeKey(object):

     @property
     def response_dict(self):
-        return {
+        r = {
             'etag': self.etag,
             'last-modified': self.last_modified_RFC1123,
         }
+        if self._storage_class is not None:
+            if self._storage_class != 'STANDARD':
+                r['x-amz-storage-class'] = self._storage_class
+
+        return r

     @property
     def size(self):
         return len(self.value)

+    @property
+    def storage_class(self):
+        if self._storage_class is not None:
+            return self._storage_class
+        else:
+            return 'STANDARD'


 class FakeMultipart(object):
     def __init__(self, key_name):
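Taken together, these FakeKey changes default the storage class to 'STANDARD' and only emit the x-amz-storage-class response header for a non-standard class. A minimal sketch of the intended behavior (the import path is an assumption based on moto's module layout):

    # Sketch only; assumes moto.s3.models exposes FakeKey as in this diff.
    from moto.s3.models import FakeKey

    rr_key = FakeKey(name='photo.jpg', value='data', storage='REDUCED_REDUNDANCY')
    print(rr_key.storage_class)                            # 'REDUCED_REDUNDANCY'
    print('x-amz-storage-class' in rr_key.response_dict)   # True

    std_key = FakeKey(name='doc.txt', value='data')
    print(std_key.storage_class)                           # falls back to 'STANDARD'
    print('x-amz-storage-class' in std_key.response_dict)  # False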
@@ -140,11 +156,12 @@ class S3Backend(BaseBackend):
             return self.buckets.pop(bucket_name)
         return None

-    def set_key(self, bucket_name, key_name, value):
+    def set_key(self, bucket_name, key_name, value, storage=None):
         key_name = clean_key_name(key_name)

         bucket = self.buckets[bucket_name]
-        new_key = FakeKey(name=key_name, value=value)
+        new_key = FakeKey(name=key_name, value=value,
+                          storage=storage)
         bucket.keys[key_name] = new_key

         return new_key
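At the backend level, set_key now threads the storage class straight into the new FakeKey. A hedged usage sketch; s3_backend is the module-level instance from this diff, while create_bucket is assumed to be an existing backend helper:

    from moto.s3.models import s3_backend

    s3_backend.create_bucket('demo')  # assumption: existing backend method
    key = s3_backend.set_key('demo', 'my-key', 'some value',
                             storage='REDUCED_REDUNDANCY')
    print(key.storage_class)  # 'REDUCED_REDUNDANCY'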
@@ -231,7 +248,7 @@ class S3Backend(BaseBackend):
         bucket = self.buckets[bucket_name]
         return bucket.keys.pop(key_name)

-    def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name):
+    def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name, storage=None):
         src_key_name = clean_key_name(src_key_name)
         dest_key_name = clean_key_name(dest_key_name)
         src_bucket = self.buckets[src_bucket_name]
@@ -240,5 +257,7 @@ class S3Backend(BaseBackend):
         if dest_key_name != src_key_name:
             key = key.copy(dest_key_name)
         dest_bucket.keys[dest_key_name] = key
+        if storage is not None:
+            dest_bucket.keys[dest_key_name].set_storage_class(storage)

 s3_backend = S3Backend()
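Note that copy_key applies the override only when storage is not None; otherwise the copied key keeps whatever class the source carried. A sketch continuing the example above (get_key is the existing backend accessor also referenced later in this diff):

    s3_backend.copy_key('demo', 'my-key', 'demo', 'my-copy',
                        storage='STANDARD')
    print(s3_backend.get_key('demo', 'my-copy').storage_class)  # 'STANDARD'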
moto/s3/responses.py

@@ -190,10 +190,13 @@ class ResponseObject(object):
             headers.update(key.response_dict)
             return 200, headers, response

+        storage_class = request.headers.get('x-amz-storage-class', 'STANDARD')
+
         if 'x-amz-copy-source' in request.headers:
             # Copy key
             src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/", 1)
-            self.backend.copy_key(src_bucket, src_key, bucket_name, key_name)
+            self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
+                                  storage=storage_class)
             mdirective = request.headers.get('x-amz-metadata-directive')
             if mdirective is not None and mdirective == 'REPLACE':
                 new_key = self.backend.get_key(bucket_name, key_name)
@@ -210,7 +213,8 @@ class ResponseObject(object):
             new_key = self.backend.append_to_key(bucket_name, key_name, body)
         else:
             # Initial data
-            new_key = self.backend.set_key(bucket_name, key_name, body)
+            new_key = self.backend.set_key(bucket_name, key_name, body,
+                                           storage=storage_class)
             request.streaming = True
             self._key_set_metadata(request, new_key)
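The handler reads the class once from the request and forwards it to both the copy and the initial-write branches, so a request without the header degrades to 'STANDARD'. The extraction itself is just a dict lookup with a default; a stand-alone sketch with stand-in headers (no real request object):

    headers = {'x-amz-storage-class': 'REDUCED_REDUNDANCY'}
    storage_class = headers.get('x-amz-storage-class', 'STANDARD')
    # storage_class is then passed as storage= to set_key / copy_key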
@@ -292,12 +296,11 @@ S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
           <LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
           <ETag>{{ key.etag }}</ETag>
           <Size>{{ key.size }}</Size>
-          <StorageClass>STANDARD</StorageClass>
+          <StorageClass>{{ key.storage_class }}</StorageClass>
           <Owner>
             <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
             <DisplayName>webfile</DisplayName>
           </Owner>
-          <StorageClass>STANDARD</StorageClass>
         </Contents>
     {% endfor %}
     {% if delimiter %}
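With the duplicate element dropped, each <Contents> entry in the bucket listing now reports the key's actual class. A quick render of just the substituted fragment (illustrative values; assumes the templates are rendered with jinja2, as moto does elsewhere):

    from jinja2 import Template

    fragment = Template('<StorageClass>{{ key.storage_class }}</StorageClass>')

    class FakeKeyStub(object):  # stand-in for a FakeKey
        storage_class = 'REDUCED_REDUNDANCY'

    print(fragment.render(key=FakeKeyStub()))
    # <StorageClass>REDUCED_REDUNDANCY</StorageClass>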
tests/test_s3/test_s3.py

@@ -326,3 +326,32 @@ def test_bucket_key_listing_order():
     delimiter = '/'
     keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
     keys.should.equal([u'toplevel/x/'])
+
+
+@mock_s3
+def test_key_with_reduced_redundancy():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test_bucket_name')
+
+    key = Key(bucket, 'test_rr_key')
+    key.set_contents_from_string('value1', reduced_redundancy=True)
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    list(bucket)[0].storage_class.should.equal('REDUCED_REDUNDANCY')
+
+
+@mock_s3
+def test_copy_key_reduced_redundancy():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-key', storage_class='REDUCED_REDUNDANCY')
+
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    keys = dict([(k.name, k) for k in bucket])
+    keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY")
+    keys['the-key'].storage_class.should.equal("STANDARD")