Cleanup S3 a bit and add more coverage

This commit is contained in:
Steve Pulec 2014-03-30 11:50:36 -04:00
parent 815bb8846c
commit 1767d17018
4 changed files with 250 additions and 195 deletions

View File

@ -14,7 +14,7 @@ UPLOAD_PART_MIN_SIZE = 5242880
class FakeKey(object):
def __init__(self, name, value, storage=None):
def __init__(self, name, value, storage="STANDARD"):
self.name = name
self.value = value
self.last_modified = datetime.datetime.now()
@ -70,7 +70,6 @@ class FakeKey(object):
'etag': self.etag,
'last-modified': self.last_modified_RFC1123,
}
if self._storage_class is not None:
if self._storage_class != 'STANDARD':
r['x-amz-storage-class'] = self._storage_class
if self._expiry is not None:
@ -84,10 +83,7 @@ class FakeKey(object):
@property
def storage_class(self):
    """Storage class reported for this key.

    Falls back to ``'STANDARD'`` when no explicit storage class has
    been recorded on the key.
    """
    return self._storage_class if self._storage_class is not None else 'STANDARD'
@property
def expiry_date(self):

View File

@ -29,7 +29,6 @@ class ResponseObject(object):
response = self._bucket_response(request, full_url, headers)
if isinstance(response, basestring):
return 200, headers, response
else:
status_code, headers, response_content = response
return status_code, headers, response_content
@ -45,13 +44,26 @@ class ResponseObject(object):
return self.all_buckets()
if method == 'HEAD':
return self._bucket_response_head(bucket_name, headers)
elif method == 'GET':
return self._bucket_response_get(bucket_name, querystring, headers)
elif method == 'PUT':
return self._bucket_response_put(bucket_name, headers)
elif method == 'DELETE':
return self._bucket_response_delete(bucket_name, headers)
elif method == 'POST':
return self._bucket_response_post(request, bucket_name, headers)
else:
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
def _bucket_response_head(self, bucket_name, headers):
bucket = self.backend.get_bucket(bucket_name)
if bucket:
return 200, headers, ""
else:
return 404, headers, ""
elif method == 'GET':
def _bucket_response_get(self, bucket_name, querystring, headers):
bucket = self.backend.get_bucket(bucket_name)
if bucket:
prefix = querystring.get('prefix', [None])[0]
@ -67,14 +79,16 @@ class ResponseObject(object):
)
else:
return 404, headers, ""
elif method == 'PUT':
def _bucket_response_put(self, bucket_name, headers):
    """Handle a PUT request against a bucket URL (bucket creation).

    Returns a 409 tuple when the bucket already exists; otherwise
    returns the rendered create-bucket XML as a bare string (the caller
    wraps bare strings in a 200 response).
    """
    try:
        created = self.backend.create_bucket(bucket_name)
    except BucketAlreadyExists:
        return 409, headers, ""
    return Template(S3_BUCKET_CREATE_RESPONSE).render(bucket=created)
elif method == 'DELETE':
def _bucket_response_delete(self, bucket_name, headers):
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket is None:
# Non-existant bucket
@ -88,7 +102,8 @@ class ResponseObject(object):
# Tried to delete a bucket that still has keys
template = Template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, headers, template.render(bucket=removed_bucket)
elif method == 'POST':
def _bucket_response_post(self, request, bucket_name, headers):
#POST to bucket-url should create file from form
if hasattr(request, 'form'):
#Not HTTPretty
@ -118,8 +133,6 @@ class ResponseObject(object):
metadata = form[form_id]
new_key.set_metadata(meta_key, metadata)
return 200, headers, ""
else:
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
def key_response(self, request, full_url, headers):
response = self._key_response(request, full_url, headers)
@ -147,7 +160,6 @@ class ResponseObject(object):
method = request.method
key_name = self.parse_key_name(parsed_url.path)
bucket_name = self.bucket_name_from_url(full_url)
if hasattr(request, 'body'):
@ -158,6 +170,19 @@ class ResponseObject(object):
body = request.data
if method == 'GET':
return self._key_response_get(bucket_name, query, key_name, headers)
elif method == 'PUT':
return self._key_response_put(request, body, bucket_name, query, key_name, headers)
elif method == 'HEAD':
return self._key_response_head(bucket_name, key_name, headers)
elif method == 'DELETE':
return self._key_response_delete(bucket_name, query, key_name, headers)
elif method == 'POST':
return self._key_response_post(body, parsed_url, bucket_name, query, key_name, headers)
else:
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
def _key_response_get(self, bucket_name, query, key_name, headers):
if 'uploadId' in query:
upload_id = query['uploadId'][0]
parts = self.backend.list_multipart(bucket_name, upload_id)
@ -175,7 +200,8 @@ class ResponseObject(object):
return 200, headers, key.value
else:
return 404, headers, ""
if method == 'PUT':
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
if 'uploadId' in query and 'partNumber' in query:
upload_id = query['uploadId'][0]
part_number = int(query['partNumber'][0])
@ -225,7 +251,8 @@ class ResponseObject(object):
template = Template(S3_OBJECT_RESPONSE)
headers.update(new_key.response_dict)
return 200, headers, template.render(key=new_key)
elif method == 'HEAD':
def _key_response_head(self, bucket_name, key_name, headers):
key = self.backend.get_key(bucket_name, key_name)
if key:
headers.update(key.metadata)
@ -233,7 +260,8 @@ class ResponseObject(object):
return 200, headers, ""
else:
return 404, headers, ""
elif method == 'DELETE':
def _key_response_delete(self, bucket_name, query, key_name, headers):
if 'uploadId' in query:
upload_id = query['uploadId'][0]
self.backend.cancel_multipart(bucket_name, upload_id)
@ -241,7 +269,8 @@ class ResponseObject(object):
removed_key = self.backend.delete_key(bucket_name, key_name)
template = Template(S3_DELETE_OBJECT_SUCCESS)
return 204, headers, template.render(bucket=removed_key)
elif method == 'POST':
def _key_response_post(self, body, parsed_url, bucket_name, query, key_name, headers):
if body == '' and parsed_url.query == 'uploads':
multipart = self.backend.initiate_multipart(bucket_name, key_name)
template = Template(S3_MULTIPART_INITIATE_RESPONSE)
@ -276,8 +305,6 @@ class ResponseObject(object):
return r, headers, ""
else:
raise NotImplementedError("Method POST had only been implemented for multipart uploads and restore operations, so far")
else:
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
S3ResponseInstance = ResponseObject(s3_backend, bucket_name_from_url, parse_key_name)

View File

@ -66,6 +66,35 @@ def test_multipart_upload():
bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2)
@mock_s3
def test_multipart_upload_with_copy_key():
    # A multipart upload whose second part is copied from an existing key
    # should concatenate the uploaded part with the copied key's contents.
    connection = boto.connect_s3('the_key', 'the_secret')
    container = connection.create_bucket("foobar")

    source = Key(container)
    source.key = "original-key"
    source.set_contents_from_string("key_value")

    upload = container.initiate_multipart_upload("the-key")
    first_part = '0' * 5242880  # minimum part size for non-final parts
    upload.upload_part_from_file(BytesIO(first_part), 1)
    upload.copy_part_from_key("foobar", "original-key", 2)
    upload.complete_upload()

    container.get_key("the-key").get_contents_as_string().should.equal(first_part + "key_value")
@mock_s3
def test_multipart_upload_cancel():
    # Cancelling a multipart upload after uploading a part should not raise.
    connection = boto.connect_s3('the_key', 'the_secret')
    container = connection.create_bucket("foobar")

    upload = container.initiate_multipart_upload("the-key")
    chunk = '0' * 5242880
    upload.upload_part_from_file(BytesIO(chunk), 1)
    upload.cancel_upload()

    # TODO: we really need some sort of assertion here, but we don't
    # currently have the ability to list multipart uploads for a bucket.
@mock_s3
def test_missing_key():
conn = boto.connect_s3('the_key', 'the_secret')

View File

@ -30,6 +30,9 @@ def test_s3_server_bucket_create():
res.status_code.should.equal(200)
res.data.should.contain("ListBucketResult")
res = test_client.get('/missing-bucket', 'http://localhost:5000')
res.status_code.should.equal(404)
res = test_client.put('/foobar/bar', 'http://localhost:5000', data='test value')
res.status_code.should.equal(200)