Clean up S3 model methods to better deal with missing buckets.

This commit is contained in:
Steve Pulec 2014-07-08 21:20:29 -04:00
parent be25a2ba99
commit 5e35348c0d
4 changed files with 104 additions and 76 deletions

View File

@ -1,2 +1,6 @@
class BucketAlreadyExists(Exception):
    """Raised when creating a bucket whose name is already in use."""
class MissingBucket(Exception):
    """Raised when an operation refers to a bucket that does not exist."""

View File

@ -7,7 +7,7 @@ import itertools
from moto.core import BaseBackend
from moto.core.utils import iso_8601_datetime, rfc_1123_datetime
from .exceptions import BucketAlreadyExists
from .exceptions import BucketAlreadyExists, MissingBucket
from .utils import clean_key_name, _VersionedKeyStore
UPLOAD_ID_BYTES = 43
@ -177,40 +177,42 @@ class S3Backend(BaseBackend):
return self.buckets.values()
def get_bucket(self, bucket_name):
return self.buckets.get(bucket_name)
try:
return self.buckets[bucket_name]
except KeyError:
raise MissingBucket()
def delete_bucket(self, bucket_name):
bucket = self.buckets.get(bucket_name)
if bucket:
if bucket.keys:
# Can't delete a bucket with keys
return False
else:
return self.buckets.pop(bucket_name)
return None
bucket = self.get_bucket(bucket_name)
if bucket.keys:
# Can't delete a bucket with keys
return False
else:
return self.buckets.pop(bucket_name)
def set_bucket_versioning(self, bucket_name, status):
self.buckets[bucket_name].versioning_status = status
self.get_bucket(bucket_name).versioning_status = status
def get_bucket_versioning(self, bucket_name):
return self.buckets[bucket_name].versioning_status
return self.get_bucket(bucket_name).versioning_status
def get_bucket_versions(self, bucket_name, delimiter=None,
encoding_type=None,
key_marker=None,
max_keys=None,
version_id_marker=None):
bucket = self.buckets[bucket_name]
bucket = self.get_bucket(bucket_name)
if any((delimiter, encoding_type, key_marker, version_id_marker)):
raise NotImplementedError(
"Called get_bucket_versions with some of delimiter, encoding_type, key_marker, version_id_marker")
return itertools.chain(*(l for _, l in bucket.keys.iterlists()))
def set_key(self, bucket_name, key_name, value, storage=None, etag=None):
key_name = clean_key_name(key_name)
bucket = self.buckets[bucket_name]
bucket = self.get_bucket(bucket_name)
old_key = bucket.keys.get(key_name, None)
if old_key is not None and bucket.is_versioned:
@ -248,14 +250,14 @@ class S3Backend(BaseBackend):
return key
def initiate_multipart(self, bucket_name, key_name):
bucket = self.buckets[bucket_name]
bucket = self.get_bucket(bucket_name)
new_multipart = FakeMultipart(key_name)
bucket.multiparts[new_multipart.id] = new_multipart
return new_multipart
def complete_multipart(self, bucket_name, multipart_id):
bucket = self.buckets[bucket_name]
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
value, etag = multipart.complete()
if value is None:
@ -265,27 +267,27 @@ class S3Backend(BaseBackend):
return self.set_key(bucket_name, multipart.key_name, value, etag=etag)
def cancel_multipart(self, bucket_name, multipart_id):
bucket = self.buckets[bucket_name]
bucket = self.get_bucket(bucket_name)
del bucket.multiparts[multipart_id]
def list_multipart(self, bucket_name, multipart_id):
bucket = self.buckets[bucket_name]
bucket = self.get_bucket(bucket_name)
return bucket.multiparts[multipart_id].list_parts()
def get_all_multiparts(self, bucket_name):
bucket = self.buckets[bucket_name]
bucket = self.get_bucket(bucket_name)
return bucket.multiparts
def set_part(self, bucket_name, multipart_id, part_id, value):
bucket = self.buckets[bucket_name]
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
return multipart.set_part(part_id, value)
def copy_part(self, dest_bucket_name, multipart_id, part_id,
src_bucket_name, src_key_name):
src_key_name = clean_key_name(src_key_name)
src_bucket = self.buckets[src_bucket_name]
dest_bucket = self.buckets[dest_bucket_name]
src_bucket = self.get_bucket(src_bucket_name)
dest_bucket = self.get_bucket(dest_bucket_name)
multipart = dest_bucket.multiparts[multipart_id]
return multipart.set_part(part_id, src_bucket.keys[src_key_name].value)
@ -317,14 +319,14 @@ class S3Backend(BaseBackend):
def delete_key(self, bucket_name, key_name):
key_name = clean_key_name(key_name)
bucket = self.buckets[bucket_name]
bucket = self.get_bucket(bucket_name)
return bucket.keys.pop(key_name)
def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name, storage=None):
src_key_name = clean_key_name(src_key_name)
dest_key_name = clean_key_name(dest_key_name)
src_bucket = self.buckets[src_bucket_name]
dest_bucket = self.buckets[dest_bucket_name]
src_bucket = self.get_bucket(src_bucket_name)
dest_bucket = self.get_bucket(dest_bucket_name)
key = src_bucket.keys[src_key_name]
if dest_key_name != src_key_name:
key = key.copy(dest_key_name)

View File

@ -3,7 +3,7 @@ import re
from jinja2 import Template
from .exceptions import BucketAlreadyExists
from .exceptions import BucketAlreadyExists, MissingBucket
from .models import s3_backend
from .utils import bucket_name_from_url
from xml.dom import minidom
@ -26,7 +26,11 @@ class ResponseObject(object):
return template.render(buckets=all_buckets)
def bucket_response(self, request, full_url, headers):
response = self._bucket_response(request, full_url, headers)
try:
response = self._bucket_response(request, full_url, headers)
except MissingBucket:
return 404, headers, ""
if isinstance(response, basestring):
return 200, headers, response
else:
@ -57,11 +61,12 @@ class ResponseObject(object):
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
def _bucket_response_head(self, bucket_name, headers):
bucket = self.backend.get_bucket(bucket_name)
if bucket:
return 200, headers, ""
else:
try:
self.backend.get_bucket(bucket_name)
except MissingBucket:
return 404, headers, ""
else:
return 200, headers, ""
def _bucket_response_get(self, bucket_name, querystring, headers):
if 'uploads' in querystring:
@ -104,22 +109,23 @@ class ResponseObject(object):
is_truncated='false',
)
bucket = self.backend.get_bucket(bucket_name)
if bucket:
prefix = querystring.get('prefix', [None])[0]
delimiter = querystring.get('delimiter', [None])[0]
result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter)
template = Template(S3_BUCKET_GET_RESPONSE)
return 200, headers, template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders
)
else:
try:
bucket = self.backend.get_bucket(bucket_name)
except MissingBucket:
return 404, headers, ""
prefix = querystring.get('prefix', [None])[0]
delimiter = querystring.get('delimiter', [None])[0]
result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter)
template = Template(S3_BUCKET_GET_RESPONSE)
return 200, headers, template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders
)
def _bucket_response_put(self, request, bucket_name, querystring, headers):
if 'versioning' in querystring:
ver = re.search('<Status>([A-Za-z]+)</Status>', request.body)
@ -138,12 +144,14 @@ class ResponseObject(object):
return 200, headers, template.render(bucket=new_bucket)
def _bucket_response_delete(self, bucket_name, headers):
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket is None:
try:
removed_bucket = self.backend.delete_bucket(bucket_name)
except MissingBucket:
# Non-existent bucket
template = Template(S3_DELETE_NON_EXISTING_BUCKET)
return 404, headers, template.render(bucket_name=bucket_name)
elif removed_bucket:
if removed_bucket:
# Bucket exists
template = Template(S3_DELETE_BUCKET_SUCCESS)
return 204, headers, template.render(bucket=removed_bucket)
@ -198,13 +206,17 @@ class ResponseObject(object):
key_name = k.firstChild.nodeValue
self.backend.delete_key(bucket_name, key_name)
deleted_names.append(key_name)
except KeyError as e:
except KeyError:
error_names.append(key_name)
return 200, headers, template.render(deleted=deleted_names,delete_errors=error_names)
return 200, headers, template.render(deleted=deleted_names, delete_errors=error_names)
def key_response(self, request, full_url, headers):
response = self._key_response(request, full_url, headers)
try:
response = self._key_response(request, full_url, headers)
except MissingBucket:
return 404, headers, ""
if isinstance(response, basestring):
return 200, headers, response
else:
@ -455,43 +467,43 @@ S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
S3_BUCKET_VERSIONING = """
<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """
<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<KeyMarker>{{ key_marker }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{{ key._version_id }}</VersionId>
<IsLatest>false</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<KeyMarker>{{ key_marker }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{{ key._version_id }}</VersionId>
<IsLatest>false</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
</ListVersionsResult>
"""

View File

@ -149,6 +149,16 @@ def test_list_multiparts():
uploads.should.be.empty
@mock_s3
def test_key_save_to_missing_bucket():
    """Saving a key into a bucket that was never created must fail.

    The backend returns a 404 for the missing bucket, which boto
    surfaces to the caller as an S3ResponseError.
    """
    connection = boto.connect_s3('the_key', 'the_secret')
    # validate=False skips the existence check so we get a handle to a
    # bucket that does not actually exist in the mocked backend.
    phantom_bucket = connection.get_bucket('mybucket', validate=False)
    orphan_key = Key(phantom_bucket)
    orphan_key.key = "the-key"
    orphan_key.set_contents_from_string.when.called_with("foobar").should.throw(S3ResponseError)
@mock_s3
def test_missing_key():
conn = boto.connect_s3('the_key', 'the_secret')