moto/moto/s3/models.py

264 lines
8.2 KiB
Python
Raw Normal View History

2013-03-26 14:52:33 +00:00
import os
import base64
2013-03-29 21:45:33 +00:00
import datetime
import hashlib
import copy
2013-02-18 21:09:40 +00:00
from moto.core import BaseBackend
2013-05-24 21:22:34 +00:00
from moto.core.utils import iso_8601_datetime, rfc_1123_datetime
from .exceptions import BucketAlreadyExists
2013-04-13 23:00:37 +00:00
from .utils import clean_key_name
2013-02-18 21:09:40 +00:00
# Number of random bytes used to generate a multipart upload id.
UPLOAD_ID_BYTES = 43
# Minimum size (5 MB) S3 enforces for every multipart part except the last.
UPLOAD_PART_MIN_SIZE = 5242880
2013-11-15 09:53:39 +00:00
2013-02-18 21:09:40 +00:00
class FakeKey(object):
    """In-memory stand-in for a single S3 object (key)."""

    def __init__(self, name, value, storage=None):
        self.name = name
        self.value = value
        self.last_modified = datetime.datetime.now()
        # None means "no explicit storage class"; reported as STANDARD.
        self._storage_class = storage
        self._metadata = {}

    def copy(self, new_name=None):
        """Return a deep copy of this key, optionally renamed."""
        r = copy.deepcopy(self)
        if new_name is not None:
            r.name = new_name
        return r

    def set_metadata(self, key, metadata):
        self._metadata[key] = metadata

    def clear_metadata(self):
        self._metadata = {}

    def set_storage_class(self, storage_class):
        self._storage_class = storage_class

    def append_to_value(self, value):
        """Append to the stored value and refresh the modification time."""
        self.value += value
        self.last_modified = datetime.datetime.now()

    @property
    def etag(self):
        value_md5 = hashlib.md5()
        data = self.value
        # Encode text values so hashing works for both bytes and str;
        # the previous bytes(self.value) call raises TypeError for str
        # on Python 3. bytearray (from multipart complete) hashes as-is.
        if not isinstance(data, (bytes, bytearray)):
            data = data.encode('utf-8')
        value_md5.update(data)
        return '"{0}"'.format(value_md5.hexdigest())

    @property
    def last_modified_ISO8601(self):
        return iso_8601_datetime(self.last_modified)

    @property
    def last_modified_RFC1123(self):
        # Different datetime formats depending on how the key is obtained
        # https://github.com/boto/boto/issues/466
        return rfc_1123_datetime(self.last_modified)

    @property
    def metadata(self):
        return self._metadata

    @property
    def response_dict(self):
        """Headers returned with GET/HEAD responses for this key."""
        r = {
            'etag': self.etag,
            'last-modified': self.last_modified_RFC1123,
        }
        # STANDARD is the S3 default and is omitted from response headers.
        if self._storage_class is not None:
            if self._storage_class != 'STANDARD':
                r['x-amz-storage-class'] = self._storage_class
        return r

    @property
    def size(self):
        return len(self.value)

    @property
    def storage_class(self):
        if self._storage_class is not None:
            return self._storage_class
        else:
            return 'STANDARD'
class FakeMultipart(object):
    """Tracks an in-progress S3 multipart upload for a single key."""

    def __init__(self, key_name):
        self.key_name = key_name
        # part number -> FakeKey holding that part's data
        self.parts = {}
        # Random upload id. b64encode returns bytes on Python 3, so decode
        # before the str.replace calls (the old code raised TypeError there);
        # '=' and '+' are stripped to keep the id URL/query-string safe.
        self.id = base64.b64encode(os.urandom(UPLOAD_ID_BYTES)).decode('ascii').replace('=', '').replace('+', '')

    def complete(self):
        """Concatenate all parts into a bytearray.

        Returns None when the upload is invalid: part numbers are not
        continuous, or any part except the last is below the S3 minimum
        part size. (The old code called len() on the None returned by
        list_parts() and crashed on non-continuous part numbers.)
        """
        parts = self.list_parts()
        if parts is None:
            return
        last_part_id = len(parts)
        total = bytearray()
        for part in parts:
            # Every part except the last must meet the minimum size.
            if part.name != last_part_id and len(part.value) < UPLOAD_PART_MIN_SIZE:
                return
            total.extend(part.value)
        return total

    def set_part(self, part_id, value):
        """Store one part; S3 part numbers start at 1."""
        if part_id < 1:
            return
        key = FakeKey(part_id, value)
        self.parts[part_id] = key
        return key

    def list_parts(self):
        """Return parts ordered by number, or None if numbering has gaps."""
        parts = []
        for expected_id, part_id in enumerate(sorted(self.parts.keys()), start=1):
            # Make sure part ids are continuous
            if expected_id != part_id:
                return
            parts.append(self.parts[part_id])
        return parts
class FakeBucket(object):
    """A named S3 bucket holding keys and in-flight multipart uploads."""

    def __init__(self, name):
        # key name -> FakeKey
        self.keys = {}
        # upload id -> FakeMultipart
        self.multiparts = {}
        self.name = name
class S3Backend(BaseBackend):
    """In-memory S3 implementation backing the moto mock responses."""

    def __init__(self):
        # bucket name -> FakeBucket
        self.buckets = {}

    def create_bucket(self, bucket_name):
        """Create a bucket; raise BucketAlreadyExists on a name collision."""
        if bucket_name in self.buckets:
            raise BucketAlreadyExists()
        new_bucket = FakeBucket(name=bucket_name)
        self.buckets[bucket_name] = new_bucket
        return new_bucket

    def get_all_buckets(self):
        return self.buckets.values()

    def get_bucket(self, bucket_name):
        """Return the bucket, or None if it does not exist."""
        return self.buckets.get(bucket_name)

    def delete_bucket(self, bucket_name):
        """Delete an empty bucket.

        Returns the removed bucket, False if the bucket still contains
        keys, or None if no such bucket exists.
        """
        bucket = self.buckets.get(bucket_name)
        if bucket:
            if bucket.keys:
                # Can't delete a bucket with keys
                return False
            else:
                return self.buckets.pop(bucket_name)
        return None

    def set_key(self, bucket_name, key_name, value, storage=None):
        """Create (or overwrite) a key in an existing bucket."""
        key_name = clean_key_name(key_name)
        bucket = self.buckets[bucket_name]
        new_key = FakeKey(name=key_name, value=value,
                          storage=storage)
        bucket.keys[key_name] = new_key
        return new_key

    def append_to_key(self, bucket_name, key_name, value):
        key_name = clean_key_name(key_name)
        key = self.get_key(bucket_name, key_name)
        key.append_to_value(value)
        return key

    def get_key(self, bucket_name, key_name):
        """Return the key, or None if the bucket or key does not exist."""
        key_name = clean_key_name(key_name)
        bucket = self.get_bucket(bucket_name)
        if bucket:
            return bucket.keys.get(key_name)

    def initiate_multipart(self, bucket_name, key_name):
        bucket = self.buckets[bucket_name]
        new_multipart = FakeMultipart(key_name)
        bucket.multiparts[new_multipart.id] = new_multipart
        return new_multipart

    def complete_multipart(self, bucket_name, multipart_id):
        """Assemble a multipart upload into a real key.

        Returns None (and keeps the upload open) when the parts are
        invalid; otherwise stores and returns the completed key.
        """
        bucket = self.buckets[bucket_name]
        multipart = bucket.multiparts[multipart_id]
        value = multipart.complete()
        if value is None:
            return
        del bucket.multiparts[multipart_id]
        return self.set_key(bucket_name, multipart.key_name, value)

    def cancel_multipart(self, bucket_name, multipart_id):
        bucket = self.buckets[bucket_name]
        del bucket.multiparts[multipart_id]

    def list_multipart(self, bucket_name, multipart_id):
        bucket = self.buckets[bucket_name]
        return bucket.multiparts[multipart_id].list_parts()

    def set_part(self, bucket_name, multipart_id, part_id, value):
        bucket = self.buckets[bucket_name]
        multipart = bucket.multiparts[multipart_id]
        return multipart.set_part(part_id, value)

    def copy_part(self, dest_bucket_name, multipart_id, part_id,
                  src_bucket_name, src_key_name):
        """Copy an existing key's value in as one part of a multipart upload."""
        src_key_name = clean_key_name(src_key_name)
        src_bucket = self.buckets[src_bucket_name]
        dest_bucket = self.buckets[dest_bucket_name]
        multipart = dest_bucket.multiparts[multipart_id]
        return multipart.set_part(part_id, src_bucket.keys[src_key_name].value)

    def prefix_query(self, bucket, prefix, delimiter):
        """Mimic S3 prefix/delimiter listing.

        Returns (keys sorted by name, sorted common-prefix strings).
        """
        key_results = set()
        folder_results = set()
        if prefix:
            # items() works on Python 2 and 3; iteritems() was Py2-only.
            for key_name, key in bucket.keys.items():
                if key_name.startswith(prefix):
                    key_without_prefix = key_name.replace(prefix, "", 1)
                    if delimiter and delimiter in key_without_prefix:
                        # If delimiter, we need to split out folder_results
                        key_without_delimiter = key_without_prefix.split(delimiter)[0]
                        folder_results.add("{0}{1}{2}".format(prefix, key_without_delimiter, delimiter))
                    else:
                        key_results.add(key)
        else:
            for key_name, key in bucket.keys.items():
                if delimiter and delimiter in key_name:
                    # If delimiter, we need to split out folder_results
                    folder_results.add(key_name.split(delimiter)[0])
                else:
                    key_results.add(key)
        key_results = sorted(key_results, key=lambda key: key.name)
        folder_results = sorted(folder_results)
        return key_results, folder_results

    def delete_key(self, bucket_name, key_name):
        key_name = clean_key_name(key_name)
        bucket = self.buckets[bucket_name]
        return bucket.keys.pop(key_name)

    def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name, storage=None):
        """Copy a key between buckets, optionally changing its storage class."""
        src_key_name = clean_key_name(src_key_name)
        dest_key_name = clean_key_name(dest_key_name)
        src_bucket = self.buckets[src_bucket_name]
        dest_bucket = self.buckets[dest_bucket_name]
        key = src_bucket.keys[src_key_name]
        # A same-name copy within a bucket reuses the object so a storage
        # class change below applies to the original key, as S3 does.
        if dest_key_name != src_key_name:
            key = key.copy(dest_key_name)
        dest_bucket.keys[dest_key_name] = key
        if storage is not None:
            dest_bucket.keys[dest_key_name].set_storage_class(storage)
s3_backend = S3Backend()