moto/moto/s3/models.py


# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
import base64
import datetime
import hashlib
import copy
import itertools
import codecs
import random
import string
import tempfile
import sys
import time
import uuid
import six
from bisect import insort
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
from .exceptions import (
BucketAlreadyExists,
MissingBucket,
InvalidBucketName,
InvalidPart,
InvalidRequest,
EntityTooSmall,
MissingKey,
InvalidNotificationDestination,
MalformedXML,
InvalidStorageClass,
InvalidTargetBucketForLogging,
DuplicateTagKeys,
CrossLocationLoggingProhibitted,
NoSuchPublicAccessBlockConfiguration,
InvalidPublicAccessBlockConfiguration,
)
from .utils import clean_key_name, _VersionedKeyStore
MAX_BUCKET_NAME_LENGTH = 63
MIN_BUCKET_NAME_LENGTH = 3
UPLOAD_ID_BYTES = 43
UPLOAD_PART_MIN_SIZE = 5242880
STORAGE_CLASS = [
"STANDARD",
"REDUCED_REDUNDANCY",
"STANDARD_IA",
"ONEZONE_IA",
"INTELLIGENT_TIERING",
"GLACIER",
"DEEP_ARCHIVE",
]
DEFAULT_KEY_BUFFER_SIZE = 16 * 1024 * 1024
DEFAULT_TEXT_ENCODING = sys.getdefaultencoding()
OWNER = "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a"
class FakeDeleteMarker(BaseModel):
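    """A deleted-object placeholder, created when a key is deleted in a
    versioned bucket without an explicit version id."""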
def __init__(self, key):
self.key = key
self.name = key.name
self.last_modified = datetime.datetime.utcnow()
self._version_id = str(uuid.uuid4())
@property
def last_modified_ISO8601(self):
return iso_8601_datetime_with_milliseconds(self.last_modified)
@property
def version_id(self):
return self._version_id
class FakeKey(BaseModel):
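    """An S3 object. The value is spooled to a temporary file once it
    exceeds ``max_buffer_size``, so large bodies need not stay in memory."""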
def __init__(
self,
name,
value,
storage="STANDARD",
etag=None,
is_versioned=False,
version_id=0,
max_buffer_size=DEFAULT_KEY_BUFFER_SIZE,
multipart=None,
):
self.name = name
self.last_modified = datetime.datetime.utcnow()
self.acl = get_canned_acl("private")
self.website_redirect_location = None
self._storage_class = storage if storage else "STANDARD"
self._metadata = {}
self._expiry = None
self._etag = etag
self._version_id = version_id
self._is_versioned = is_versioned
self._tagging = FakeTagging()
self.multipart = multipart
self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size)
self._max_buffer_size = max_buffer_size
self.value = value
@property
def version_id(self):
return self._version_id
@property
def value(self):
self._value_buffer.seek(0)
return self._value_buffer.read()
@value.setter
def value(self, new_value):
self._value_buffer.seek(0)
self._value_buffer.truncate()
# Hack for working around moto's own unit tests; this probably won't
# actually get hit in normal use.
if isinstance(new_value, six.text_type):
new_value = new_value.encode(DEFAULT_TEXT_ENCODING)
self._value_buffer.write(new_value)
def copy(self, new_name=None, new_is_versioned=None):
r = copy.deepcopy(self)
if new_name is not None:
r.name = new_name
if new_is_versioned is not None:
r._is_versioned = new_is_versioned
r.refresh_version()
return r
def set_metadata(self, metadata, replace=False):
if replace:
self._metadata = {}
self._metadata.update(metadata)
def set_tagging(self, tagging):
self._tagging = tagging
def set_storage_class(self, storage):
if storage is not None and storage not in STORAGE_CLASS:
raise InvalidStorageClass(storage=storage)
self._storage_class = storage
def set_acl(self, acl):
self.acl = acl
def append_to_value(self, value):
self._value_buffer.seek(0, os.SEEK_END)
self._value_buffer.write(value)
self.last_modified = datetime.datetime.utcnow()
self._etag = None # must recalculate etag
if self._is_versioned:
self._version_id = str(uuid.uuid4())
else:
self._version_id = None
def restore(self, days):
self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)
def refresh_version(self):
self._version_id = str(uuid.uuid4())
self.last_modified = datetime.datetime.utcnow()
@property
def etag(self):
if self._etag is None:
value_md5 = hashlib.md5()
self._value_buffer.seek(0)
while True:
block = self._value_buffer.read(DEFAULT_KEY_BUFFER_SIZE)
if not block:
break
value_md5.update(block)
self._etag = value_md5.hexdigest()
return '"{0}"'.format(self._etag)
@property
def last_modified_ISO8601(self):
return iso_8601_datetime_with_milliseconds(self.last_modified)
@property
def last_modified_RFC1123(self):
# Different datetime formats depending on how the key is obtained
# https://github.com/boto/boto/issues/466
return rfc_1123_datetime(self.last_modified)
@property
def metadata(self):
return self._metadata
@property
def tagging(self):
return self._tagging
@property
def response_dict(self):
res = {
"ETag": self.etag,
"last-modified": self.last_modified_RFC1123,
"content-length": str(self.size),
}
if self._storage_class != "STANDARD":
res["x-amz-storage-class"] = self._storage_class
if self._expiry is not None:
rhdr = 'ongoing-request="false", expiry-date="{0}"'
res["x-amz-restore"] = rhdr.format(self.expiry_date)
if self._is_versioned:
res["x-amz-version-id"] = str(self.version_id)
if self.website_redirect_location:
res["x-amz-website-redirect-location"] = self.website_redirect_location
return res
@property
def size(self):
self._value_buffer.seek(0, os.SEEK_END)
return self._value_buffer.tell()
@property
def storage_class(self):
return self._storage_class
@property
def expiry_date(self):
if self._expiry is not None:
return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")
# Keys need to be pickleable due to some implementation details of boto3.
# Since file objects aren't pickleable, we need to override the default
# behavior. The following is adapted from the Python docs:
# https://docs.python.org/3/library/pickle.html#handling-stateful-objects
def __getstate__(self):
state = self.__dict__.copy()
state["value"] = self.value
del state["_value_buffer"]
return state
def __setstate__(self, state):
self.__dict__.update({k: v for k, v in six.iteritems(state) if k != "value"})
self._value_buffer = tempfile.SpooledTemporaryFile(
max_size=self._max_buffer_size
)
self.value = state["value"]
class FakeMultipart(BaseModel):
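    """An in-progress multipart upload: parts are stored as FakeKeys and
    are validated and stitched together by ``complete()``."""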
def __init__(self, key_name, metadata):
self.key_name = key_name
self.metadata = metadata
self.parts = {}
        self.partlist = []  # ordered list of part IDs
rand_b64 = base64.b64encode(os.urandom(UPLOAD_ID_BYTES))
self.id = rand_b64.decode("utf-8").replace("=", "").replace("+", "")
def complete(self, body):
decode_hex = codecs.getdecoder("hex_codec")
total = bytearray()
md5s = bytearray()
last = None
count = 0
for pn, etag in body:
part = self.parts.get(pn)
part_etag = None
if part is not None:
part_etag = part.etag.replace('"', "")
etag = etag.replace('"', "")
if part is None or part_etag != etag:
raise InvalidPart()
if last is not None and len(last.value) < UPLOAD_PART_MIN_SIZE:
raise EntityTooSmall()
md5s.extend(decode_hex(part_etag)[0])
total.extend(part.value)
last = part
count += 1
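        # S3-style multipart ETag: the MD5 of the concatenated part MD5s,
        # suffixed with the number of parts ("<md5>-<part count>").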
etag = hashlib.md5()
etag.update(bytes(md5s))
return total, "{0}-{1}".format(etag.hexdigest(), count)
def set_part(self, part_id, value):
if part_id < 1:
return
key = FakeKey(part_id, value)
self.parts[part_id] = key
if part_id not in self.partlist:
insort(self.partlist, part_id)
return key
def list_parts(self):
for part_id in self.partlist:
yield self.parts[part_id]
class FakeGrantee(BaseModel):
def __init__(self, id="", uri="", display_name=""):
self.id = id
self.uri = uri
self.display_name = display_name
def __eq__(self, other):
if not isinstance(other, FakeGrantee):
return False
return (
self.id == other.id
and self.uri == other.uri
and self.display_name == other.display_name
)
@property
def type(self):
return "Group" if self.uri else "CanonicalUser"
def __repr__(self):
return "FakeGrantee(display_name: '{}', id: '{}', uri: '{}')".format(
self.display_name, self.id, self.uri
)
ALL_USERS_GRANTEE = FakeGrantee(uri="http://acs.amazonaws.com/groups/global/AllUsers")
AUTHENTICATED_USERS_GRANTEE = FakeGrantee(
uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
)
LOG_DELIVERY_GRANTEE = FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")
PERMISSION_FULL_CONTROL = "FULL_CONTROL"
PERMISSION_WRITE = "WRITE"
PERMISSION_READ = "READ"
PERMISSION_WRITE_ACP = "WRITE_ACP"
PERMISSION_READ_ACP = "READ_ACP"
CAMEL_CASED_PERMISSIONS = {
"FULL_CONTROL": "FullControl",
"WRITE": "Write",
"READ": "Read",
"WRITE_ACP": "WriteAcp",
"READ_ACP": "ReadAcp",
}
class FakeGrant(BaseModel):
def __init__(self, grantees, permissions):
self.grantees = grantees
self.permissions = permissions
def __repr__(self):
return "FakeGrant(grantees: {}, permissions: {})".format(
self.grantees, self.permissions
)
class FakeAcl(BaseModel):
def __init__(self, grants=None):
grants = grants or []
self.grants = grants
@property
def public_read(self):
for grant in self.grants:
if ALL_USERS_GRANTEE in grant.grantees:
if PERMISSION_READ in grant.permissions:
return True
if PERMISSION_FULL_CONTROL in grant.permissions:
return True
return False
def __repr__(self):
return "FakeAcl(grants: {})".format(self.grants)
def to_config_dict(self):
"""Returns the object into the format expected by AWS Config"""
data = {
"grantSet": None, # Always setting this to None. Feel free to change.
"owner": {"displayName": None, "id": OWNER},
}
# Add details for each Grant:
grant_list = []
for grant in self.grants:
permissions = (
grant.permissions
if isinstance(grant.permissions, list)
else [grant.permissions]
)
for permission in permissions:
for grantee in grant.grantees:
# Config does not add the owner if its permissions are FULL_CONTROL:
if permission == "FULL_CONTROL" and grantee.id == OWNER:
continue
if grantee.uri:
grant_list.append(
{
"grantee": grantee.uri.split(
"http://acs.amazonaws.com/groups/s3/"
)[1],
"permission": CAMEL_CASED_PERMISSIONS[permission],
}
)
else:
grant_list.append(
{
"grantee": {
"id": grantee.id,
"displayName": None
if not grantee.display_name
else grantee.display_name,
},
"permission": CAMEL_CASED_PERMISSIONS[permission],
}
)
if grant_list:
data["grantList"] = grant_list
return data
def get_canned_acl(acl):
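    """Build a FakeAcl for one of the S3 canned ACLs; the owner always
    retains FULL_CONTROL."""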
owner_grantee = FakeGrantee(id=OWNER)
grants = [FakeGrant([owner_grantee], [PERMISSION_FULL_CONTROL])]
if acl == "private":
pass # no other permissions
elif acl == "public-read":
grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ]))
elif acl == "public-read-write":
grants.append(
FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ, PERMISSION_WRITE])
)
elif acl == "authenticated-read":
grants.append(FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ]))
elif acl == "bucket-owner-read":
pass # TODO: bucket owner ACL
elif acl == "bucket-owner-full-control":
pass # TODO: bucket owner ACL
elif acl == "aws-exec-read":
pass # TODO: bucket owner, EC2 Read
elif acl == "log-delivery-write":
grants.append(
FakeGrant([LOG_DELIVERY_GRANTEE], [PERMISSION_READ_ACP, PERMISSION_WRITE])
)
else:
assert False, "Unknown canned acl: %s" % (acl,)
return FakeAcl(grants=grants)
class FakeTagging(BaseModel):
def __init__(self, tag_set=None):
self.tag_set = tag_set or FakeTagSet()
class FakeTagSet(BaseModel):
def __init__(self, tags=None):
self.tags = tags or []
class FakeTag(BaseModel):
def __init__(self, key, value=None):
self.key = key
self.value = value
class LifecycleFilter(BaseModel):
def __init__(self, prefix=None, tag=None, and_filter=None):
self.prefix = prefix
self.tag = tag
self.and_filter = and_filter
def to_config_dict(self):
if self.prefix is not None:
return {
"predicate": {"type": "LifecyclePrefixPredicate", "prefix": self.prefix}
}
elif self.tag:
return {
"predicate": {
"type": "LifecycleTagPredicate",
"tag": {"key": self.tag.key, "value": self.tag.value},
}
}
else:
return {
"predicate": {
"type": "LifecycleAndOperator",
"operands": self.and_filter.to_config_dict(),
}
}
class LifecycleAndFilter(BaseModel):
def __init__(self, prefix=None, tags=None):
self.prefix = prefix
self.tags = tags
def to_config_dict(self):
data = []
if self.prefix is not None:
data.append({"type": "LifecyclePrefixPredicate", "prefix": self.prefix})
for tag in self.tags:
data.append(
{
"type": "LifecycleTagPredicate",
"tag": {"key": tag.key, "value": tag.value},
}
)
return data
class LifecycleRule(BaseModel):
def __init__(
self,
id=None,
prefix=None,
lc_filter=None,
status=None,
expiration_days=None,
expiration_date=None,
transition_days=None,
transition_date=None,
storage_class=None,
expired_object_delete_marker=None,
nve_noncurrent_days=None,
nvt_noncurrent_days=None,
nvt_storage_class=None,
aimu_days=None,
):
self.id = id
self.prefix = prefix
self.filter = lc_filter
self.status = status
self.expiration_days = expiration_days
self.expiration_date = expiration_date
self.transition_days = transition_days
self.transition_date = transition_date
self.storage_class = storage_class
self.expired_object_delete_marker = expired_object_delete_marker
self.nve_noncurrent_days = nve_noncurrent_days
self.nvt_noncurrent_days = nvt_noncurrent_days
self.nvt_storage_class = nvt_storage_class
self.aimu_days = aimu_days
def to_config_dict(self):
"""Converts the object to the AWS Config data dict.
Note: The following are missing that should be added in the future:
- transitions (returns None for now)
- noncurrentVersionTransitions (returns None for now)
:param kwargs:
:return:
"""
lifecycle_dict = {
"id": self.id,
"prefix": self.prefix,
"status": self.status,
"expirationInDays": int(self.expiration_days)
if self.expiration_days
else None,
"expiredObjectDeleteMarker": self.expired_object_delete_marker,
"noncurrentVersionExpirationInDays": -1 or int(self.nve_noncurrent_days),
"expirationDate": self.expiration_date,
"transitions": None, # Replace me with logic to fill in
"noncurrentVersionTransitions": None, # Replace me with logic to fill in
}
if self.aimu_days:
lifecycle_dict["abortIncompleteMultipartUpload"] = {
"daysAfterInitiation": self.aimu_days
}
else:
lifecycle_dict["abortIncompleteMultipartUpload"] = None
# Format the filter:
if self.prefix is None and self.filter is None:
lifecycle_dict["filter"] = {"predicate": None}
elif self.prefix:
lifecycle_dict["filter"] = None
else:
lifecycle_dict["filter"] = self.filter.to_config_dict()
return lifecycle_dict
class CorsRule(BaseModel):
def __init__(
self,
allowed_methods,
allowed_origins,
allowed_headers=None,
expose_headers=None,
max_age_seconds=None,
):
self.allowed_methods = (
[allowed_methods]
if isinstance(allowed_methods, six.string_types)
else allowed_methods
)
self.allowed_origins = (
[allowed_origins]
if isinstance(allowed_origins, six.string_types)
else allowed_origins
)
self.allowed_headers = (
[allowed_headers]
if isinstance(allowed_headers, six.string_types)
else allowed_headers
)
self.exposed_headers = (
[expose_headers]
if isinstance(expose_headers, six.string_types)
else expose_headers
)
self.max_age_seconds = max_age_seconds
class Notification(BaseModel):
def __init__(self, arn, events, filters=None, id=None):
self.id = (
id
if id
else "".join(
random.choice(string.ascii_letters + string.digits) for _ in range(50)
)
)
self.arn = arn
self.events = events
self.filters = filters if filters else {}
def to_config_dict(self):
data = {}
# Type and ARN will be filled in by NotificationConfiguration's to_config_dict:
data["events"] = [event for event in self.events]
if self.filters:
data["filter"] = {
"s3KeyFilter": {
"filterRules": [
{"name": fr["Name"], "value": fr["Value"]}
for fr in self.filters["S3Key"]["FilterRule"]
]
}
}
else:
data["filter"] = None
# Not sure why this is a thing since AWS just seems to return this as filters ¯\_(ツ)_/¯
data["objectPrefixes"] = []
return data
class NotificationConfiguration(BaseModel):
def __init__(self, topic=None, queue=None, cloud_function=None):
self.topic = (
[
Notification(
t["Topic"], t["Event"], filters=t.get("Filter"), id=t.get("Id")
)
for t in topic
]
if topic
else []
)
self.queue = (
[
Notification(
q["Queue"], q["Event"], filters=q.get("Filter"), id=q.get("Id")
)
for q in queue
]
if queue
else []
)
self.cloud_function = (
[
Notification(
c["CloudFunction"],
c["Event"],
filters=c.get("Filter"),
id=c.get("Id"),
)
for c in cloud_function
]
if cloud_function
else []
)
def to_config_dict(self):
data = {"configurations": {}}
for topic in self.topic:
topic_config = topic.to_config_dict()
topic_config["topicARN"] = topic.arn
topic_config["type"] = "TopicConfiguration"
data["configurations"][topic.id] = topic_config
for queue in self.queue:
queue_config = queue.to_config_dict()
queue_config["queueARN"] = queue.arn
queue_config["type"] = "QueueConfiguration"
data["configurations"][queue.id] = queue_config
for cloud_function in self.cloud_function:
cf_config = cloud_function.to_config_dict()
cf_config["queueARN"] = cloud_function.arn
cf_config["type"] = "LambdaConfiguration"
data["configurations"][cloud_function.id] = cf_config
return data
def convert_str_to_bool(item):
"""Converts a boolean string to a boolean value"""
if isinstance(item, str):
return item.lower() == "true"
return False
class PublicAccessBlock(BaseModel):
def __init__(
self,
block_public_acls,
ignore_public_acls,
block_public_policy,
restrict_public_buckets,
):
# The boto XML appears to expect these values to exist as lowercase strings...
self.block_public_acls = block_public_acls or "false"
self.ignore_public_acls = ignore_public_acls or "false"
self.block_public_policy = block_public_policy or "false"
self.restrict_public_buckets = restrict_public_buckets or "false"
def to_config_dict(self):
# Need to make the string values booleans for Config:
return {
"blockPublicAcls": convert_str_to_bool(self.block_public_acls),
"ignorePublicAcls": convert_str_to_bool(self.ignore_public_acls),
"blockPublicPolicy": convert_str_to_bool(self.block_public_policy),
"restrictPublicBuckets": convert_str_to_bool(self.restrict_public_buckets),
}
class FakeBucket(BaseModel):
def __init__(self, name, region_name):
self.name = name
self.region_name = region_name
self.keys = _VersionedKeyStore()
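        # Maps each key name to the list of all versions of that key
        # (most recent last); plain lookups return the newest version.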
self.multiparts = {}
self.versioning_status = None
self.rules = []
self.policy = None
self.website_configuration = None
self.acl = get_canned_acl("private")
self.tags = FakeTagging()
self.cors = []
self.logging = {}
self.notification_configuration = None
self.accelerate_configuration = None
self.payer = "BucketOwner"
self.creation_date = datetime.datetime.utcnow()
self.public_access_block = None
@property
def location(self):
return self.region_name
@property
def is_versioned(self):
return self.versioning_status == "Enabled"
def set_lifecycle(self, rules):
self.rules = []
for rule in rules:
# Extract and validate actions from Lifecycle rule
expiration = rule.get("Expiration")
transition = rule.get("Transition")
try:
top_level_prefix = (
rule["Prefix"] or ""
                )  # If it's `None` then set to the empty string
except KeyError:
top_level_prefix = None
nve_noncurrent_days = None
if rule.get("NoncurrentVersionExpiration") is not None:
if rule["NoncurrentVersionExpiration"].get("NoncurrentDays") is None:
raise MalformedXML()
nve_noncurrent_days = rule["NoncurrentVersionExpiration"][
"NoncurrentDays"
]
nvt_noncurrent_days = None
nvt_storage_class = None
if rule.get("NoncurrentVersionTransition") is not None:
if rule["NoncurrentVersionTransition"].get("NoncurrentDays") is None:
raise MalformedXML()
if rule["NoncurrentVersionTransition"].get("StorageClass") is None:
raise MalformedXML()
nvt_noncurrent_days = rule["NoncurrentVersionTransition"][
"NoncurrentDays"
]
nvt_storage_class = rule["NoncurrentVersionTransition"]["StorageClass"]
aimu_days = None
if rule.get("AbortIncompleteMultipartUpload") is not None:
if (
rule["AbortIncompleteMultipartUpload"].get("DaysAfterInitiation")
is None
):
raise MalformedXML()
aimu_days = rule["AbortIncompleteMultipartUpload"][
"DaysAfterInitiation"
]
eodm = None
if expiration and expiration.get("ExpiredObjectDeleteMarker") is not None:
# This cannot be set if Date or Days is set:
if expiration.get("Days") or expiration.get("Date"):
raise MalformedXML()
eodm = expiration["ExpiredObjectDeleteMarker"]
# Pull out the filter:
lc_filter = None
if rule.get("Filter"):
                # Can't have both `Filter` and `Prefix`; a top-level `Prefix`
                # cannot appear alongside a `Filter`, so its mere presence is an error:
                if "Prefix" in rule:
                    raise MalformedXML()
filters = 0
try:
prefix_filter = (
rule["Filter"]["Prefix"] or ""
                    )  # If it's `None` then set to the empty string
filters += 1
except KeyError:
prefix_filter = None
and_filter = None
if rule["Filter"].get("And"):
filters += 1
and_tags = []
if rule["Filter"]["And"].get("Tag"):
if not isinstance(rule["Filter"]["And"]["Tag"], list):
rule["Filter"]["And"]["Tag"] = [
rule["Filter"]["And"]["Tag"]
]
for t in rule["Filter"]["And"]["Tag"]:
and_tags.append(FakeTag(t["Key"], t.get("Value", "")))
try:
and_prefix = (
rule["Filter"]["And"]["Prefix"] or ""
) # If it's `None` then set to the empty string
except KeyError:
and_prefix = None
and_filter = LifecycleAndFilter(prefix=and_prefix, tags=and_tags)
filter_tag = None
if rule["Filter"].get("Tag"):
filters += 1
filter_tag = FakeTag(
rule["Filter"]["Tag"]["Key"],
rule["Filter"]["Tag"].get("Value", ""),
)
# Can't have more than 1 filter:
if filters > 1:
raise MalformedXML()
lc_filter = LifecycleFilter(
prefix=prefix_filter, tag=filter_tag, and_filter=and_filter
)
# If no top level prefix and no filter is present, then this is invalid:
if top_level_prefix is None:
try:
rule["Filter"]
except KeyError:
raise MalformedXML()
self.rules.append(
LifecycleRule(
id=rule.get("ID"),
prefix=top_level_prefix,
lc_filter=lc_filter,
status=rule["Status"],
expiration_days=expiration.get("Days") if expiration else None,
expiration_date=expiration.get("Date") if expiration else None,
transition_days=transition.get("Days") if transition else None,
transition_date=transition.get("Date") if transition else None,
storage_class=transition.get("StorageClass")
if transition
else None,
expired_object_delete_marker=eodm,
nve_noncurrent_days=nve_noncurrent_days,
nvt_noncurrent_days=nvt_noncurrent_days,
nvt_storage_class=nvt_storage_class,
aimu_days=aimu_days,
)
)
def delete_lifecycle(self):
self.rules = []
def set_cors(self, rules):
self.cors = []
if len(rules) > 100:
raise MalformedXML()
for rule in rules:
assert isinstance(rule["AllowedMethod"], list) or isinstance(
rule["AllowedMethod"], six.string_types
)
assert isinstance(rule["AllowedOrigin"], list) or isinstance(
rule["AllowedOrigin"], six.string_types
)
assert isinstance(rule.get("AllowedHeader", []), list) or isinstance(
rule.get("AllowedHeader", ""), six.string_types
)
assert isinstance(rule.get("ExposedHeader", []), list) or isinstance(
rule.get("ExposedHeader", ""), six.string_types
)
assert isinstance(rule.get("MaxAgeSeconds", "0"), six.string_types)
if isinstance(rule["AllowedMethod"], six.string_types):
methods = [rule["AllowedMethod"]]
else:
methods = rule["AllowedMethod"]
for method in methods:
if method not in ["GET", "PUT", "HEAD", "POST", "DELETE"]:
raise InvalidRequest(method)
self.cors.append(
CorsRule(
rule["AllowedMethod"],
rule["AllowedOrigin"],
rule.get("AllowedHeader"),
rule.get("ExposedHeader"),
rule.get("MaxAgeSecond"),
)
)
def delete_cors(self):
self.cors = []
def set_tags(self, tagging):
self.tags = tagging
def delete_tags(self):
self.tags = FakeTagging()
@property
def tagging(self):
return self.tags
def set_logging(self, logging_config, bucket_backend):
if not logging_config:
self.logging = {}
return
# Target bucket must exist in the same account (assuming all moto buckets are in the same account):
if not bucket_backend.buckets.get(logging_config["TargetBucket"]):
raise InvalidTargetBucketForLogging(
"The target bucket for logging does not exist."
)
# Does the target bucket have the log-delivery WRITE and READ_ACP permissions?
write = read_acp = False
for grant in bucket_backend.buckets[logging_config["TargetBucket"]].acl.grants:
# Must be granted to: http://acs.amazonaws.com/groups/s3/LogDelivery
for grantee in grant.grantees:
if grantee.uri == "http://acs.amazonaws.com/groups/s3/LogDelivery":
if (
"WRITE" in grant.permissions
or "FULL_CONTROL" in grant.permissions
):
write = True
if (
"READ_ACP" in grant.permissions
or "FULL_CONTROL" in grant.permissions
):
read_acp = True
break
if not write or not read_acp:
raise InvalidTargetBucketForLogging(
"You must give the log-delivery group WRITE and READ_ACP"
" permissions to the target bucket"
)
# Buckets must also exist within the same region:
if (
bucket_backend.buckets[logging_config["TargetBucket"]].region_name
!= self.region_name
):
raise CrossLocationLoggingProhibitted()
# Checks pass -- set the logging config:
self.logging = logging_config
def set_notification_configuration(self, notification_config):
if not notification_config:
self.notification_configuration = None
return
self.notification_configuration = NotificationConfiguration(
topic=notification_config.get("TopicConfiguration"),
queue=notification_config.get("QueueConfiguration"),
cloud_function=notification_config.get("CloudFunctionConfiguration"),
)
# Validate that the region is correct:
for thing in ["topic", "queue", "cloud_function"]:
for t in getattr(self.notification_configuration, thing):
region = t.arn.split(":")[3]
if region != self.region_name:
raise InvalidNotificationDestination()
def set_accelerate_configuration(self, accelerate_config):
if self.accelerate_configuration is None and accelerate_config == "Suspended":
# Cannot "suspend" a not active acceleration. Leaves it undefined
return
self.accelerate_configuration = accelerate_config
def set_website_configuration(self, website_configuration):
self.website_configuration = website_configuration
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "DomainName":
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "DomainName" ]"')
elif attribute_name == "WebsiteURL":
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"')
raise UnformattedGetAttTemplateException()
def set_acl(self, acl):
self.acl = acl
@property
def physical_resource_id(self):
return self.name
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
bucket = s3_backend.create_bucket(resource_name, region_name)
return bucket
def to_config_dict(self):
"""Return the AWS Config JSON format of this S3 bucket.
Note: The following features are not implemented and will need to be if you care about them:
- Bucket Accelerate Configuration
"""
config_dict = {
"version": "1.3",
"configurationItemCaptureTime": str(self.creation_date),
"configurationItemStatus": "ResourceDiscovered",
"configurationStateId": str(
int(time.mktime(self.creation_date.timetuple()))
), # PY2 and 3 compatible
"configurationItemMD5Hash": "",
"arn": "arn:aws:s3:::{}".format(self.name),
"resourceType": "AWS::S3::Bucket",
"resourceId": self.name,
"resourceName": self.name,
"awsRegion": self.region_name,
"availabilityZone": "Regional",
"resourceCreationTime": str(self.creation_date),
"relatedEvents": [],
"relationships": [],
"tags": {tag.key: tag.value for tag in self.tagging.tag_set.tags},
"configuration": {
"name": self.name,
"owner": {"id": OWNER},
"creationDate": self.creation_date.isoformat(),
},
}
# Make the supplementary configuration:
        # This is double-wrapped JSON for some reason...
s_config = {
"AccessControlList": json.dumps(json.dumps(self.acl.to_config_dict()))
}
if self.public_access_block:
s_config["PublicAccessBlockConfiguration"] = json.dumps(
self.public_access_block.to_config_dict()
)
# Tagging is special:
if config_dict["tags"]:
s_config["BucketTaggingConfiguration"] = json.dumps(
{"tagSets": [{"tags": config_dict["tags"]}]}
)
# TODO implement Accelerate Configuration:
s_config["BucketAccelerateConfiguration"] = {"status": None}
if self.rules:
s_config["BucketLifecycleConfiguration"] = {
"rules": [rule.to_config_dict() for rule in self.rules]
}
s_config["BucketLoggingConfiguration"] = {
"destinationBucketName": self.logging.get("TargetBucket", None),
"logFilePrefix": self.logging.get("TargetPrefix", None),
}
s_config["BucketPolicy"] = {
"policyText": self.policy.decode("utf-8") if self.policy else None
}
s_config["IsRequesterPaysEnabled"] = (
"false" if self.payer == "BucketOwner" else "true"
)
if self.notification_configuration:
s_config[
"BucketNotificationConfiguration"
] = self.notification_configuration.to_config_dict()
else:
s_config["BucketNotificationConfiguration"] = {"configurations": {}}
config_dict["supplementaryConfiguration"] = s_config
return config_dict
class S3Backend(BaseBackend):
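    """In-memory backend holding every FakeBucket, keyed by bucket name."""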
def __init__(self):
self.buckets = {}
def create_bucket(self, bucket_name, region_name):
if bucket_name in self.buckets:
raise BucketAlreadyExists(bucket=bucket_name)
if not MIN_BUCKET_NAME_LENGTH <= len(bucket_name) <= MAX_BUCKET_NAME_LENGTH:
raise InvalidBucketName()
new_bucket = FakeBucket(name=bucket_name, region_name=region_name)
self.buckets[bucket_name] = new_bucket
return new_bucket
def get_all_buckets(self):
return self.buckets.values()
def get_bucket(self, bucket_name):
try:
return self.buckets[bucket_name]
except KeyError:
raise MissingBucket(bucket=bucket_name)
def delete_bucket(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if bucket.keys:
# Can't delete a bucket with keys
return False
else:
return self.buckets.pop(bucket_name)
def set_bucket_versioning(self, bucket_name, status):
self.get_bucket(bucket_name).versioning_status = status
def get_bucket_versioning(self, bucket_name):
return self.get_bucket(bucket_name).versioning_status
def get_bucket_latest_versions(self, bucket_name):
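        # For each key, track the newest last_modified seen so far and record
        # the version id that produced it.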
versions = self.get_bucket_versions(bucket_name)
latest_modified_per_key = {}
latest_versions = {}
for version in versions:
name = version.name
last_modified = version.last_modified
version_id = version.version_id
latest_modified_per_key[name] = max(
last_modified, latest_modified_per_key.get(name, datetime.datetime.min)
)
if last_modified == latest_modified_per_key[name]:
latest_versions[name] = version_id
return latest_versions
def get_bucket_versions(
self,
bucket_name,
delimiter=None,
encoding_type=None,
key_marker=None,
max_keys=None,
version_id_marker=None,
prefix="",
):
bucket = self.get_bucket(bucket_name)
if any((delimiter, key_marker, version_id_marker)):
raise NotImplementedError(
"Called get_bucket_versions with some of delimiter, encoding_type, key_marker, version_id_marker"
)
return itertools.chain(
*(l for key, l in bucket.keys.iterlists() if key.startswith(prefix))
)
def get_bucket_policy(self, bucket_name):
return self.get_bucket(bucket_name).policy
def set_bucket_policy(self, bucket_name, policy):
self.get_bucket(bucket_name).policy = policy
def delete_bucket_policy(self, bucket_name, body):
bucket = self.get_bucket(bucket_name)
bucket.policy = None
def set_bucket_lifecycle(self, bucket_name, rules):
bucket = self.get_bucket(bucket_name)
bucket.set_lifecycle(rules)
def set_bucket_website_configuration(self, bucket_name, website_configuration):
bucket = self.get_bucket(bucket_name)
bucket.set_website_configuration(website_configuration)
def get_bucket_website_configuration(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.website_configuration
def get_bucket_public_access_block(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if not bucket.public_access_block:
raise NoSuchPublicAccessBlockConfiguration()
return bucket.public_access_block
def set_key(
self, bucket_name, key_name, value, storage=None, etag=None, multipart=None
):
key_name = clean_key_name(key_name)
if storage is not None and storage not in STORAGE_CLASS:
raise InvalidStorageClass(storage=storage)
bucket = self.get_bucket(bucket_name)
new_key = FakeKey(
name=key_name,
value=value,
storage=storage,
etag=etag,
is_versioned=bucket.is_versioned,
version_id=str(uuid.uuid4()) if bucket.is_versioned else None,
multipart=multipart,
)
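        # Drop any existing key with the same version id before appending the
        # new version (unversioned buckets always use version_id=None, so the
        # old value is replaced outright).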
keys = [
key
for key in bucket.keys.getlist(key_name, [])
if key.version_id != new_key.version_id
] + [new_key]
bucket.keys.setlist(key_name, keys)
return new_key
def append_to_key(self, bucket_name, key_name, value):
key_name = clean_key_name(key_name)
key = self.get_key(bucket_name, key_name)
key.append_to_value(value)
return key
def get_key(self, bucket_name, key_name, version_id=None, part_number=None):
key_name = clean_key_name(key_name)
bucket = self.get_bucket(bucket_name)
key = None
if bucket:
if version_id is None:
if key_name in bucket.keys:
key = bucket.keys[key_name]
else:
for key_version in bucket.keys.getlist(key_name, default=[]):
if str(key_version.version_id) == str(version_id):
key = key_version
break
if part_number and key and key.multipart:
key = key.multipart.parts[part_number]
if isinstance(key, FakeKey):
return key
else:
return None
def set_key_tagging(self, bucket_name, key_name, tagging, version_id=None):
key = self.get_key(bucket_name, key_name, version_id)
if key is None:
raise MissingKey(key_name)
key.set_tagging(tagging)
return key
def put_bucket_tagging(self, bucket_name, tagging):
tag_keys = [tag.key for tag in tagging.tag_set.tags]
if len(tag_keys) != len(set(tag_keys)):
raise DuplicateTagKeys()
bucket = self.get_bucket(bucket_name)
bucket.set_tags(tagging)
def delete_bucket_tagging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.delete_tags()
def put_bucket_cors(self, bucket_name, cors_rules):
bucket = self.get_bucket(bucket_name)
bucket.set_cors(cors_rules)
def put_bucket_logging(self, bucket_name, logging_config):
bucket = self.get_bucket(bucket_name)
bucket.set_logging(logging_config, self)
def delete_bucket_cors(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.delete_cors()
def delete_bucket_public_access_block(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.public_access_block = None
def put_bucket_notification_configuration(self, bucket_name, notification_config):
bucket = self.get_bucket(bucket_name)
bucket.set_notification_configuration(notification_config)
def put_bucket_accelerate_configuration(
self, bucket_name, accelerate_configuration
):
if accelerate_configuration not in ["Enabled", "Suspended"]:
raise MalformedXML()
bucket = self.get_bucket(bucket_name)
        if "." in bucket.name:
raise InvalidRequest("PutBucketAccelerateConfiguration")
bucket.set_accelerate_configuration(accelerate_configuration)
def put_bucket_public_access_block(self, bucket_name, pub_block_config):
bucket = self.get_bucket(bucket_name)
if not pub_block_config:
raise InvalidPublicAccessBlockConfiguration()
bucket.public_access_block = PublicAccessBlock(
pub_block_config.get("BlockPublicAcls"),
pub_block_config.get("IgnorePublicAcls"),
pub_block_config.get("BlockPublicPolicy"),
pub_block_config.get("RestrictPublicBuckets"),
)
def initiate_multipart(self, bucket_name, key_name, metadata):
bucket = self.get_bucket(bucket_name)
new_multipart = FakeMultipart(key_name, metadata)
bucket.multiparts[new_multipart.id] = new_multipart
return new_multipart
def complete_multipart(self, bucket_name, multipart_id, body):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
value, etag = multipart.complete(body)
if value is None:
return
del bucket.multiparts[multipart_id]
key = self.set_key(
bucket_name, multipart.key_name, value, etag=etag, multipart=multipart
)
key.set_metadata(multipart.metadata)
return key
def cancel_multipart(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
del bucket.multiparts[multipart_id]
def list_multipart(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
return list(bucket.multiparts[multipart_id].list_parts())
def get_all_multiparts(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.multiparts
def set_part(self, bucket_name, multipart_id, part_id, value):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
return multipart.set_part(part_id, value)
def copy_part(
self,
dest_bucket_name,
multipart_id,
part_id,
src_bucket_name,
src_key_name,
src_version_id,
start_byte,
end_byte,
):
dest_bucket = self.get_bucket(dest_bucket_name)
multipart = dest_bucket.multiparts[multipart_id]
src_value = self.get_key(
src_bucket_name, src_key_name, version_id=src_version_id
).value
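        # Byte-range copies treat end_byte as inclusive (HTTP Range semantics),
        # hence the +1 in the slice below.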
if start_byte is not None:
src_value = src_value[start_byte : end_byte + 1]
return multipart.set_part(part_id, src_value)
def prefix_query(self, bucket, prefix, delimiter):
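        # Emulates S3 listing: keys under the prefix are returned directly,
        # while names containing the delimiter past the prefix are collapsed
        # into "folder" results (common prefixes).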
key_results = set()
folder_results = set()
if prefix:
for key_name, key in bucket.keys.items():
if key_name.startswith(prefix):
key_without_prefix = key_name.replace(prefix, "", 1)
if delimiter and delimiter in key_without_prefix:
# If delimiter, we need to split out folder_results
key_without_delimiter = key_without_prefix.split(delimiter)[0]
folder_results.add(
"{0}{1}{2}".format(prefix, key_without_delimiter, delimiter)
)
else:
key_results.add(key)
else:
for key_name, key in bucket.keys.items():
if delimiter and delimiter in key_name:
# If delimiter, we need to split out folder_results
folder_results.add(key_name.split(delimiter)[0] + delimiter)
else:
key_results.add(key)
key_results = filter(
lambda key: not isinstance(key, FakeDeleteMarker), key_results
)
key_results = sorted(key_results, key=lambda key: key.name)
        folder_results = sorted(folder_results)
return key_results, folder_results
def _set_delete_marker(self, bucket_name, key_name):
bucket = self.get_bucket(bucket_name)
bucket.keys[key_name] = FakeDeleteMarker(key=bucket.keys[key_name])
def delete_key(self, bucket_name, key_name, version_id=None):
key_name = clean_key_name(key_name)
bucket = self.get_bucket(bucket_name)
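        # Unversioned buckets drop the key outright; versioned buckets either
        # gain a delete marker (no version id given) or lose just the one
        # version that was named.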
try:
if not bucket.is_versioned:
bucket.keys.pop(key_name)
else:
if version_id is None:
self._set_delete_marker(bucket_name, key_name)
else:
if key_name not in bucket.keys:
raise KeyError
bucket.keys.setlist(
key_name,
[
key
for key in bucket.keys.getlist(key_name)
if str(key.version_id) != str(version_id)
],
)
if not bucket.keys.getlist(key_name):
bucket.keys.pop(key_name)
return True
except KeyError:
return False
def copy_key(
self,
src_bucket_name,
src_key_name,
dest_bucket_name,
dest_key_name,
storage=None,
acl=None,
src_version_id=None,
):
src_key_name = clean_key_name(src_key_name)
dest_key_name = clean_key_name(dest_key_name)
dest_bucket = self.get_bucket(dest_bucket_name)
key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id)
new_key = key.copy(dest_key_name, dest_bucket.is_versioned)
if storage is not None:
new_key.set_storage_class(storage)
if acl is not None:
new_key.set_acl(acl)
dest_bucket.keys[dest_key_name] = new_key
def set_bucket_acl(self, bucket_name, acl):
bucket = self.get_bucket(bucket_name)
bucket.set_acl(acl)
def get_bucket_acl(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.acl
s3_backend = S3Backend()