Prep Release 2.2.4 (#4205)

* S3 - Refactor logic to models.py, out of Response-class

* Changelog for release 2.2.4
Bert Blommers 2021-08-21 15:05:40 +01:00 committed by GitHub
parent 25e79c6e67
commit 8d4596a366
5 changed files with 275 additions and 132 deletions
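The core of this change is the S3 refactor: request-handling logic moves out of the ResponseObject class in responses.py and into S3Backend in models.py, and the backend methods are renamed after the S3 API actions they implement (list_buckets, put_object, copy_object, and so on). Below is a minimal sketch of that delegation pattern with heavily simplified signatures; the real methods take many more parameters.

```python
# Sketch of the delegation pattern used by the refactor: the backend owns all
# state, the response layer only parses requests and renders XML.
# Simplified signatures, for illustration only.

class S3Backend:
    """Owns all state; roughly one method per S3 API action (models.py)."""

    def __init__(self):
        self.buckets = {}  # bucket_name -> {key_name: value}

    def create_bucket(self, bucket_name):
        self.buckets[bucket_name] = {}

    def put_object(self, bucket_name, key_name, value):
        # State changes happen here, not in the response layer
        self.buckets[bucket_name][key_name] = value

    def list_buckets(self):
        return list(self.buckets)


class ResponseObject:
    """Translates HTTP requests into backend calls (responses.py)."""

    def __init__(self, backend):
        self.backend = backend

    def all_buckets(self):
        # The response layer no longer touches bucket state directly
        names = self.backend.list_buckets()
        body = "".join(f"<Bucket><Name>{n}</Name></Bucket>" for n in names)
        return 200, {}, f"<ListAllMyBucketsResult>{body}</ListAllMyBucketsResult>"


backend = S3Backend()
backend.create_bucket("mybucket")
backend.put_object("mybucket", "the-key", b"hello")
print(ResponseObject(backend).all_buckets())
```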

View File

@ -4,6 +4,42 @@ Moto Changelog
Unreleased
-----
2.2.4
-----
New Methods:
* ConfigService:
* delete_config_rule()
* describe_config_rule()
* put_config_rule()
* EC2:
* create_egress_only_internet_gateway()
* delete_egress_only_internet_gateway()
* describe_egress_only_internet_gateways()
* EKS:
* create_fargate_profile()
* delete_fargate_profile()
* describe_fargate_profile()
* list_fargate_profiles()
* IOT:
* deprecate_thing_type()
* S3:
* get_object_lock_configuration()
* put_object_legal_hold()
* put_object_lock_configuration()
* put_object_retention()
Miscellaneous:
* CloudFormation:describe_stack_resource() now throws an exception if the LogicalResourceId does not exist
* CloudFormation: AWS::Events::Rule now supports the EventPattern-property
* CloudFormation: Improved Parameter handling
* EC2:describe_instances() now handles wildcards correctly when filtering by tags
* EC2:terminate_instances() now throws an exception when trying to terminate a protected instance
* ELBv2:describe_rules() now returns the correct value for the IsDefault-attribute
* IOT:create_thing() now throws an exception if the thing type is deprecated
* IOT:update_thing() now throws an exception if the thing type is deprecated
* S3:create_bucket() now supports the ObjectLockEnabledForBucket-parameter
* S3:putObject() is fixed for the Java SDK, which previously failed with an ETag-validation error
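The new S3 object-lock methods listed above can be exercised end-to-end through boto3 against moto. A hedged usage sketch (bucket and key names are made up, and the retention date is arbitrary):

```python
import datetime

import boto3
from moto import mock_s3


@mock_s3
def object_lock_example():
    client = boto3.client("s3", region_name="us-east-1")

    # create_bucket() now supports ObjectLockEnabledForBucket
    client.create_bucket(Bucket="locked-bucket", ObjectLockEnabledForBucket=True)

    # put_object_lock_configuration() / get_object_lock_configuration()
    client.put_object_lock_configuration(
        Bucket="locked-bucket",
        ObjectLockConfiguration={
            "ObjectLockEnabled": "Enabled",
            "Rule": {"DefaultRetention": {"Mode": "GOVERNANCE", "Days": 1}},
        },
    )
    print(client.get_object_lock_configuration(Bucket="locked-bucket"))

    client.put_object(Bucket="locked-bucket", Key="report.csv", Body=b"data")

    # put_object_legal_hold() / put_object_retention()
    client.put_object_legal_hold(
        Bucket="locked-bucket", Key="report.csv", LegalHold={"Status": "ON"}
    )
    client.put_object_retention(
        Bucket="locked-bucket",
        Key="report.csv",
        Retention={
            "Mode": "GOVERNANCE",
            "RetainUntilDate": datetime.datetime(2030, 1, 1),
        },
    )


object_lock_example()
```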
2.2.3
-----
New Methods:

View File

@ -2445,7 +2445,7 @@
## config
<details>
<summary>33% implemented</summary>
<summary>37% implemented</summary>
- [X] batch_get_aggregate_resource_config
- [X] batch_get_resource_config
@ -3270,6 +3270,7 @@
- [ ] deregister_certificate
- [ ] deregister_event_topic
- [ ] describe_certificate
- [ ] describe_client_authentication_settings
- [ ] describe_conditional_forwarders
- [ ] describe_directories
- [ ] describe_domain_controllers
@ -3392,7 +3393,7 @@
## ec2
<details>
<summary>31% implemented</summary>
<summary>32% implemented</summary>
- [ ] accept_reserved_instances_exchange_quote
- [ ] accept_transit_gateway_multicast_domain_associations
@ -3447,7 +3448,7 @@
- [ ] create_default_subnet
- [ ] create_default_vpc
- [X] create_dhcp_options
- [ ] create_egress_only_internet_gateway
- [X] create_egress_only_internet_gateway
- [ ] create_fleet
- [X] create_flow_logs
- [ ] create_fpga_image
@ -3508,7 +3509,7 @@
- [ ] delete_client_vpn_route
- [X] delete_customer_gateway
- [ ] delete_dhcp_options
- [ ] delete_egress_only_internet_gateway
- [X] delete_egress_only_internet_gateway
- [ ] delete_fleets
- [X] delete_flow_logs
- [ ] delete_fpga_image
@ -3583,7 +3584,7 @@
- [ ] describe_conversion_tasks
- [ ] describe_customer_gateways
- [X] describe_dhcp_options
- [ ] describe_egress_only_internet_gateways
- [X] describe_egress_only_internet_gateways
- [ ] describe_elastic_gpus
- [ ] describe_export_image_tasks
- [ ] describe_export_tasks
@ -4031,29 +4032,29 @@
## eks
<details>
<summary>25% implemented</summary>
<summary>37% implemented</summary>
- [ ] associate_encryption_config
- [ ] associate_identity_provider_config
- [ ] create_addon
- [X] create_cluster
- [ ] create_fargate_profile
- [X] create_fargate_profile
- [X] create_nodegroup
- [ ] delete_addon
- [X] delete_cluster
- [ ] delete_fargate_profile
- [X] delete_fargate_profile
- [X] delete_nodegroup
- [ ] describe_addon
- [ ] describe_addon_versions
- [X] describe_cluster
- [ ] describe_fargate_profile
- [X] describe_fargate_profile
- [ ] describe_identity_provider_config
- [X] describe_nodegroup
- [ ] describe_update
- [ ] disassociate_identity_provider_config
- [ ] list_addons
- [X] list_clusters
- [ ] list_fargate_profiles
- [X] list_fargate_profiles
- [ ] list_identity_provider_configs
- [X] list_nodegroups
- [ ] list_tags_for_resource
@ -5628,7 +5629,7 @@
## iot
<details>
<summary>28% implemented</summary>
<summary>29% implemented</summary>
- [ ] accept_certificate_transfer
- [ ] add_thing_to_billing_group
@ -5703,7 +5704,7 @@
- [X] delete_topic_rule
- [ ] delete_topic_rule_destination
- [ ] delete_v2_logging_level
- [ ] deprecate_thing_type
- [X] deprecate_thing_type
- [ ] describe_account_audit_configuration
- [ ] describe_audit_finding
- [ ] describe_audit_mitigation_actions_task
@ -7587,6 +7588,47 @@
- [ ] update_vod_source
</details>
## memorydb
<details>
<summary>0% implemented</summary>
- [ ] batch_update_cluster
- [ ] copy_snapshot
- [ ] create_acl
- [ ] create_cluster
- [ ] create_parameter_group
- [ ] create_snapshot
- [ ] create_subnet_group
- [ ] create_user
- [ ] delete_acl
- [ ] delete_cluster
- [ ] delete_parameter_group
- [ ] delete_snapshot
- [ ] delete_subnet_group
- [ ] delete_user
- [ ] describe_acls
- [ ] describe_clusters
- [ ] describe_engine_versions
- [ ] describe_events
- [ ] describe_parameter_groups
- [ ] describe_parameters
- [ ] describe_service_updates
- [ ] describe_snapshots
- [ ] describe_subnet_groups
- [ ] describe_users
- [ ] failover_shard
- [ ] list_allowed_node_type_updates
- [ ] list_tags
- [ ] reset_parameter_group
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_acl
- [ ] update_cluster
- [ ] update_parameter_group
- [ ] update_subnet_group
- [ ] update_user
</details>
## meteringmarketplace
<details>
<summary>0% implemented</summary>
@ -9352,20 +9394,20 @@
## s3
<details>
<summary>25% implemented</summary>
<summary>52% implemented</summary>
- [ ] abort_multipart_upload
- [ ] complete_multipart_upload
- [ ] copy_object
- [X] abort_multipart_upload
- [X] complete_multipart_upload
- [X] copy_object
- [X] create_bucket
- [ ] create_multipart_upload
- [X] create_multipart_upload
- [X] delete_bucket
- [ ] delete_bucket_analytics_configuration
- [X] delete_bucket_cors
- [X] delete_bucket_encryption
- [ ] delete_bucket_intelligent_tiering_configuration
- [ ] delete_bucket_inventory_configuration
- [ ] delete_bucket_lifecycle
- [X] delete_bucket_lifecycle
- [ ] delete_bucket_metrics_configuration
- [ ] delete_bucket_ownership_controls
- [X] delete_bucket_policy
@ -9374,8 +9416,8 @@
- [X] delete_bucket_website
- [X] delete_object
- [X] delete_object_tagging
- [ ] delete_objects
- [ ] delete_public_access_block
- [X] delete_objects
- [X] delete_public_access_block
- [ ] get_bucket_accelerate_configuration
- [X] get_bucket_acl
- [ ] get_bucket_analytics_configuration
@ -9383,9 +9425,9 @@
- [X] get_bucket_encryption
- [ ] get_bucket_intelligent_tiering_configuration
- [ ] get_bucket_inventory_configuration
- [ ] get_bucket_lifecycle
- [X] get_bucket_lifecycle
- [ ] get_bucket_lifecycle_configuration
- [ ] get_bucket_location
- [X] get_bucket_location
- [X] get_bucket_logging
- [ ] get_bucket_metrics_configuration
- [ ] get_bucket_notification
@ -9399,55 +9441,55 @@
- [X] get_bucket_versioning
- [ ] get_bucket_website
- [X] get_object
- [ ] get_object_acl
- [X] get_object_acl
- [ ] get_object_legal_hold
- [ ] get_object_lock_configuration
- [X] get_object_lock_configuration
- [ ] get_object_retention
- [ ] get_object_tagging
- [X] get_object_tagging
- [ ] get_object_torrent
- [ ] get_public_access_block
- [ ] head_bucket
- [ ] head_object
- [X] get_public_access_block
- [X] head_bucket
- [X] head_object
- [ ] list_bucket_analytics_configurations
- [ ] list_bucket_intelligent_tiering_configurations
- [ ] list_bucket_inventory_configurations
- [ ] list_bucket_metrics_configurations
- [ ] list_buckets
- [X] list_buckets
- [ ] list_multipart_uploads
- [ ] list_object_versions
- [X] list_object_versions
- [ ] list_objects
- [ ] list_objects_v2
- [ ] list_parts
- [X] put_bucket_accelerate_configuration
- [ ] put_bucket_acl
- [X] put_bucket_acl
- [ ] put_bucket_analytics_configuration
- [X] put_bucket_cors
- [X] put_bucket_encryption
- [ ] put_bucket_intelligent_tiering_configuration
- [ ] put_bucket_inventory_configuration
- [ ] put_bucket_lifecycle
- [X] put_bucket_lifecycle
- [ ] put_bucket_lifecycle_configuration
- [X] put_bucket_logging
- [ ] put_bucket_metrics_configuration
- [ ] put_bucket_notification
- [X] put_bucket_notification_configuration
- [ ] put_bucket_ownership_controls
- [ ] put_bucket_policy
- [X] put_bucket_policy
- [ ] put_bucket_replication
- [ ] put_bucket_request_payment
- [X] put_bucket_tagging
- [ ] put_bucket_versioning
- [ ] put_bucket_website
- [ ] put_object
- [ ] put_object_acl
- [ ] put_object_legal_hold
- [ ] put_object_lock_configuration
- [ ] put_object_retention
- [X] put_object
- [X] put_object_acl
- [X] put_object_legal_hold
- [X] put_object_lock_configuration
- [X] put_object_retention
- [ ] put_object_tagging
- [ ] put_public_access_block
- [ ] restore_object
- [ ] select_object_content
- [ ] upload_part
- [X] upload_part
- [ ] upload_part_copy
- [ ] write_get_object_response
</details>
@ -9791,6 +9833,7 @@
<summary>0% implemented</summary>
- [ ] invoke_endpoint
- [ ] invoke_endpoint_async
</details>
## savingsplans

View File

@ -45,7 +45,7 @@ from .exceptions import (
NoSuchUpload,
)
from .cloud_formation import cfn_to_api_encryption, is_replacement_update
from .utils import clean_key_name, _VersionedKeyStore
from .utils import clean_key_name, _VersionedKeyStore, undo_clean_key_name
from ..settings import get_s3_default_key_buffer_size, S3_UPLOAD_PART_MIN_SIZE
MAX_BUCKET_NAME_LENGTH = 63
@ -1344,7 +1344,7 @@ class S3Backend(BaseBackend):
self.buckets[bucket_name] = new_bucket
return new_bucket
def get_all_buckets(self):
def list_buckets(self):
return self.buckets.values()
def get_bucket(self, bucket_name):
@ -1353,6 +1353,9 @@ class S3Backend(BaseBackend):
except KeyError:
raise MissingBucket(bucket=bucket_name)
def head_bucket(self, bucket_name):
return self.get_bucket(bucket_name)
def delete_bucket(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if bucket.keys:
@ -1371,7 +1374,7 @@ class S3Backend(BaseBackend):
return self.get_bucket(bucket_name).encryption
def get_bucket_latest_versions(self, bucket_name):
versions = self.get_bucket_versions(bucket_name)
versions = self.list_object_versions(bucket_name)
latest_modified_per_key = {}
latest_versions = {}
@ -1387,7 +1390,7 @@ class S3Backend(BaseBackend):
return latest_versions
def get_bucket_versions(
def list_object_versions(
self,
bucket_name,
delimiter=None,
@ -1411,7 +1414,7 @@ class S3Backend(BaseBackend):
def get_bucket_policy(self, bucket_name):
return self.get_bucket(bucket_name).policy
def set_bucket_policy(self, bucket_name, policy):
def put_bucket_policy(self, bucket_name, policy):
self.get_bucket(bucket_name).policy = policy
def delete_bucket_policy(self, bucket_name, body):
@ -1424,10 +1427,14 @@ class S3Backend(BaseBackend):
def delete_bucket_encryption(self, bucket_name):
self.get_bucket(bucket_name).encryption = None
def set_bucket_lifecycle(self, bucket_name, rules):
def put_bucket_lifecycle(self, bucket_name, rules):
bucket = self.get_bucket(bucket_name)
bucket.set_lifecycle(rules)
def delete_bucket_lifecycle(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.delete_lifecycle()
def set_bucket_website_configuration(self, bucket_name, website_configuration):
bucket = self.get_bucket(bucket_name)
bucket.website_configuration = website_configuration
@ -1440,7 +1447,7 @@ class S3Backend(BaseBackend):
bucket = self.get_bucket(bucket_name)
bucket.website_configuration = None
def get_bucket_public_access_block(self, bucket_name):
def get_public_access_block(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if not bucket.public_access_block:
@ -1458,7 +1465,7 @@ class S3Backend(BaseBackend):
return self.account_public_access_block
def set_object(
def put_object(
self,
bucket_name,
key_name,
@ -1504,6 +1511,25 @@ class S3Backend(BaseBackend):
return new_key
def put_object_acl(self, bucket_name, key_name, acl):
key = self.get_object(bucket_name, key_name)
# TODO: Support the XML-based ACL format
if key is not None:
key.set_acl(acl)
else:
raise MissingKey(key_name)
def put_object_legal_hold(
self, bucket_name, key_name, version_id, legal_hold_status
):
key = self.get_object(bucket_name, key_name, version_id=version_id)
key.lock_legal_status = legal_hold_status
def put_object_retention(self, bucket_name, key_name, version_id, retention):
key = self.get_object(bucket_name, key_name, version_id=version_id)
key.lock_mode = retention[0]
key.lock_until = retention[1]
def append_to_key(self, bucket_name, key_name, value):
key_name = clean_key_name(key_name)
@ -1534,7 +1560,22 @@ class S3Backend(BaseBackend):
else:
return None
def get_key_tags(self, key):
def head_object(self, bucket_name, key_name, version_id=None, part_number=None):
return self.get_object(bucket_name, key_name, version_id, part_number)
def get_object_acl(self, key):
return key.acl
def get_object_lock_configuration(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return (
bucket.object_lock_enabled,
bucket.default_lock_mode,
bucket.default_lock_days,
bucket.default_lock_years,
)
def get_object_tagging(self, key):
return self.tagger.list_tags_for_resource(key.arn)
def set_key_tags(self, key, tags, key_name=None):
@ -1557,7 +1598,9 @@ class S3Backend(BaseBackend):
bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()],
)
def put_bucket_lock(self, bucket_name, lock_enabled, mode, days, years):
def put_object_lock_configuration(
self, bucket_name, lock_enabled, mode, days, years
):
bucket = self.get_bucket(bucket_name)
if bucket.keys.item_size() > 0:
@ -1587,7 +1630,7 @@ class S3Backend(BaseBackend):
bucket = self.get_bucket(bucket_name)
bucket.delete_cors()
def delete_bucket_public_access_block(self, bucket_name):
def delete_public_access_block(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.public_access_block = None
@ -1656,19 +1699,35 @@ class S3Backend(BaseBackend):
return
del bucket.multiparts[multipart_id]
key = self.set_object(
key = self.put_object(
bucket_name, multipart.key_name, value, etag=etag, multipart=multipart
)
key.set_metadata(multipart.metadata)
return key
def cancel_multipart(self, bucket_name, multipart_id):
def abort_multipart_upload(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
multipart_data = bucket.multiparts.get(multipart_id, None)
if not multipart_data:
raise NoSuchUpload(upload_id=multipart_id)
del bucket.multiparts[multipart_id]
def create_multipart_upload(self, bucket_name, key_name, metadata, storage_type):
multipart = FakeMultipart(key_name, metadata)
multipart.storage = storage_type
bucket = self.get_bucket(bucket_name)
bucket.multiparts[multipart.id] = multipart
return multipart.id
def complete_multipart_upload(self, bucket_name, multipart_id, body):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
value, etag = multipart.complete(body)
if value is not None:
del bucket.multiparts[multipart_id]
return multipart, value, etag
def list_multipart(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
if multipart_id not in bucket.multiparts:
@ -1679,7 +1738,7 @@ class S3Backend(BaseBackend):
bucket = self.get_bucket(bucket_name)
return bucket.multiparts
def set_part(self, bucket_name, multipart_id, part_id, value):
def upload_part(self, bucket_name, multipart_id, part_id, value):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
return multipart.set_part(part_id, value)
@ -1791,7 +1850,19 @@ class S3Backend(BaseBackend):
except KeyError:
return False, None
def copy_key(
def delete_objects(self, bucket_name, objects):
deleted_objects = []
for object_ in objects:
key_name = object_["Key"]
version_id = object_.get("VersionId", None)
self.delete_object(
bucket_name, undo_clean_key_name(key_name), version_id=version_id
)
deleted_objects.append((key_name, version_id))
return deleted_objects
def copy_object(
self,
src_bucket_name,
src_key_name,
@ -1819,7 +1890,7 @@ class S3Backend(BaseBackend):
dest_bucket.keys[dest_key_name] = new_key
def set_bucket_acl(self, bucket_name, acl):
def put_bucket_acl(self, bucket_name, acl):
bucket = self.get_bucket(bucket_name)
bucket.set_acl(acl)
@ -1831,6 +1902,15 @@ class S3Backend(BaseBackend):
bucket = self.get_bucket(bucket_name)
return bucket.cors
def get_bucket_lifecycle(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.rules
def get_bucket_location(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.location
def get_bucket_logging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.logging
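With the backend methods above renamed to mirror the S3 API (put_object, copy_object, delete_objects, upload_part, create_multipart_upload, complete_multipart_upload, abort_multipart_upload), each boto3 call now maps onto a like-named backend method. A hedged round-trip sketch under moto, with made-up bucket and key names:

```python
import boto3
from moto import mock_s3


@mock_s3
def backend_roundtrip():
    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="demo")

    # put_object / copy_object
    client.put_object(Bucket="demo", Key="a.txt", Body=b"hello")
    client.copy_object(Bucket="demo", Key="b.txt", CopySource="demo/a.txt")

    # create_multipart_upload / upload_part / complete_multipart_upload
    mpu = client.create_multipart_upload(Bucket="demo", Key="big.bin")
    part = client.upload_part(
        Bucket="demo",
        Key="big.bin",
        UploadId=mpu["UploadId"],
        PartNumber=1,
        Body=b"x" * (5 * 1024 * 1024),  # only the last part may be smaller than 5 MB
    )
    client.complete_multipart_upload(
        Bucket="demo",
        Key="big.bin",
        UploadId=mpu["UploadId"],
        MultipartUpload={"Parts": [{"ETag": part["ETag"], "PartNumber": 1}]},
    )

    # delete_objects
    client.delete_objects(
        Bucket="demo",
        Delete={"Objects": [{"Key": "a.txt"}, {"Key": "b.txt"}, {"Key": "big.bin"}]},
    )


backend_roundtrip()
```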

View File

@ -64,12 +64,10 @@ from .models import (
FakeGrant,
FakeAcl,
FakeKey,
FakeMultipart,
)
from .utils import (
bucket_name_from_url,
clean_key_name,
undo_clean_key_name,
metadata_from_headers,
parse_region_from_url,
)
@ -187,7 +185,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
self._authenticate_and_authorize_s3_action()
# No bucket specified. Listing all buckets
all_buckets = self.backend.get_all_buckets()
all_buckets = self.backend.list_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
@ -335,7 +333,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
def _bucket_response_head(self, bucket_name):
try:
self.backend.get_bucket(bucket_name)
self.backend.head_bucket(bucket_name)
except MissingBucket:
# Unless we do this, boto3 does not raise ClientError on
# HEAD (which the real API responds with), and instead
@ -349,14 +347,16 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
bucket = self.backend.get_bucket(bucket_name)
(
lock_enabled,
mode,
days,
years,
) = self.backend.get_object_lock_configuration(bucket_name)
template = self.response_template(S3_BUCKET_LOCK_CONFIGURATION)
return template.render(
lock_enabled=bucket.object_lock_enabled,
mode=bucket.default_lock_mode,
days=bucket.default_lock_days,
years=bucket.default_lock_years,
lock_enabled=lock_enabled, mode=mode, days=days, years=years,
)
if "uploads" in querystring:
@ -378,22 +378,21 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(bucket_name=bucket_name, uploads=multiparts)
elif "location" in querystring:
bucket = self.backend.get_bucket(bucket_name)
location = self.backend.get_bucket_location(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
location = bucket.location
# us-east-1 is different - returns a None location
if location == DEFAULT_REGION_NAME:
location = None
return template.render(location=location)
elif "lifecycle" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if not bucket.rules:
rules = self.backend.get_bucket_lifecycle(bucket_name)
if not rules:
template = self.response_template(S3_NO_LIFECYCLE)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=bucket.rules)
return template.render(rules=rules)
elif "versioning" in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
@ -413,9 +412,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, website_configuration
elif "acl" in querystring:
bucket = self.backend.get_bucket(bucket_name)
acl = self.backend.get_bucket_acl(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(obj=bucket)
return template.render(acl=acl)
elif "tagging" in querystring:
tags = self.backend.get_bucket_tagging(bucket_name)["Tags"]
# "Special Error" if no tags:
@ -454,9 +453,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif "publicAccessBlock" in querystring:
public_block_config = self.backend.get_bucket_public_access_block(
bucket_name
)
public_block_config = self.backend.get_public_access_block(bucket_name)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return template.render(public_block_config=public_block_config)
@ -469,7 +466,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
version_id_marker = querystring.get("version-id-marker", [None])[0]
bucket = self.backend.get_bucket(bucket_name)
versions = self.backend.get_bucket_versions(
versions = self.backend.list_object_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
@ -698,7 +695,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if not self.backend.get_bucket(bucket_name).object_lock_enabled:
raise BucketMustHaveLockeEnabled
self.backend.put_bucket_lock(
self.backend.put_object_lock_configuration(
bucket_name,
config["enabled"],
config["mode"],
@ -720,17 +717,17 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if not isinstance(rules, list):
# If there is only one rule, xmldict returns just the item
rules = [rules]
self.backend.set_bucket_lifecycle(bucket_name, rules)
self.backend.put_bucket_lifecycle(bucket_name, rules)
return ""
elif "policy" in querystring:
self.backend.set_bucket_policy(bucket_name, body)
self.backend.put_bucket_policy(bucket_name, body)
return "True"
elif "acl" in querystring:
# Headers are first. If not set, then look at the body (consistent with the documentation):
acls = self._acl_from_headers(request.headers)
if not acls:
acls = self._acl_from_xml(body)
self.backend.set_bucket_acl(bucket_name, acls)
self.backend.put_bucket_acl(bucket_name, acls)
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
@ -831,7 +828,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if "x-amz-acl" in request.headers:
# TODO: Support the XML-based ACL format
self.backend.set_bucket_acl(
self.backend.put_bucket_acl(
bucket_name, self._acl_from_headers(request.headers)
)
@ -859,14 +856,13 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
self.backend.delete_bucket_cors(bucket_name)
return 204, {}, ""
elif "lifecycle" in querystring:
bucket = self.backend.get_bucket(bucket_name)
bucket.delete_lifecycle()
self.backend.delete_bucket_lifecycle(bucket_name)
return 204, {}, ""
elif "publicAccessBlock" in querystring:
self.backend.delete_bucket_public_access_block(bucket_name)
self.backend.delete_public_access_block(bucket_name)
return 204, {}, ""
elif "encryption" in querystring:
bucket = self.backend.delete_bucket_encryption(bucket_name)
self.backend.delete_bucket_encryption(bucket_name)
return 204, {}, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
@ -939,7 +935,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
else:
status_code = 204
new_key = self.backend.set_object(bucket_name, key, f)
new_key = self.backend.put_object(bucket_name, key, f)
if form.get("acl"):
acl = get_canned_acl(form.get("acl"))
@ -975,18 +971,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if len(objects) == 0:
raise MalformedXML()
deleted_objects = []
deleted_objects = self.backend.delete_objects(bucket_name, objects)
error_names = []
for object_ in objects:
key_name = object_["Key"]
version_id = object_.get("VersionId", None)
self.backend.delete_object(
bucket_name, undo_clean_key_name(key_name), version_id=version_id
)
deleted_objects.append((key_name, version_id))
return (
200,
{},
@ -1286,10 +1273,11 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
return 304, response_headers, "Not Modified"
if "acl" in query:
acl = s3_backend.get_object_acl(key)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(obj=key)
return 200, response_headers, template.render(acl=acl)
if "tagging" in query:
tags = self.backend.get_key_tags(key)["Tags"]
tags = self.backend.get_object_tagging(key)["Tags"]
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(tags=tags)
@ -1343,7 +1331,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.set_part(bucket_name, upload_id, part_number, body)
key = self.backend.upload_part(
bucket_name, upload_id, part_number, body
)
response = ""
response_headers.update(key.response_dict)
return 200, response_headers, response
@ -1387,29 +1377,25 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
retention = self._mode_until_from_xml(body)
key.lock_mode = retention[0]
key.lock_until = retention[1]
self.backend.put_object_retention(
bucket_name, key_name, version_id=version_id, retention=retention
)
return 200, response_headers, ""
if "legal-hold" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
legal_hold_status = self._legal_hold_status_from_xml(body)
key.lock_legal_status = legal_hold_status
self.backend.put_object_legal_hold(
bucket_name, key_name, version_id, legal_hold_status
)
return 200, response_headers, ""
if "acl" in query:
key = self.backend.get_object(bucket_name, key_name)
# TODO: Support the XML-based ACL format
if key is not None:
key.set_acl(acl)
return 200, response_headers, ""
else:
raise MissingKey(key_name)
self.backend.put_object_acl(bucket_name, key_name, acl)
return 200, response_headers, ""
if "tagging" in query:
if "versionId" in query:
@ -1447,7 +1433,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
):
raise ObjectNotInActiveTierError(key)
self.backend.copy_key(
self.backend.copy_object(
src_bucket,
src_key,
bucket_name,
@ -1482,7 +1468,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
else:
# Initial data
new_key = self.backend.set_object(
new_key = self.backend.put_object(
bucket_name,
key_name,
body,
@ -1520,7 +1506,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.get_object(
key = self.backend.head_object(
bucket_name, key_name, version_id=version_id, part_number=part_number
)
if key:
@ -1869,7 +1855,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if query.get("uploadId"):
upload_id = query["uploadId"][0]
self.backend.cancel_multipart(bucket_name, upload_id)
self.backend.abort_multipart_upload(bucket_name, upload_id)
return 204, {}, ""
version_id = query.get("versionId", [None])[0]
if "tagging" in query:
@ -1902,15 +1888,14 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if body == b"" and "uploads" in query:
metadata = metadata_from_headers(request.headers)
multipart = FakeMultipart(key_name, metadata)
multipart.storage = request.headers.get("x-amz-storage-class", "STANDARD")
bucket = self.backend.get_bucket(bucket_name)
bucket.multiparts[multipart.id] = multipart
storage_type = request.headers.get("x-amz-storage-class", "STANDARD")
multipart_id = self.backend.create_multipart_upload(
bucket_name, key_name, metadata, storage_type
)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name, key_name=key_name, upload_id=multipart.id
bucket_name=bucket_name, key_name=key_name, upload_id=multipart_id
)
return 200, {}, response
@ -1918,15 +1903,13 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
body = self._complete_multipart_body(body)
multipart_id = query["uploadId"][0]
bucket = self.backend.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
value, etag = multipart.complete(body)
multipart, value, etag = self.backend.complete_multipart_upload(
bucket_name, multipart_id, body
)
if value is None:
return 400, {}, ""
del bucket.multiparts[multipart_id]
key = self.backend.set_object(
key = self.backend.put_object(
bucket_name,
multipart.key_name,
value,
@ -2270,7 +2253,7 @@ S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in obj.acl.grants %}
{% for grant in acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

View File

@ -2675,6 +2675,7 @@ def test_boto3_multipart_etag():
Body=part2,
)["ETag"]
)
s3.complete_multipart_upload(
Bucket="mybucket",
Key="the-key",
@ -4540,7 +4541,7 @@ def test_s3_lifecycle_config_dict():
"AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 1},
},
]
s3_config_query.backends["global"].set_bucket_lifecycle("bucket1", lifecycle)
s3_config_query.backends["global"].put_bucket_lifecycle("bucket1", lifecycle)
# Get the rules for this:
lifecycles = [
@ -4755,7 +4756,7 @@ def test_s3_acl_to_config_dict():
FakeGrant([FakeGrantee(id=OWNER)], "FULL_CONTROL"),
]
)
s3_config_query.backends["global"].set_bucket_acl("logbucket", log_acls)
s3_config_query.backends["global"].put_bucket_acl("logbucket", log_acls)
acls = s3_config_query.backends["global"].buckets["logbucket"].acl.to_config_dict()
assert acls == {
@ -4781,7 +4782,7 @@ def test_s3_acl_to_config_dict():
FakeGrant([FakeGrantee(id=OWNER)], "WRITE_ACP"),
]
)
s3_config_query.backends["global"].set_bucket_acl("logbucket", log_acls)
s3_config_query.backends["global"].put_bucket_acl("logbucket", log_acls)
acls = s3_config_query.backends["global"].buckets["logbucket"].acl.to_config_dict()
assert acls == {
"grantSet": None,
@ -4828,7 +4829,7 @@ def test_s3_config_dict():
]
)
s3_config_query.backends["global"].set_bucket_acl("logbucket", log_acls)
s3_config_query.backends["global"].put_bucket_acl("logbucket", log_acls)
s3_config_query.backends["global"].put_bucket_logging(
"bucket1", {"TargetBucket": "logbucket", "TargetPrefix": ""}
)
@ -4848,7 +4849,7 @@ def test_s3_config_dict():
# The policy is a byte array -- need to encode in Python 3
pass_policy = bytes(policy, "utf-8")
s3_config_query.backends["global"].set_bucket_policy("bucket1", pass_policy)
s3_config_query.backends["global"].put_bucket_policy("bucket1", pass_policy)
# Get the us-west-2 bucket and verify that it works properly:
bucket1_result = s3_config_query.get_config_resource("bucket1")
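The config-query tests above call the renamed backend methods directly; the same renamed operations are also reachable through boto3, for example the head_bucket/head_object delegation added to responses.py. A hedged pytest-style sketch:

```python
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_s3


@mock_s3
def test_head_bucket_and_head_object():
    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="mybucket")
    client.put_object(Bucket="mybucket", Key="the-key", Body=b"hello")

    # head_bucket / head_object succeed for existing resources ...
    client.head_bucket(Bucket="mybucket")
    resp = client.head_object(Bucket="mybucket", Key="the-key")
    assert resp["ContentLength"] == len(b"hello")

    # ... while a missing bucket still raises ClientError (404), which is
    # what the head_bucket() delegation in the Response class preserves.
    with pytest.raises(ClientError):
        client.head_bucket(Bucket="does-not-exist")
```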