diff --git a/.bumpversion.cfg b/.bumpversion.cfg new file mode 100644 index 000000000..32a01af8f --- /dev/null +++ b/.bumpversion.cfg @@ -0,0 +1,7 @@ +[bumpversion] +current_version = 1.2.0 + +[bumpversion:file:setup.py] + +[bumpversion:file:moto/__init__.py] + diff --git a/CHANGELOG.md b/CHANGELOG.md index b10967f64..4dac737b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,18 @@ Moto Changelog =================== -Latest +1.2.0 ------ + + * Supports filtering AMIs by self * Implemented signal_workflow_execution for SWF * Wired SWF backend to the moto server - * Fixed incorrect handling of task list parameter on start_workflow_execution + * Revamped lambda function storage to do versioning + * IOT improvements + * RDS improvements + * Implemented CloudWatch get_metric_statistics + * Improved Cloudformation EC2 support + * Implemented Cloudformation change_set endpoints 1.1.25 ----- diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1266d508e..f28083221 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,25 @@ ### Contributing code -If you have improvements to Moto, send us your pull requests! For those -just getting started, Github has a [howto](https://help.github.com/articles/using-pull-requests/). +Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_CONDUCT.md), you can expect to be treated with respect at all times when interacting with this project. + +## Is there a missing feature? + +Moto is easier to contribute to than you probably think. There's [a list of which endpoints have been implemented](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) and we invite you to add new endpoints to existing services or to add new services. + +How to teach Moto to support a new AWS endpoint: + +* Create an issue describing what's missing. This is where we'll all talk about the new addition and help you get it done. 
+* Create a [pull request](https://help.github.com/articles/using-pull-requests/) and mention the issue # in the PR description. +* Try to add a failing test case. For example, if you're trying to implement `boto3.client('acm').import_certificate()` you'll want to add a new method called `def test_import_certificate` to `tests/test_acm/test_acm.py`. +* If you can also implement the code that gets that test passing that's great. If not, just ask the community for a hand and somebody will assist you. + +# Maintainers + +## Releasing a new version of Moto + +You'll need a PyPi account and a Dockerhub account to release Moto. After we release a new PyPi package we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image. + +* First, `scripts/bump_version` modifies the version and opens a PR +* Then, merge the new pull request +* Finally, generate and ship the new artifacts with `make publish` + diff --git a/Makefile b/Makefile index 5d8bc7011..2b90c0ec2 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,7 @@ tag_github_release: git tag `python setup.py --version` git push origin `python setup.py --version` -publish: implementation_coverage \ +publish: upload_pypi_artifact \ tag_github_release \ push_dockerhub_image diff --git a/moto/__init__.py b/moto/__init__.py index 3508dfeda..9d292a3e1 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.0.1' +__version__ = '1.2.0' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 70c15d697..57f42df56 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -107,7 +107,8 @@ class FakeStack(BaseModel): def update(self, template, role_arn=None, parameters=None, tags=None): 
self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") self.template = template - self.resource_map.update(json.loads(template), parameters) + self._parse_template() + self.resource_map.update(self.template_dict, parameters) self.output_map = self._create_output_map() self._add_stack_event("UPDATE_COMPLETE") self.status = "UPDATE_COMPLETE" diff --git a/moto/core/responses.py b/moto/core/responses.py index 52be602f6..5afe5e168 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -108,6 +108,7 @@ class BaseResponse(_TemplateEnvironmentMixin): # to extract region, use [^.] region_regex = re.compile(r'\.(?P[a-z]{2}-[a-z]+-\d{1})\.amazonaws\.com') param_list_regex = re.compile(r'(.*)\.(\d+)\.') + access_key_regex = re.compile(r'AWS.*(?P(? 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= """ + +S3_LOGGING_CONFIG = """ + + + {{ logging["TargetBucket"] }} + {{ logging["TargetPrefix"] }} + {% if logging.get("TargetGrants") %} + + {% for grant in logging["TargetGrants"] %} + + + {% if grant.grantees[0].uri %} + {{ grant.grantees[0].uri }} + {% endif %} + {% if grant.grantees[0].id %} + {{ grant.grantees[0].id }} + {% endif %} + {% if grant.grantees[0].display_name %} + {{ grant.grantees[0].display_name }} + {% endif %} + + {{ grant.permissions[0] }} + + {% endfor %} + + {% endif %} + + +""" + +S3_NO_LOGGING_CONFIG = """ + +""" diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py index 6e2164d63..135796054 100644 --- a/moto/xray/mock_client.py +++ b/moto/xray/mock_client.py @@ -51,7 +51,7 @@ def mock_xray_client(f): aws_xray_sdk.core.xray_recorder._emitter = MockEmitter() try: - f(*args, **kwargs) + return f(*args, **kwargs) finally: if old_xray_context_var is None: diff --git a/scripts/bump_version b/scripts/bump_version new file mode 100755 index 000000000..d1af3a84b --- /dev/null +++ b/scripts/bump_version @@ -0,0 +1,27 @@ +#!/bin/bash + +main() { + set -euo pipefail # Bash safemode + + 
local version=${1:-} + if [[ -z "${version}" ]]; then + echo "USAGE: $0 1.3.2" + echo "Provide a new version number as an argument to bump the version" + echo -n "Current:" + grep version= setup.py + return 1 + fi + + &>/dev/null which bumpversion || pip install bumpversion + bumpversion --new-version ${version} patch + + git checkout -b version-${version} + # Commit the new version + git commit -a -m "bumping to version ${version}" + # Commit an updated IMPLEMENTATION_COVERAGE.md + make implementation_coverage || true + # Open a PR + open https://github.com/spulec/moto/compare/master...version-${version} +} + +main "$@" diff --git a/setup.py b/setup.py index 201622627..27c635944 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ else: setup( name='moto', - version='1.1.25', + version='1.2.0', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 051d8bed7..3a7525585 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -752,6 +752,9 @@ def test_vpc_single_instance_in_subnet(): security_group.vpc_id.should.equal(vpc.id) stack = conn.describe_stacks()[0] + + vpc.tags.should.have.key('Application').which.should.equal(stack.stack_id) + resources = stack.describe_resources() vpc_resource = [ resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 9ba782414..64102418c 100755 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -705,3 +705,17 @@ def test_ami_filter_by_owner_id(): assert all(ubuntu_ids) and ubuntu_ids[0] == ubuntu_id # Check we actually have a subset of images assert len(ubuntu_ids) < len(all_ids) + +@mock_ec2 +def 
test_ami_filter_by_self(): + client = boto3.client('ec2', region_name='us-east-1') + + my_images = client.describe_images(Owners=['self'])['Images'] + assert len(my_images) == 0 + + # Create a new image + instance = boto3.resource('ec2', region_name='us-east-1').create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] + image = instance.create_image(Name='test-image') + + my_images = client.describe_images(Owners=['self'])['Images'] + assert len(my_images) == 1 diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index 7226cacaf..c64f075ca 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -36,6 +36,11 @@ def test_boto3_describe_regions(): for rec in resp['Regions']: rec['Endpoint'].should.contain(rec['RegionName']) + test_region = 'us-east-1' + resp = ec2.describe_regions(RegionNames=[test_region]) + resp['Regions'].should.have.length_of(1) + resp['Regions'][0].should.have.key('RegionName').which.should.equal(test_region) + @mock_ec2 def test_boto3_availability_zones(): diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 00628e22f..b4497ef60 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -9,7 +9,7 @@ import re import sure # noqa import boto3 -from botocore.exceptions import ClientError +from botocore.exceptions import ClientError, ParamValidationError from dateutil.tz import tzlocal from moto import mock_ecr @@ -445,3 +445,117 @@ def test_get_authorization_token_explicit_regions(): } ]) + + +@mock_ecr +def test_batch_get_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + 
imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v2' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(1) + + response['images'][0]['imageManifest'].should.contain("vnd.docker.distribution.manifest.v2+json") + response['images'][0]['registryId'].should.equal("012345678910") + response['images'][0]['repositoryName'].should.equal("test_repository") + + response['images'][0]['imageId']['imageTag'].should.equal("v2") + response['images'][0]['imageId']['imageDigest'].should.contain("sha") + + type(response['failures']).should.be(list) + len(response['failures']).should.be(0) + + +@mock_ecr +def test_batch_get_image_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v5' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(0) + + type(response['failures']).should.be(list) + len(response['failures']).should.be(1) + response['failures'][0]['failureReason'].should.equal("Requested image not found") + response['failures'][0]['failureCode'].should.equal("ImageNotFound") + 
response['failures'][0]['imageId']['imageTag'].should.equal("v5") + + +@mock_ecr +def test_batch_get_image_no_tags(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + error_msg = re.compile( + r".*Missing required parameter in input: \"imageIds\".*", + re.MULTILINE) + + client.batch_get_image.when.called_with( + repositoryName='test_repository').should.throw( + ParamValidationError, error_msg) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index d50f6999e..b4dfe532d 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -651,3 +651,21 @@ def test_attach_detach_user_policy(): resp = client.list_attached_user_policies(UserName=user.name) resp['AttachedPolicies'].should.have.length_of(0) + + +@mock_iam +def test_update_access_key(): + iam = boto3.resource('iam', region_name='us-east-1') + client = iam.meta.client + username = 'test-user' + iam.create_user(UserName=username) + with assert_raises(ClientError): + client.update_access_key(UserName=username, + AccessKeyId='non-existent-key', + Status='Inactive') + key = client.create_access_key(UserName=username)['AccessKey'] + client.update_access_key(UserName=username, + AccessKeyId=key['AccessKeyId'], + Status='Inactive') + resp = client.list_access_keys(UserName=username) + resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive') diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 31631e459..7c01934d3 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -177,3 +177,192 @@ def test_principal_thing(): res.should.have.key('things').which.should.have.length_of(0) res = client.list_thing_principals(thingName=thing_name) res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def 
test_thing_groups(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(1) + for thing_group in res['thingGroups']: + thing_group.should.have.key('groupName').which.should_not.be.none + thing_group.should.have.key('groupArn').which.should_not.be.none + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupProperties') + thing_group.should.have.key('thingGroupMetadata') + thing_group.should.have.key('version') + + # delete thing group + client.delete_thing_group(thingGroupName=group_name) + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(0) + + # props create test + props = { + 'thingGroupDescription': 'my first thing group', + 'attributePayload': { + 'attributes': { + 'key1': 'val01', + 'Key02': 'VAL2' + } + } + } + thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties')\ + .which.should.have.key('attributePayload')\ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + # props update test with merge + new_props = { + 
'attributePayload': { + 'attributes': { + 'k3': 'v3' + }, + 'merge': True + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties')\ + .which.should.have.key('attributePayload')\ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + res_props.should.have.key('k3').which.should.equal('v3') + + # props update test + new_props = { + 'attributePayload': { + 'attributes': { + 'k4': 'v4' + } + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties')\ + .which.should.have.key('attributePayload')\ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('k4').which.should.equal('v4') + res_props.should_not.have.key('key1') + + +@mock_iot +def test_thing_group_relations(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # add in 4 way + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + 
thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + thing_groups = client.list_thing_groups_for_thing( + thingName=name + ) + thing_groups.should.have.key('thingGroups') + thing_groups['thingGroups'].should.have.length_of(1) + + # remove in 4 way + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(0) + + # update thing group for thing + client.update_thing_groups_for_thing( + thingName=name, + thingGroupsToAdd=[ + group_name + ] + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + client.update_thing_groups_for_thing( + thingName=name, + thingGroupsToRemove=[ + group_name + ] + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(0) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 829941d79..33752af60 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -50,6 +50,7 @@ def reduced_min_part_size(f): return f(*args, **kwargs) finally: s3model.UPLOAD_PART_MIN_SIZE = 
orig_size + return wrapped @@ -883,11 +884,12 @@ def test_s3_object_in_public_bucket(): s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() exc.exception.response['Error']['Code'].should.equal('403') - params = {'Bucket': 'test-bucket','Key': 'file.txt'} + params = {'Bucket': 'test-bucket', 'Key': 'file.txt'} presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) response = requests.get(presigned_url) assert response.status_code == 200 + @mock_s3 def test_s3_object_in_private_bucket(): s3 = boto3.resource('s3') @@ -1102,6 +1104,7 @@ def test_boto3_key_etag(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"') + @mock_s3 def test_website_redirect_location(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1116,6 +1119,7 @@ def test_website_redirect_location(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['WebsiteRedirectLocation'].should.equal(url) + @mock_s3 def test_boto3_list_keys_xml_escaped(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1627,7 +1631,7 @@ def test_boto3_put_bucket_cors(): }) e = err.exception e.response["Error"]["Code"].should.equal("InvalidRequest") - e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " + e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. 
" "Unsupported method is NOTREAL") with assert_raises(ClientError) as err: @@ -1732,6 +1736,249 @@ def test_boto3_delete_bucket_cors(): e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") +@mock_s3 +def test_put_bucket_acl_body(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + } + ], + "Owner": bucket_owner + }) + + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 2 + for g in result["Grants"]: + assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery" + assert g["Grantee"]["Type"] == "Group" + assert g["Permission"] in ["WRITE", "READ_ACP"] + + # With one: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ], + "Owner": bucket_owner + }) + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 1 + + # With no owner: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ] + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # With incorrect permission: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + 
"Permission": "lskjflkasdjflkdsjfalisdjflkdsjf" + } + ], + "Owner": bucket_owner + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # Clear the ACLs: + result = s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner}) + assert not result.get("Grants") + + +@mock_s3 +def test_boto3_put_bucket_logging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + log_bucket = "logbucket" + wrong_region_bucket = "wrongregionlogbucket" + s3.create_bucket(Bucket=bucket_name) + s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later... + s3.create_bucket(Bucket=wrong_region_bucket, CreateBucketConfiguration={"LocationConstraint": "us-west-2"}) + + # No logging config: + result = s3.get_bucket_logging(Bucket=bucket_name) + assert not result.get("LoggingEnabled") + + # A log-bucket that doesn't exist: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": "IAMNOTREAL", + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + + # A log-bucket that's missing the proper ACLs for LogDelivery: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + assert "log-delivery" in err.exception.response["Error"]["Message"] + + # Add the proper "log-delivery" ACL to the log buckets: + bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"] + for bucket in [log_bucket, wrong_region_bucket]: + s3.put_bucket_acl(Bucket=bucket, AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + 
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + }, + { + "Grantee": { + "Type": "CanonicalUser", + "ID": bucket_owner["ID"] + }, + "Permission": "FULL_CONTROL" + } + ], + "Owner": bucket_owner + }) + + # A log-bucket that's in the wrong region: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": wrong_region_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" + + # Correct logging: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name) + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert result["LoggingEnabled"]["TargetBucket"] == log_bucket + assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name) + assert not result["LoggingEnabled"].get("TargetGrants") + + # And disabling: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={}) + assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled") + + # And enabling with multiple target grants: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + }, + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "WRITE" + } + ] + } + }) + + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 2 + assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \ + 
"SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" + + # Test with just 1 grant: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + } + ] + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 + + # With an invalid grant: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "NOTAREALPERM" + } + ] + } + }) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + @mock_s3 def test_boto3_put_object_tagging(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1939,11 +2186,10 @@ def test_get_stream_gzipped(): Bucket='moto-tests', Key='keyname', ) - res = zlib.decompress(obj['Body'].read(), 16+zlib.MAX_WBITS) + res = zlib.decompress(obj['Body'].read(), 16 + zlib.MAX_WBITS) assert res == payload - TEST_XML = """\