From 172396e6a8e19a8142964ca112a56beaf4e87554 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 20 Nov 2017 13:17:24 -0800 Subject: [PATCH 001/182] Updating CONTRIBUTING with release instructions --- CONTRIBUTING.md | 25 +++++++++++++++++++++++-- Makefile | 2 +- scripts/bump_version | 22 ++++++++++++++++++++++ 3 files changed, 46 insertions(+), 3 deletions(-) create mode 100755 scripts/bump_version diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1266d508e..f28083221 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,25 @@ ### Contributing code -If you have improvements to Moto, send us your pull requests! For those -just getting started, Github has a [howto](https://help.github.com/articles/using-pull-requests/). +Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_CONDUCT.md), you can expect to be treated with respect at all times when interacting with this project. + +## Is there a missing feature? + +Moto is easier to contribute to than you probably think. There's [a list of which endpoints have been implemented](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) and we invite you to add new endpoints to existing services or to add new services. + +How to teach Moto to support a new AWS endpoint: + +* Create an issue describing what's missing. This is where we'll all talk about the new addition and help you get it done. +* Create a [pull request](https://help.github.com/articles/using-pull-requests/) and mention the issue # in the PR description. +* Try to add a failing test case. For example, if you're trying to implement `boto3.client('acm').import_certificate()` you'll want to add a new method called `def test_import_certificate` to `tests/test_acm/test_acm.py`. +* If you can also implement the code that gets that test passing that's great. If not, just ask the community for a hand and somebody will assist you. + +# Maintainers + +## Releasing a new version of Moto + +You'll need a PyPi account and a Dockerhub account to release Moto. After we release a new PyPi package we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image. + +* First, `scripts/bump_version` modifies the version and opens a PR +* Then, merge the new pull request +* Finally, generate and ship the new artifacts with `make publish` + diff --git a/Makefile b/Makefile index 99b7f2620..9324a61cd 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,7 @@ tag_github_release: git tag `python setup.py --version` git push origin `python setup.py --version` -publish: implementation_coverage \ +publish: upload_pypi_artifact \ tag_github_release \ push_dockerhub_image diff --git a/scripts/bump_version b/scripts/bump_version new file mode 100755 index 000000000..53030700e --- /dev/null +++ b/scripts/bump_version @@ -0,0 +1,22 @@ +#!/bin/bash + +main() { + local version=$1 + if [[ -z "${version}" ]]; then + echo "USAGE: $0 1.3.2" + echo "Provide a new version number as an argument to bump the version" + echo -n "Current:" + grep version= setup.py + return 1 + fi + sed -i '' "s/version=.*$/version='${version}',/g" setup.py + git checkout -b version-${version} + # Commit the new version + git commit setup.py -m "bumping to version ${version}" + # Commit an updated IMPLEMENTATION_COVERAGE.md + make implementation_coverage || true + # Open a PR + open https://github.com/spulec/moto/compare/master...version-${version} +} + +main $@ From a1f2ba21ee7fc7703738bf3987431cc81803b44e Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sun, 26 Nov 2017 22:29:23 +0000 Subject: [PATCH 002/182] Adds if_not_exists function to DynamoDB Update Expression Fixes #1358 --- moto/dynamodb2/models.py | 20 +++++++- tests/test_dynamodb2/test_dynamodb.py | 68 +++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 2 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 0a48c277a..f4e61ca9f 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -135,7 +135,9 @@ class Item(BaseModel): assert len(parts) % 2 == 0, "Mismatched operators and values in update expression: '{}'".format(update_expression) for action, valstr in zip(parts[:-1:2], parts[1::2]): action = action.upper() - values = valstr.split(',') + + # "Should" retain arguments in side (...) + values = re.split(r',(?![^(]*\))', valstr) for value in values: # A Real value value = value.lstrip(":").rstrip(",").strip() @@ -145,9 +147,23 @@ class Item(BaseModel): if action == "REMOVE": self.attrs.pop(value, None) elif action == 'SET': - key, value = value.split("=") + key, value = value.split("=", 1) key = key.strip() value = value.strip() + + # If not exists, changes value to a default if needed, else its the same as it was + if value.startswith('if_not_exists'): + # Function signature + match = re.match(r'.*if_not_exists\((?P.+),\s*(?P.+)\).*', value) + if not match: + raise TypeError + + path, value = match.groups() + + # If it already exists, get its value so we dont overwrite it + if path in self.attrs: + value = self.attrs[path].cast_value + if value in expression_attribute_values: value = DynamoType(expression_attribute_values[value]) else: diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 05daf23aa..d7c5b5843 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1067,3 +1067,71 @@ def test_update_item_on_map(): resp = table.scan() resp['Items'][0]['body'].should.equal({'nested': {'data': 'new_value'}}) + + +# https://github.com/spulec/moto/issues/1358 +@mock_dynamodb2 +def test_update_if_not_exists(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='SET created_at = if_not_exists(created_at, :created_at)', + ExpressionAttributeValues={ + ':created_at': 123 + } + ) + + resp = table.scan() + assert resp['Items'][0]['created_at'] == 123 + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='SET created_at = if_not_exists(created_at, :created_at)', + ExpressionAttributeValues={ + ':created_at': 456 + } + ) + + resp = table.scan() + # Still the original value + assert resp['Items'][0]['created_at'] == 123 From df7a7958c1e20ffa76d07bbbd1ffd24cf75c2995 Mon Sep 17 00:00:00 2001 From: William Richard Date: Thu, 14 Dec 2017 07:06:04 -0500 Subject: [PATCH 003/182] Path is an optional property of instance profile cloudformation resource (#1382) * Path is an optional property of instance profile cloudformation resource http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html * Path is also optional for iam role clouformation resources Based on http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html * Use `properities.get` with a default instead of doing default handling myself --- moto/iam/models.py | 4 +- .../test_cloudformation_stack_integration.py | 94 +++++++++++++------ 2 files changed, 69 insertions(+), 29 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 7839d3a74..57d24826d 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -122,7 +122,7 @@ class Role(BaseModel): role = iam_backend.create_role( role_name=resource_name, assume_role_policy_document=properties['AssumeRolePolicyDocument'], - path=properties['Path'], + path=properties.get('Path', '/'), ) policies = properties.get('Policies', []) @@ -173,7 +173,7 @@ class InstanceProfile(BaseModel): role_ids = properties['Roles'] return iam_backend.create_instance_profile( name=resource_name, - path=properties['Path'], + path=properties.get('Path', '/'), role_ids=role_ids, ) diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index c4a138de7..051d8bed7 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -891,19 +891,25 @@ def test_iam_roles(): "my-launch-config": { "Properties": { - "IamInstanceProfile": {"Ref": "my-instance-profile"}, + "IamInstanceProfile": {"Ref": "my-instance-profile-with-path"}, "ImageId": "ami-1234abcd", }, "Type": "AWS::AutoScaling::LaunchConfiguration" }, - "my-instance-profile": { + "my-instance-profile-with-path": { "Properties": { "Path": "my-path", - "Roles": [{"Ref": "my-role"}], + "Roles": [{"Ref": "my-role-with-path"}], }, "Type": "AWS::IAM::InstanceProfile" }, - "my-role": { + "my-instance-profile-no-path": { + "Properties": { + "Roles": [{"Ref": "my-role-no-path"}], + }, + "Type": "AWS::IAM::InstanceProfile" + }, + "my-role-with-path": { "Properties": { "AssumeRolePolicyDocument": { "Statement": [ @@ -961,6 +967,26 @@ def test_iam_roles(): ] }, "Type": "AWS::IAM::Role" + }, + "my-role-no-path": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + }, + }, + "Type": "AWS::IAM::Role" } } } @@ -974,37 +1000,51 @@ def test_iam_roles(): iam_conn = boto.iam.connect_to_region("us-west-1") - role_result = iam_conn.list_roles()['list_roles_response'][ - 'list_roles_result']['roles'][0] - role = iam_conn.get_role(role_result.role_name) - role.role_name.should.contain("my-role") - role.path.should.equal("my-path") + role_results = iam_conn.list_roles()['list_roles_response'][ + 'list_roles_result']['roles'] + role_name_to_id = {} + for role_result in role_results: + role = iam_conn.get_role(role_result.role_name) + role.role_name.should.contain("my-role") + if 'with-path' in role.role_name: + role_name_to_id['with-path'] = role.role_id + role.path.should.equal("my-path") + else: + role_name_to_id['no-path'] = role.role_id + role.role_name.should.contain('no-path') + role.path.should.equal('/') - instance_profile_response = iam_conn.list_instance_profiles()[ - 'list_instance_profiles_response'] - cfn_instance_profile = instance_profile_response[ - 'list_instance_profiles_result']['instance_profiles'][0] - instance_profile = iam_conn.get_instance_profile( - cfn_instance_profile.instance_profile_name) - instance_profile.instance_profile_name.should.contain( - "my-instance-profile") - instance_profile.path.should.equal("my-path") - instance_profile.role_id.should.equal(role.role_id) + instance_profile_responses = iam_conn.list_instance_profiles()[ + 'list_instance_profiles_response']['list_instance_profiles_result']['instance_profiles'] + instance_profile_responses.should.have.length_of(2) + instance_profile_names = [] + + for instance_profile_response in instance_profile_responses: + instance_profile = iam_conn.get_instance_profile(instance_profile_response.instance_profile_name) + instance_profile_names.append(instance_profile.instance_profile_name) + instance_profile.instance_profile_name.should.contain( + "my-instance-profile") + if "with-path" in instance_profile.instance_profile_name: + instance_profile.path.should.equal("my-path") + instance_profile.role_id.should.equal(role_name_to_id['with-path']) + else: + instance_profile.instance_profile_name.should.contain('no-path') + instance_profile.role_id.should.equal(role_name_to_id['no-path']) + instance_profile.path.should.equal('/') autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") launch_config = autoscale_conn.get_all_launch_configurations()[0] - launch_config.instance_profile_name.should.contain("my-instance-profile") + launch_config.instance_profile_name.should.contain("my-instance-profile-with-path") stack = conn.describe_stacks()[0] resources = stack.describe_resources() - instance_profile_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'][0] - instance_profile_resource.physical_resource_id.should.equal( - instance_profile.instance_profile_name) + instance_profile_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'] + {ip.physical_resource_id for ip in instance_profile_resources}.should.equal(set(instance_profile_names)) - role_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'][0] - role_resource.physical_resource_id.should.equal(role.role_id) + role_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'] + {r.physical_resource_id for r in role_resources}.should.equal(set(role_name_to_id.values())) @mock_ec2_deprecated() From e75f3ef4d4f7b0fa7e14dbb4431d379b68ae5502 Mon Sep 17 00:00:00 2001 From: Alex M Date: Thu, 14 Dec 2017 04:07:23 -0800 Subject: [PATCH 004/182] Implement execute change set endpoint (#1391) --- IMPLEMENTATION_COVERAGE.md | 4 +-- moto/cloudformation/models.py | 18 ++++++++++++ moto/cloudformation/responses.py | 28 +++++++++++++++++++ .../test_cloudformation_stack_crud_boto3.py | 24 ++++++++++++++++ 4 files changed, 72 insertions(+), 2 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 76944e3fe..e2a3b7af0 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -329,7 +329,7 @@ - [ ] update_schema - [ ] update_typed_link_facet -## cloudformation - 20% implemented +## cloudformation - 23% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set @@ -350,7 +350,7 @@ - [ ] describe_stack_set_operation - [X] describe_stacks - [ ] estimate_template_cost -- [ ] execute_change_set +- [X] execute_change_set - [ ] get_stack_policy - [ ] get_template - [ ] get_template_summary diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 42809608b..70c15d697 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -188,6 +188,24 @@ class CloudFormationBackend(BaseBackend): self.change_sets[change_set_id] = stack return change_set_id, stack.stack_id + def execute_change_set(self, change_set_name, stack_name=None): + stack = None + if change_set_name in self.change_sets: + # This means arn was passed in + stack = self.change_sets[change_set_name] + else: + for cs in self.change_sets: + if self.change_sets[cs].name == change_set_name: + stack = self.change_sets[cs] + if stack is None: + raise ValidationError(stack_name) + if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS': + stack._add_stack_event('CREATE_COMPLETE') + else: + stack._add_stack_event('UPDATE_IN_PROGRESS') + stack._add_stack_event('UPDATE_COMPLETE') + return True + def describe_stacks(self, name_or_stack_id): stacks = self.stacks.values() if name_or_stack_id: diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 93d59f686..07d263652 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -118,6 +118,24 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(CREATE_CHANGE_SET_RESPONSE_TEMPLATE) return template.render(stack_id=stack_id, change_set_id=change_set_id) + @amzn_request_id + def execute_change_set(self): + stack_name = self._get_param('StackName') + change_set_name = self._get_param('ChangeSetName') + self.cloudformation_backend.execute_change_set( + stack_name=stack_name, + change_set_name=change_set_name, + ) + if self.request_json: + return json.dumps({ + 'ExecuteChangeSetResponse': { + 'ExecuteChangeSetResult': {}, + } + }) + else: + template = self.response_template(EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE) + return template.render() + def describe_stacks(self): stack_name_or_id = None if self._get_param('StackName'): @@ -302,6 +320,16 @@ CREATE_CHANGE_SET_RESPONSE_TEMPLATE = """ """ +EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE = """ + + + + + {{ request_id }} + + +""" + DESCRIBE_STACKS_TEMPLATE = """ diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index d8b8cf142..1f3bfdec7 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -337,6 +337,30 @@ def test_create_change_set_from_s3_url(): assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId'] +@mock_cloudformation +def test_execute_change_set_w_arn(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + change_set = cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + cf_conn.execute_change_set(ChangeSetName=change_set['Id']) + + +@mock_cloudformation +def test_execute_change_set_w_name(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + change_set = cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + cf_conn.execute_change_set(ChangeSetName='NewStack', StackName='NewStack') + + @mock_cloudformation def test_describe_stack_pagination(): conn = boto3.client('cloudformation', region_name='us-east-1') From 7a4e48e8df2957fdadb7c6f91184e24a39407ed4 Mon Sep 17 00:00:00 2001 From: Semyon Maryasin Date: Sat, 16 Dec 2017 05:16:45 +0300 Subject: [PATCH 005/182] mock_xray_client: do return what f() returned fixes #1399 this won't help with fixtures though --- moto/xray/mock_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py index 6e2164d63..135796054 100644 --- a/moto/xray/mock_client.py +++ b/moto/xray/mock_client.py @@ -51,7 +51,7 @@ def mock_xray_client(f): aws_xray_sdk.core.xray_recorder._emitter = MockEmitter() try: - f(*args, **kwargs) + return f(*args, **kwargs) finally: if old_xray_context_var is None: From 21606bc8aedf29d09892c080de924fcd83a9b7ba Mon Sep 17 00:00:00 2001 From: NimbusScale Date: Mon, 18 Dec 2017 20:44:04 -0800 Subject: [PATCH 006/182] update support JSON or YAML --- moto/cloudformation/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 70c15d697..b89d76605 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -107,7 +107,7 @@ class FakeStack(BaseModel): def update(self, template, role_arn=None, parameters=None, tags=None): self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") self.template = template - self.resource_map.update(json.loads(template), parameters) + self.resource_map.update(self.template_dict, parameters) self.output_map = self._create_output_map() self._add_stack_event("UPDATE_COMPLETE") self.status = "UPDATE_COMPLETE" From bb4bc01999ba47e3210b44b89153093bb421c6dd Mon Sep 17 00:00:00 2001 From: Joe Keegan Date: Thu, 21 Dec 2017 12:10:27 -0800 Subject: [PATCH 007/182] update self.template_dict based on new template --- moto/cloudformation/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index b89d76605..57f42df56 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -107,6 +107,7 @@ class FakeStack(BaseModel): def update(self, template, role_arn=None, parameters=None, tags=None): self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") self.template = template + self._parse_template() self.resource_map.update(self.template_dict, parameters) self.output_map = self._create_output_map() self._add_stack_event("UPDATE_COMPLETE") From 6f6a881e52632a48abd1f1ac6836cd1a86d09996 Mon Sep 17 00:00:00 2001 From: Joe Keegan Date: Thu, 21 Dec 2017 14:12:43 -0800 Subject: [PATCH 008/182] rerun tests From c68cd650e77f801da4a10fcec3ec616378e5fb72 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Fri, 22 Dec 2017 18:50:18 +0530 Subject: [PATCH 009/182] Make sure invalid or malformed AMIs raise an exception Closes: https://github.com/spulec/moto/issues/1408 --- moto/ec2/models.py | 10 ++++++++++ tests/test_ec2/test_amis.py | 14 ++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 80bbf8439..932f535a1 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -48,6 +48,7 @@ from .exceptions import ( InvalidRouteError, InvalidInstanceIdError, InvalidAMIIdError, + MalformedAMIIdError, InvalidAMIAttributeItemValueError, InvalidSnapshotIdError, InvalidVolumeIdError, @@ -1122,6 +1123,9 @@ class Ami(TaggedEC2Resource): class AmiBackend(object): + + AMI_REGEX = re.compile("ami-[a-z0-9]+") + def __init__(self): self.amis = {} @@ -1170,6 +1174,12 @@ class AmiBackend(object): if ami_ids: images = [ami for ami in images if ami.id in ami_ids] + if len(ami_ids) > len(images): + unknown_ids = set(ami_ids) - set(images) + for id in unknown_ids: + if not self.AMI_REGEX.match(id): + raise MalformedAMIIdError(id) + raise InvalidAMIIdError(unknown_ids) # Generic filters if filters: diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 1029ba39e..9ba782414 100755 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -8,6 +8,7 @@ import boto3 import boto.ec2 import boto3 from boto.exception import EC2ResponseError, EC2ResponseError +from botocore.exceptions import ClientError import sure # noqa @@ -666,6 +667,19 @@ def test_ami_attribute_error_cases(): cm.exception.request_id.should_not.be.none +@mock_ec2 +def test_ami_describe_non_existent(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + # Valid pattern but non-existent id + img = ec2.Image('ami-abcd1234') + with assert_raises(ClientError): + img.load() + # Invalid ami pattern + img = ec2.Image('not_an_ami_id') + with assert_raises(ClientError): + img.load() + + @mock_ec2 def test_ami_filter_wildcard(): ec2 = boto3.resource('ec2', region_name='us-west-1') From bd037742ad581afe7e3fb9a2e255577a6ea13f47 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 20 Nov 2017 12:13:44 -0800 Subject: [PATCH 010/182] Make releasing easier by making Makefile resilient --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 99b7f2620..5d8bc7011 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ publish: implementation_coverage \ implementation_coverage: ./scripts/implementation_coverage.py > IMPLEMENTATION_COVERAGE.md - git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage" + git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage" || true scaffold: @pip install -r requirements-dev.txt > /dev/null From 025df574e45ecb37d1f83f68688d114e59a9dd7a Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 22 Dec 2017 09:16:27 -0800 Subject: [PATCH 011/182] Updating implementation coverage --- IMPLEMENTATION_COVERAGE.md | 590 ++++++++++++++++++++++++++++++++++++- 1 file changed, 579 insertions(+), 11 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index e2a3b7af0..4a3ebd215 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -11,7 +11,45 @@ - [X] request_certificate - [ ] resend_validation_email -## apigateway - 18% implemented +## alexaforbusiness - 0% implemented +- [ ] associate_device_with_room +- [ ] associate_skill_group_with_room +- [ ] create_profile +- [ ] create_room +- [ ] create_skill_group +- [ ] create_user +- [ ] delete_profile +- [ ] delete_room +- [ ] delete_room_skill_parameter +- [ ] delete_skill_group +- [ ] delete_user +- [ ] disassociate_device_from_room +- [ ] disassociate_skill_group_from_room +- [ ] get_device +- [ ] get_profile +- [ ] get_room +- [ ] get_room_skill_parameter +- [ ] get_skill_group +- [ ] list_skills +- [ ] list_tags +- [ ] put_room_skill_parameter +- [ ] resolve_room +- [ ] revoke_invitation +- [ ] search_devices +- [ ] search_profiles +- [ ] search_rooms +- [ ] search_skill_groups +- [ ] search_users +- [ ] send_invitation +- [ ] start_device_sync +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_device +- [ ] update_profile +- [ ] update_room +- [ ] update_skill_group + +## apigateway - 17% implemented - [ ] create_api_key - [ ] create_authorizer - [ ] create_base_path_mapping @@ -26,6 +64,7 @@ - [X] create_stage - [ ] create_usage_plan - [ ] create_usage_plan_key +- [ ] create_vpc_link - [ ] delete_api_key - [ ] delete_authorizer - [ ] delete_base_path_mapping @@ -46,6 +85,7 @@ - [ ] delete_stage - [ ] delete_usage_plan - [ ] delete_usage_plan_key +- [ ] delete_vpc_link - [ ] flush_stage_authorizers_cache - [ ] flush_stage_cache - [ ] generate_client_certificate @@ -87,11 +127,14 @@ - [ ] get_sdk_types - [X] get_stage - [X] get_stages +- [ ] get_tags - [ ] get_usage - [ ] get_usage_plan - [ ] get_usage_plan_key - [ ] get_usage_plan_keys - [ ] get_usage_plans +- [ ] get_vpc_link +- [ ] get_vpc_links - [ ] import_api_keys - [ ] import_documentation_parts - [ ] import_rest_api @@ -101,8 +144,10 @@ - [ ] put_method - [ ] put_method_response - [ ] put_rest_api +- [ ] tag_resource - [ ] test_invoke_authorizer - [ ] test_invoke_method +- [ ] untag_resource - [ ] update_account - [ ] update_api_key - [ ] update_authorizer @@ -124,6 +169,7 @@ - [X] update_stage - [ ] update_usage - [ ] update_usage_plan +- [ ] update_vpc_link ## application-autoscaling - 0% implemented - [ ] delete_scaling_policy @@ -160,14 +206,45 @@ - [ ] expire_session - [ ] list_associated_fleets - [ ] list_associated_stacks +- [ ] list_tags_for_resource - [ ] start_fleet - [ ] start_image_builder - [ ] stop_fleet - [ ] stop_image_builder +- [ ] tag_resource +- [ ] untag_resource - [ ] update_directory_config - [ ] update_fleet - [ ] update_stack +## appsync - 0% implemented +- [ ] create_api_key +- [ ] create_data_source +- [ ] create_graphql_api +- [ ] create_resolver +- [ ] create_type +- [ ] delete_api_key +- [ ] delete_data_source +- [ ] delete_graphql_api +- [ ] delete_resolver +- [ ] delete_type +- [ ] get_data_source +- [ ] get_graphql_api +- [ ] get_introspection_schema +- [ ] get_resolver +- [ ] get_schema_creation_status +- [ ] get_type +- [ ] list_api_keys +- [ ] list_data_sources +- [ ] list_graphql_apis +- [ ] list_resolvers +- [ ] list_types +- [ ] start_schema_creation +- [ ] update_data_source +- [ ] update_graphql_api +- [ ] update_resolver +- [ ] update_type + ## athena - 0% implemented - [ ] batch_get_named_query - [ ] batch_get_query_execution @@ -268,6 +345,24 @@ - [ ] update_notification - [ ] update_subscriber +## ce - 0% implemented +- [ ] get_cost_and_usage +- [ ] get_dimension_values +- [ ] get_reservation_utilization +- [ ] get_tags + +## cloud9 - 0% implemented +- [ ] create_environment_ec2 +- [ ] create_environment_membership +- [ ] delete_environment +- [ ] delete_environment_membership +- [ ] describe_environment_memberships +- [ ] describe_environment_status +- [ ] describe_environments +- [ ] list_environments +- [ ] update_environment +- [ ] update_environment_membership + ## clouddirectory - 0% implemented - [ ] add_facet_to_object - [ ] apply_schema @@ -294,6 +389,7 @@ - [ ] detach_typed_link - [ ] disable_directory - [ ] enable_directory +- [ ] get_applied_schema_version - [ ] get_directory - [ ] get_facet - [ ] get_object_information @@ -328,8 +424,10 @@ - [ ] update_object_attributes - [ ] update_schema - [ ] update_typed_link_facet +- [ ] upgrade_applied_schema +- [ ] upgrade_published_schema -## cloudformation - 23% implemented +## cloudformation - 21% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set @@ -367,6 +465,7 @@ - [ ] signal_resource - [ ] stop_stack_set_operation - [X] update_stack +- [ ] update_stack_instances - [ ] update_stack_set - [ ] update_termination_protection - [ ] validate_template @@ -482,7 +581,7 @@ - [ ] stop_logging - [ ] update_trail -## cloudwatch - 53% implemented +## cloudwatch - 60% implemented - [X] delete_alarms - [X] delete_dashboards - [ ] describe_alarm_history @@ -491,7 +590,7 @@ - [ ] disable_alarm_actions - [ ] enable_alarm_actions - [X] get_dashboard -- [ ] get_metric_statistics +- [X] get_metric_statistics - [X] list_dashboards - [ ] list_metrics - [X] put_dashboard @@ -507,6 +606,7 @@ - [ ] create_webhook - [ ] delete_project - [ ] delete_webhook +- [ ] invalidate_project_cache - [ ] list_builds - [ ] list_builds_for_project - [ ] list_curated_environment_images @@ -518,20 +618,37 @@ ## codecommit - 0% implemented - [ ] batch_get_repositories - [ ] create_branch +- [ ] create_pull_request - [ ] create_repository - [ ] delete_branch +- [ ] delete_comment_content - [ ] delete_repository +- [ ] describe_pull_request_events - [ ] get_blob - [ ] get_branch +- [ ] get_comment +- [ ] get_comments_for_compared_commit +- [ ] get_comments_for_pull_request - [ ] get_commit - [ ] get_differences +- [ ] get_merge_conflicts +- [ ] get_pull_request - [ ] get_repository - [ ] get_repository_triggers - [ ] list_branches +- [ ] list_pull_requests - [ ] list_repositories +- [ ] merge_pull_request_by_fast_forward +- [ ] post_comment_for_compared_commit +- [ ] post_comment_for_pull_request +- [ ] post_comment_reply - [ ] put_repository_triggers - [ ] test_repository_triggers +- [ ] update_comment - [ ] update_default_branch +- [ ] update_pull_request_description +- [ ] update_pull_request_status +- [ ] update_pull_request_title - [ ] update_repository_description - [ ] update_repository_name @@ -567,6 +684,7 @@ - [ ] list_deployments - [ ] list_git_hub_account_token_names - [ ] list_on_premises_instances +- [ ] put_lifecycle_event_hook_execution_status - [ ] register_application_revision - [ ] register_on_premises_instance - [ ] remove_tags_from_on_premises_instances @@ -661,13 +779,17 @@ - [ ] admin_link_provider_for_user - [ ] admin_list_devices - [ ] admin_list_groups_for_user +- [ ] admin_list_user_auth_events - [ ] admin_remove_user_from_group - [ ] admin_reset_user_password - [ ] admin_respond_to_auth_challenge +- [ ] admin_set_user_mfa_preference - [ ] admin_set_user_settings +- [ ] admin_update_auth_event_feedback - [ ] admin_update_device_status - [ ] admin_update_user_attributes - [ ] admin_user_global_sign_out +- [ ] associate_software_token - [ ] change_password - [ ] confirm_device - [ ] confirm_forgot_password @@ -689,6 +811,7 @@ - [ ] delete_user_pool_domain - [ ] describe_identity_provider - [ ] describe_resource_server +- [ ] describe_risk_configuration - [ ] describe_user_import_job - [ ] describe_user_pool - [ ] describe_user_pool_client @@ -702,6 +825,7 @@ - [ ] get_ui_customization - [ ] get_user - [ ] get_user_attribute_verification_code +- [ ] get_user_pool_mfa_config - [ ] global_sign_out - [ ] initiate_auth - [ ] list_devices @@ -715,11 +839,15 @@ - [ ] list_users_in_group - [ ] resend_confirmation_code - [ ] respond_to_auth_challenge +- [ ] set_risk_configuration - [ ] set_ui_customization +- [ ] set_user_mfa_preference +- [ ] set_user_pool_mfa_config - [ ] set_user_settings - [ ] sign_up - [ ] start_user_import_job - [ ] stop_user_import_job +- [ ] update_auth_event_feedback - [ ] update_device_status - [ ] update_group - [ ] update_identity_provider @@ -727,6 +855,7 @@ - [ ] update_user_attributes - [ ] update_user_pool - [ ] update_user_pool_client +- [ ] verify_software_token - [ ] verify_user_attribute ## cognito-sync - 0% implemented @@ -748,6 +877,19 @@ - [ ] unsubscribe_from_dataset - [ ] update_records +## comprehend - 0% implemented +- [ ] batch_detect_dominant_language +- [ ] batch_detect_entities +- [ ] batch_detect_key_phrases +- [ ] batch_detect_sentiment +- [ ] describe_topics_detection_job +- [ ] detect_dominant_language +- [ ] detect_entities +- [ ] detect_key_phrases +- [ ] detect_sentiment +- [ ] list_topics_detection_jobs +- [ ] start_topics_detection_job + ## config - 0% implemented - [ ] delete_config_rule - [ ] delete_configuration_recorder @@ -1030,23 +1172,33 @@ - [ ] update_radius - [ ] verify_trust -## dynamodb - 36% implemented +## dynamodb - 24% implemented - [ ] batch_get_item - [ ] batch_write_item +- [ ] create_backup +- [ ] create_global_table - [X] create_table +- [ ] delete_backup - [X] delete_item - [X] delete_table +- [ ] describe_backup +- [ ] describe_continuous_backups +- [ ] describe_global_table - [ ] describe_limits - [ ] describe_table - [ ] describe_time_to_live - [X] get_item +- [ ] list_backups +- [ ] list_global_tables - [ ] list_tables - [ ] list_tags_of_resource - [X] put_item - [X] query +- [ ] restore_table_from_backup - [X] scan - [ ] tag_resource - [ ] untag_resource +- [ ] update_global_table - [ ] update_item - [ ] update_table - [ ] update_time_to_live @@ -1057,8 +1209,9 @@ - [ ] get_shard_iterator - [ ] list_streams -## ec2 - 39% implemented +## ec2 - 36% implemented - [ ] accept_reserved_instances_exchange_quote +- [ ] accept_vpc_endpoint_connections - [X] accept_vpc_peering_connection - [X] allocate_address - [ ] allocate_hosts @@ -1100,6 +1253,8 @@ - [ ] create_instance_export_task - [X] create_internet_gateway - [X] create_key_pair +- [ ] create_launch_template +- [ ] create_launch_template_version - [X] create_nat_gateway - [X] create_network_acl - [X] create_network_acl_entry @@ -1117,6 +1272,8 @@ - [X] create_volume - [X] create_vpc - [ ] create_vpc_endpoint +- [ ] create_vpc_endpoint_connection_notification +- [ ] create_vpc_endpoint_service_configuration - [X] create_vpc_peering_connection - [X] create_vpn_connection - [ ] create_vpn_connection_route @@ -1128,6 +1285,8 @@ - [ ] delete_fpga_image - [X] delete_internet_gateway - [X] delete_key_pair +- [ ] delete_launch_template +- [ ] delete_launch_template_versions - [X] delete_nat_gateway - [X] delete_network_acl - [X] delete_network_acl_entry @@ -1143,6 +1302,8 @@ - [X] delete_tags - [X] delete_volume - [X] delete_vpc +- [ ] delete_vpc_endpoint_connection_notifications +- [ ] delete_vpc_endpoint_service_configurations - [ ] delete_vpc_endpoints - [X] delete_vpc_peering_connection - [X] delete_vpn_connection @@ -1174,10 +1335,13 @@ - [ ] describe_import_image_tasks - [ ] describe_import_snapshot_tasks - [X] describe_instance_attribute +- [ ] describe_instance_credit_specifications - [ ] describe_instance_status - [ ] describe_instances - [X] describe_internet_gateways - [X] describe_key_pairs +- [ ] describe_launch_template_versions +- [ ] describe_launch_templates - [ ] describe_moving_addresses - [ ] describe_nat_gateways - [ ] describe_network_acls @@ -1214,6 +1378,10 @@ - [X] describe_vpc_attribute - [ ] describe_vpc_classic_link - [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_connection_notifications +- [ ] describe_vpc_endpoint_connections +- [ ] describe_vpc_endpoint_service_configurations +- [ ] describe_vpc_endpoint_service_permissions - [ ] describe_vpc_endpoint_services - [ ] describe_vpc_endpoints - [ ] describe_vpc_peering_connections @@ -1240,6 +1408,7 @@ - [ ] get_console_output - [ ] get_console_screenshot - [ ] get_host_reservation_purchase_preview +- [ ] get_launch_template_data - [ ] get_password_data - [ ] get_reserved_instances_exchange_quote - [ ] import_image @@ -1253,7 +1422,9 @@ - [ ] modify_identity_id_format - [ ] modify_image_attribute - [X] modify_instance_attribute +- [ ] modify_instance_credit_specification - [ ] modify_instance_placement +- [ ] modify_launch_template - [X] modify_network_interface_attribute - [ ] modify_reserved_instances - [ ] modify_snapshot_attribute @@ -1263,6 +1434,9 @@ - [ ] modify_volume_attribute - [X] modify_vpc_attribute - [ ] modify_vpc_endpoint +- [ ] modify_vpc_endpoint_connection_notification +- [ ] modify_vpc_endpoint_service_configuration +- [ ] modify_vpc_endpoint_service_permissions - [ ] modify_vpc_peering_connection_options - [ ] modify_vpc_tenancy - [ ] monitor_instances @@ -1272,6 +1446,7 @@ - [ ] purchase_scheduled_instances - [X] reboot_instances - [ ] register_image +- [ ] reject_vpc_endpoint_connections - [X] reject_vpc_peering_connection - [X] release_address - [ ] release_hosts @@ -1611,7 +1786,6 @@ - [ ] create_delivery_stream - [ ] delete_delivery_stream - [ ] describe_delivery_stream -- [ ] get_kinesis_stream - [ ] list_delivery_streams - [ ] put_record - [ ] put_record_batch @@ -1810,6 +1984,9 @@ - [ ] create_group_version - [ ] create_logger_definition - [ ] create_logger_definition_version +- [ ] create_resource_definition +- [ ] create_resource_definition_version +- [ ] create_software_update_job - [ ] create_subscription_definition - [ ] create_subscription_definition_version - [ ] delete_core_definition @@ -1817,6 +1994,7 @@ - [ ] delete_function_definition - [ ] delete_group - [ ] delete_logger_definition +- [ ] delete_resource_definition - [ ] delete_subscription_definition - [ ] disassociate_role_from_group - [ ] disassociate_service_role_from_account @@ -1835,6 +2013,8 @@ - [ ] get_group_version - [ ] get_logger_definition - [ ] get_logger_definition_version +- [ ] get_resource_definition +- [ ] get_resource_definition_version - [ ] get_service_role_for_account - [ ] get_subscription_definition - [ ] get_subscription_definition_version @@ -1850,6 +2030,8 @@ - [ ] list_groups - [ ] list_logger_definition_versions - [ ] list_logger_definitions +- [ ] list_resource_definition_versions +- [ ] list_resource_definitions - [ ] list_subscription_definition_versions - [ ] list_subscription_definitions - [ ] reset_deployments @@ -1860,8 +2042,48 @@ - [ ] update_group - [ ] update_group_certificate_configuration - [ ] update_logger_definition +- [ ] update_resource_definition - [ ] update_subscription_definition +## guardduty - 0% implemented +- [ ] accept_invitation +- [ ] archive_findings +- [ ] create_detector +- [ ] create_ip_set +- [ ] create_members +- [ ] create_sample_findings +- [ ] create_threat_intel_set +- [ ] decline_invitations +- [ ] delete_detector +- [ ] delete_invitations +- [ ] delete_ip_set +- [ ] delete_members +- [ ] delete_threat_intel_set +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] get_detector +- [ ] get_findings +- [ ] get_findings_statistics +- [ ] get_invitations_count +- [ ] get_ip_set +- [ ] get_master_account +- [ ] get_members +- [ ] get_threat_intel_set +- [ ] invite_members +- [ ] list_detectors +- [ ] list_findings +- [ ] list_invitations +- [ ] list_ip_sets +- [ ] list_members +- [ ] list_threat_intel_sets +- [ ] start_monitoring_members +- [ ] stop_monitoring_members +- [ ] unarchive_findings +- [ ] update_detector +- [ ] update_findings_feedback +- [ ] update_ip_set +- [ ] update_threat_intel_set + ## health - 0% implemented - [ ] describe_affected_entities - [ ] describe_entity_aggregates @@ -1870,7 +2092,7 @@ - [ ] describe_event_types - [ ] describe_events -## iam - 46% implemented +## iam - 47% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -2037,64 +2259,130 @@ - [ ] unsubscribe_from_event - [ ] update_assessment_target -## iot - 45% implemented +## iot - 21% implemented - [ ] accept_certificate_transfer +- [ ] add_thing_to_thing_group +- [ ] associate_targets_with_job +- [ ] attach_policy - [X] attach_principal_policy - [X] attach_thing_principal - [ ] cancel_certificate_transfer +- [ ] cancel_job +- [ ] clear_default_authorizer +- [ ] create_authorizer - [ ] create_certificate_from_csr +- [ ] create_job - [X] create_keys_and_certificate +- [ ] create_ota_update - [X] create_policy - [ ] create_policy_version +- [ ] create_role_alias +- [ ] create_stream - [X] create_thing +- [ ] create_thing_group - [X] create_thing_type - [ ] create_topic_rule +- [ ] delete_authorizer - [ ] delete_ca_certificate - [X] delete_certificate +- [ ] delete_ota_update - [X] delete_policy - [ ] delete_policy_version - [ ] delete_registration_code +- [ ] delete_role_alias +- [ ] delete_stream - [X] delete_thing +- [ ] delete_thing_group - [X] delete_thing_type - [ ] delete_topic_rule +- [ ] delete_v2_logging_level - [ ] deprecate_thing_type +- [ ] describe_authorizer - [ ] describe_ca_certificate - [X] describe_certificate +- [ ] describe_default_authorizer - [ ] describe_endpoint +- [ ] describe_event_configurations +- [ ] describe_index +- [ ] describe_job +- [ ] describe_job_execution +- [ ] describe_role_alias +- [ ] describe_stream - [X] describe_thing +- [ ] describe_thing_group +- [ ] describe_thing_registration_task - [X] describe_thing_type +- [ ] detach_policy - [X] detach_principal_policy - [X] detach_thing_principal - [ ] disable_topic_rule - [ ] enable_topic_rule +- [ ] get_effective_policies +- [ ] get_indexing_configuration +- [ ] get_job_document - [ ] get_logging_options +- [ ] get_ota_update - [X] get_policy - [ ] get_policy_version - [ ] get_registration_code - [ ] get_topic_rule +- [ ] get_v2_logging_options +- [ ] list_attached_policies +- [ ] list_authorizers - [ ] list_ca_certificates - [X] list_certificates - [ ] list_certificates_by_ca +- [ ] list_indices +- [ ] list_job_executions_for_job +- [ ] list_job_executions_for_thing +- [ ] list_jobs +- [ ] list_ota_updates - [ ] list_outgoing_certificates - [X] list_policies - [X] list_policy_principals - [ ] list_policy_versions - [X] list_principal_policies - [X] list_principal_things +- [ ] list_role_aliases +- [ ] list_streams +- [ ] list_targets_for_policy +- [ ] list_thing_groups +- [ ] list_thing_groups_for_thing - [X] list_thing_principals +- [ ] list_thing_registration_task_reports +- [ ] list_thing_registration_tasks - [X] list_thing_types - [X] list_things +- [ ] list_things_in_thing_group - [ ] list_topic_rules +- [ ] list_v2_logging_levels - [ ] register_ca_certificate - [ ] register_certificate +- [ ] register_thing - [ ] reject_certificate_transfer +- [ ] remove_thing_from_thing_group - [ ] replace_topic_rule +- [ ] search_index +- [ ] set_default_authorizer - [ ] set_default_policy_version - [ ] set_logging_options +- [ ] set_v2_logging_level +- [ ] set_v2_logging_options +- [ ] start_thing_registration_task +- [ ] stop_thing_registration_task +- [ ] test_authorization +- [ ] test_invoke_authorizer - [ ] transfer_certificate +- [ ] update_authorizer - [ ] update_ca_certificate - [X] update_certificate +- [ ] update_event_configurations +- [ ] update_indexing_configuration +- [ ] update_role_alias +- [ ] update_stream - [X] update_thing +- [ ] update_thing_group +- [ ] update_thing_groups_for_thing ## iot-data - 0% implemented - [ ] delete_thing_shadow @@ -2102,13 +2390,20 @@ - [ ] publish - [ ] update_thing_shadow -## kinesis - 61% implemented +## iot-jobs-data - 0% implemented +- [ ] describe_job_execution +- [ ] get_pending_job_executions +- [ ] start_next_pending_job_execution +- [ ] update_job_execution + +## kinesis - 59% implemented - [X] add_tags_to_stream - [X] create_stream - [ ] decrease_stream_retention_period - [X] delete_stream - [ ] describe_limits - [X] describe_stream +- [ ] describe_stream_summary - [ ] disable_enhanced_monitoring - [ ] enable_enhanced_monitoring - [X] get_records @@ -2125,6 +2420,13 @@ - [ ] stop_stream_encryption - [ ] update_shard_count +## kinesis-video-archived-media - 0% implemented +- [ ] get_media_for_fragment_list +- [ ] list_fragments + +## kinesis-video-media - 0% implemented +- [ ] get_media + ## kinesisanalytics - 0% implemented - [ ] add_application_cloud_watch_logging_option - [ ] add_application_input @@ -2144,6 +2446,18 @@ - [ ] stop_application - [ ] update_application +## kinesisvideo - 0% implemented +- [ ] create_stream +- [ ] delete_stream +- [ ] describe_stream +- [ ] get_data_endpoint +- [ ] list_streams +- [ ] list_tags_for_stream +- [ ] tag_stream +- [ ] untag_stream +- [ ] update_data_retention +- [ ] update_stream + ## kms - 25% implemented - [ ] cancel_key_deletion - [ ] create_alias @@ -2189,6 +2503,7 @@ - [ ] delete_alias - [ ] delete_event_source_mapping - [ ] delete_function +- [ ] delete_function_concurrency - [ ] get_account_settings - [ ] get_alias - [ ] get_event_source_mapping @@ -2203,6 +2518,7 @@ - [ ] list_tags - [ ] list_versions_by_function - [ ] publish_version +- [ ] put_function_concurrency - [ ] remove_permission - [ ] tag_resource - [ ] untag_resource @@ -2254,6 +2570,8 @@ ## lightsail - 0% implemented - [ ] allocate_static_ip - [ ] attach_disk +- [ ] attach_instances_to_load_balancer +- [ ] attach_load_balancer_tls_certificate - [ ] attach_static_ip - [ ] close_instance_public_ports - [ ] create_disk @@ -2265,6 +2583,8 @@ - [ ] create_instances - [ ] create_instances_from_snapshot - [ ] create_key_pair +- [ ] create_load_balancer +- [ ] create_load_balancer_tls_certificate - [ ] delete_disk - [ ] delete_disk_snapshot - [ ] delete_domain @@ -2272,7 +2592,10 @@ - [ ] delete_instance - [ ] delete_instance_snapshot - [ ] delete_key_pair +- [ ] delete_load_balancer +- [ ] delete_load_balancer_tls_certificate - [ ] detach_disk +- [ ] detach_instances_from_load_balancer - [ ] detach_static_ip - [ ] download_default_key_pair - [ ] get_active_names @@ -2294,6 +2617,10 @@ - [ ] get_instances - [ ] get_key_pair - [ ] get_key_pairs +- [ ] get_load_balancer +- [ ] get_load_balancer_metric_data +- [ ] get_load_balancer_tls_certificates +- [ ] get_load_balancers - [ ] get_operation - [ ] get_operations - [ ] get_operations_for_resource @@ -2311,6 +2638,7 @@ - [ ] stop_instance - [ ] unpeer_vpc - [ ] update_domain_entry +- [ ] update_load_balancer_attribute ## logs - 24% implemented - [ ] associate_kms_key @@ -2384,6 +2712,73 @@ - [ ] generate_data_set - [ ] start_support_data_export +## mediaconvert - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_job_template +- [ ] create_preset +- [ ] create_queue +- [ ] delete_job_template +- [ ] delete_preset +- [ ] delete_queue +- [ ] describe_endpoints +- [ ] get_job +- [ ] get_job_template +- [ ] get_preset +- [ ] get_queue +- [ ] list_job_templates +- [ ] list_jobs +- [ ] list_presets +- [ ] list_queues +- [ ] update_job_template +- [ ] update_preset +- [ ] update_queue + +## medialive - 0% implemented +- [ ] create_channel +- [ ] create_input +- [ ] create_input_security_group +- [ ] delete_channel +- [ ] delete_input +- [ ] delete_input_security_group +- [ ] describe_channel +- [ ] describe_input +- [ ] describe_input_security_group +- [ ] list_channels +- [ ] list_input_security_groups +- [ ] list_inputs +- [ ] start_channel +- [ ] stop_channel + +## mediapackage - 0% implemented +- [ ] create_channel +- [ ] create_origin_endpoint +- [ ] delete_channel +- [ ] delete_origin_endpoint +- [ ] describe_channel +- [ ] describe_origin_endpoint +- [ ] list_channels +- [ ] list_origin_endpoints +- [ ] rotate_channel_credentials +- [ ] update_channel +- [ ] update_origin_endpoint + +## mediastore - 0% implemented +- [ ] create_container +- [ ] delete_container +- [ ] delete_container_policy +- [ ] describe_container +- [ ] get_container_policy +- [ ] list_containers +- [ ] put_container_policy + +## mediastore-data - 0% implemented +- [ ] delete_object +- [ ] describe_object +- [ ] get_object +- [ ] list_items +- [ ] put_object + ## meteringmarketplace - 0% implemented - [ ] batch_meter_usage - [ ] meter_usage @@ -2418,6 +2813,25 @@ - [ ] list_projects - [ ] update_project +## mq - 0% implemented +- [ ] create_broker +- [ ] create_configuration +- [ ] create_user +- [ ] delete_broker +- [ ] delete_user +- [ ] describe_broker +- [ ] describe_configuration +- [ ] describe_configuration_revision +- [ ] describe_user +- [ ] list_brokers +- [ ] list_configuration_revisions +- [ ] list_configurations +- [ ] list_users +- [ ] reboot_broker +- [ ] update_broker +- [ ] update_configuration +- [ ] update_user + ## mturk - 0% implemented - [ ] accept_qualification_request - [ ] approve_assignment @@ -2831,18 +3245,51 @@ ## rekognition - 0% implemented - [ ] compare_faces - [ ] create_collection +- [ ] create_stream_processor - [ ] delete_collection - [ ] delete_faces +- [ ] delete_stream_processor +- [ ] describe_stream_processor - [ ] detect_faces - [ ] detect_labels - [ ] detect_moderation_labels +- [ ] detect_text - [ ] get_celebrity_info +- [ ] get_celebrity_recognition +- [ ] get_content_moderation +- [ ] get_face_detection +- [ ] get_face_search +- [ ] get_label_detection +- [ ] get_person_tracking - [ ] index_faces - [ ] list_collections - [ ] list_faces +- [ ] list_stream_processors - [ ] recognize_celebrities - [ ] search_faces - [ ] search_faces_by_image +- [ ] start_celebrity_recognition +- [ ] start_content_moderation +- [ ] start_face_detection +- [ ] start_face_search +- [ ] start_label_detection +- [ ] start_person_tracking +- [ ] start_stream_processor +- [ ] stop_stream_processor + +## resource-groups - 0% implemented +- [ ] create_group +- [ ] delete_group +- [ ] get_group +- [ ] get_group_query +- [ ] get_tags +- [ ] list_group_resources +- [ ] list_groups +- [ ] search_resources +- [ ] tag +- [ ] untag +- [ ] update_group +- [ ] update_group_query ## resourcegroupstaggingapi - 60% implemented - [X] get_resources @@ -3014,6 +3461,40 @@ - [ ] upload_part - [ ] upload_part_copy +## sagemaker - 0% implemented +- [ ] add_tags +- [ ] create_endpoint +- [ ] create_endpoint_config +- [ ] create_model +- [ ] create_notebook_instance +- [ ] create_presigned_notebook_instance_url +- [ ] create_training_job +- [ ] delete_endpoint +- [ ] delete_endpoint_config +- [ ] delete_model +- [ ] delete_notebook_instance +- [ ] delete_tags +- [ ] describe_endpoint +- [ ] describe_endpoint_config +- [ ] describe_model +- [ ] describe_notebook_instance +- [ ] describe_training_job +- [ ] list_endpoint_configs +- [ ] list_endpoints +- [ ] list_models +- [ ] list_notebook_instances +- [ ] list_tags +- [ ] list_training_jobs +- [ ] start_notebook_instance +- [ ] stop_notebook_instance +- [ ] stop_training_job +- [ ] update_endpoint +- [ ] update_endpoint_weights_and_capacities +- [ ] update_notebook_instance + +## sagemaker-runtime - 0% implemented +- [ ] invoke_endpoint + ## sdb - 0% implemented - [ ] batch_delete_attributes - [ ] batch_put_attributes @@ -3026,6 +3507,17 @@ - [ ] put_attributes - [ ] select +## serverlessrepo - 0% implemented +- [ ] create_application +- [ ] create_application_version +- [ ] create_cloud_formation_change_set +- [ ] get_application +- [ ] get_application_policy +- [ ] list_application_versions +- [ ] list_applications +- [ ] put_application_policy +- [ ] update_application + ## servicecatalog - 0% implemented - [ ] accept_portfolio_share - [ ] associate_principal_with_portfolio @@ -3081,11 +3573,31 @@ - [ ] update_provisioning_artifact - [ ] update_tag_option -## ses - 12% implemented +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_service + +## ses - 11% implemented - [ ] clone_receipt_rule_set - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template - [ ] create_receipt_filter - [ ] create_receipt_rule - [ ] create_receipt_rule_set @@ -3093,6 +3605,7 @@ - [ ] delete_configuration_set - [ ] delete_configuration_set_event_destination - [ ] delete_configuration_set_tracking_options +- [ ] delete_custom_verification_email_template - [X] delete_identity - [ ] delete_identity_policy - [ ] delete_receipt_filter @@ -3105,6 +3618,7 @@ - [ ] describe_receipt_rule - [ ] describe_receipt_rule_set - [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template - [ ] get_identity_dkim_attributes - [ ] get_identity_mail_from_domain_attributes - [ ] get_identity_notification_attributes @@ -3114,6 +3628,7 @@ - [ ] get_send_statistics - [ ] get_template - [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates - [X] list_identities - [ ] list_identity_policies - [ ] list_receipt_filters @@ -3124,6 +3639,7 @@ - [ ] reorder_receipt_rule_set - [ ] send_bounce - [ ] send_bulk_templated_email +- [ ] send_custom_verification_email - [X] send_email - [X] send_raw_email - [ ] send_templated_email @@ -3140,6 +3656,7 @@ - [ ] update_configuration_set_reputation_metrics_enabled - [ ] update_configuration_set_sending_enabled - [ ] update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template - [ ] update_receipt_rule - [ ] update_template - [ ] verify_domain_dkim @@ -3155,6 +3672,7 @@ - [ ] describe_attack - [ ] describe_protection - [ ] describe_subscription +- [ ] get_subscription_state - [ ] list_attacks - [ ] list_protections @@ -3269,6 +3787,7 @@ - [ ] describe_activations - [ ] describe_association - [ ] describe_automation_executions +- [ ] describe_automation_step_executions - [ ] describe_available_patches - [ ] describe_document - [ ] describe_document_permission @@ -3410,6 +3929,7 @@ - [ ] list_volume_initiators - [ ] list_volume_recovery_points - [ ] list_volumes +- [ ] notify_when_uploaded - [ ] refresh_cache - [ ] remove_tags_from_resource - [ ] reset_cache @@ -3485,6 +4005,9 @@ - [X] start_workflow_execution - [X] terminate_workflow_execution +## translate - 0% implemented +- [ ] translate_text + ## waf - 0% implemented - [ ] create_byte_match_set - [ ] create_geo_match_set @@ -3493,6 +4016,7 @@ - [ ] create_regex_match_set - [ ] create_regex_pattern_set - [ ] create_rule +- [ ] create_rule_group - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl @@ -3504,6 +4028,7 @@ - [ ] delete_regex_match_set - [ ] delete_regex_pattern_set - [ ] delete_rule +- [ ] delete_rule_group - [ ] delete_size_constraint_set - [ ] delete_sql_injection_match_set - [ ] delete_web_acl @@ -3518,20 +4043,24 @@ - [ ] get_regex_match_set - [ ] get_regex_pattern_set - [ ] get_rule +- [ ] get_rule_group - [ ] get_sampled_requests - [ ] get_size_constraint_set - [ ] get_sql_injection_match_set - [ ] get_web_acl - [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group - [ ] list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets - [ ] list_rate_based_rules - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets +- [ ] list_rule_groups - [ ] list_rules - [ ] list_size_constraint_sets - [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets - [ ] update_byte_match_set @@ -3541,6 +4070,7 @@ - [ ] update_regex_match_set - [ ] update_regex_pattern_set - [ ] update_rule +- [ ] update_rule_group - [ ] update_size_constraint_set - [ ] update_sql_injection_match_set - [ ] update_web_acl @@ -3555,6 +4085,7 @@ - [ ] create_regex_match_set - [ ] create_regex_pattern_set - [ ] create_rule +- [ ] create_rule_group - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl @@ -3566,6 +4097,7 @@ - [ ] delete_regex_match_set - [ ] delete_regex_pattern_set - [ ] delete_rule +- [ ] delete_rule_group - [ ] delete_size_constraint_set - [ ] delete_sql_injection_match_set - [ ] delete_web_acl @@ -3581,12 +4113,14 @@ - [ ] get_regex_match_set - [ ] get_regex_pattern_set - [ ] get_rule +- [ ] get_rule_group - [ ] get_sampled_requests - [ ] get_size_constraint_set - [ ] get_sql_injection_match_set - [ ] get_web_acl - [ ] get_web_acl_for_resource - [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group - [ ] list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets @@ -3594,9 +4128,11 @@ - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets - [ ] list_resources_for_web_acl +- [ ] list_rule_groups - [ ] list_rules - [ ] list_size_constraint_sets - [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets - [ ] update_byte_match_set @@ -3606,6 +4142,7 @@ - [ ] update_regex_match_set - [ ] update_regex_pattern_set - [ ] update_rule +- [ ] update_rule_group - [ ] update_size_constraint_set - [ ] update_sql_injection_match_set - [ ] update_web_acl @@ -3634,6 +4171,7 @@ - [ ] describe_comments - [ ] describe_document_versions - [ ] describe_folder_contents +- [ ] describe_groups - [ ] describe_notification_subscriptions - [ ] describe_resource_permissions - [ ] describe_root_folders @@ -3652,6 +4190,36 @@ - [ ] update_folder - [ ] update_user +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] describe_organization +- [ ] describe_resource +- [ ] describe_user +- [ ] disassociate_delegate_from_resource +- [ ] disassociate_member_from_group +- [ ] list_aliases +- [ ] list_group_members +- [ ] list_groups +- [ ] list_organizations +- [ ] list_resource_delegates +- [ ] list_resources +- [ ] list_users +- [ ] register_to_work_mail +- [ ] reset_password +- [ ] update_primary_email_address +- [ ] update_resource + ## workspaces - 0% implemented - [ ] create_tags - [ ] create_workspaces From 3cede60f5bd2d0b036358391a2342b39c4e586bd Mon Sep 17 00:00:00 2001 From: George Ionita Date: Sat, 23 Dec 2017 05:45:05 +0200 Subject: [PATCH 012/182] improved SWF support --- CHANGELOG.md | 4 ++++ IMPLEMENTATION_COVERAGE.md | 4 ++-- moto/backends.py | 2 ++ moto/swf/models/__init__.py | 16 ++++++++++++- moto/swf/models/history_event.py | 1 + moto/swf/models/workflow_execution.py | 8 +++++++ moto/swf/responses.py | 23 ++++++++++++++++--- .../responses/test_workflow_executions.py | 14 +++++++++++ 8 files changed, 66 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 740aac2cb..b10967f64 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,10 @@ Moto Changelog Latest ------ + * Implemented signal_workflow_execution for SWF + * Wired SWF backend to the moto server + * Fixed incorrect handling of task list parameter on start_workflow_execution + 1.1.25 ----- diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 4a3ebd215..4b92fa927 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -3972,7 +3972,7 @@ - [ ] refresh_trusted_advisor_check - [ ] resolve_case -## swf - 54% implemented +## swf - 58% implemented - [ ] count_closed_workflow_executions - [ ] count_open_workflow_executions - [X] count_pending_activity_tasks @@ -4001,7 +4001,7 @@ - [X] respond_activity_task_completed - [X] respond_activity_task_failed - [X] respond_decision_task_completed -- [ ] signal_workflow_execution +- [X] signal_workflow_execution - [X] start_workflow_execution - [X] terminate_workflow_execution diff --git a/moto/backends.py b/moto/backends.py index 6baf35f05..dc85aacdd 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -34,6 +34,7 @@ from moto.sns import sns_backends from moto.sqs import sqs_backends from moto.ssm import ssm_backends from moto.sts import sts_backends +from moto.swf import swf_backends from moto.xray import xray_backends from moto.iot import iot_backends from moto.iotdata import iotdata_backends @@ -76,6 +77,7 @@ BACKENDS = { 'sqs': sqs_backends, 'ssm': ssm_backends, 'sts': sts_backends, + 'swf': swf_backends, 'route53': route53_backends, 'lambda': lambda_backends, 'xray': xray_backends, diff --git a/moto/swf/models/__init__.py b/moto/swf/models/__init__.py index 833596a23..a8bc57f40 100644 --- a/moto/swf/models/__init__.py +++ b/moto/swf/models/__init__.py @@ -21,7 +21,7 @@ from .history_event import HistoryEvent # flake8: noqa from .timeout import Timeout # flake8: noqa from .workflow_type import WorkflowType # flake8: noqa from .workflow_execution import WorkflowExecution # flake8: noqa - +from time import sleep KNOWN_SWF_TYPES = { "activity": ActivityType, @@ -198,6 +198,9 @@ class SWFBackend(BaseBackend): wfe.start_decision_task(task.task_token, identity=identity) return task else: + # Sleeping here will prevent clients that rely on the timeout from + # entering in a busy waiting loop. + sleep(1) return None def count_pending_decision_tasks(self, domain_name, task_list): @@ -293,6 +296,9 @@ class SWFBackend(BaseBackend): wfe.start_activity_task(task.task_token, identity=identity) return task else: + # Sleeping here will prevent clients that rely on the timeout from + # entering in a busy waiting loop. + sleep(1) return None def count_pending_activity_tasks(self, domain_name, task_list): @@ -379,6 +385,14 @@ class SWFBackend(BaseBackend): if details: activity_task.details = details + def signal_workflow_execution(self, domain_name, signal_name, workflow_id, input=None, run_id=None): + # process timeouts on all objects + self._process_timeouts() + domain = self._get_domain(domain_name) + wfe = domain.get_workflow_execution( + workflow_id, run_id=run_id, raise_if_closed=True) + wfe.signal(signal_name, input) + swf_backends = {} for region in boto.swf.regions(): diff --git a/moto/swf/models/history_event.py b/moto/swf/models/history_event.py index 0dc21a09a..e7ddfd924 100644 --- a/moto/swf/models/history_event.py +++ b/moto/swf/models/history_event.py @@ -25,6 +25,7 @@ SUPPORTED_HISTORY_EVENT_TYPES = ( "ActivityTaskTimedOut", "DecisionTaskTimedOut", "WorkflowExecutionTimedOut", + "WorkflowExecutionSignaled" ) diff --git a/moto/swf/models/workflow_execution.py b/moto/swf/models/workflow_execution.py index 2f41c287f..3d01f9192 100644 --- a/moto/swf/models/workflow_execution.py +++ b/moto/swf/models/workflow_execution.py @@ -599,6 +599,14 @@ class WorkflowExecution(BaseModel): self.close_status = "TERMINATED" self.close_cause = "OPERATOR_INITIATED" + def signal(self, signal_name, input): + self._add_event( + "WorkflowExecutionSignaled", + signal_name=signal_name, + input=input, + ) + self.schedule_decision_task() + def first_timeout(self): if not self.open or not self.start_timestamp: return None diff --git a/moto/swf/responses.py b/moto/swf/responses.py index 1ee89bfc1..6f002d3d4 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -326,9 +326,9 @@ class SWFResponse(BaseResponse): _workflow_type = self._params["workflowType"] workflow_name = _workflow_type["name"] workflow_version = _workflow_type["version"] - _default_task_list = self._params.get("defaultTaskList") - if _default_task_list: - task_list = _default_task_list.get("name") + _task_list = self._params.get("taskList") + if _task_list: + task_list = _task_list.get("name") else: task_list = None child_policy = self._params.get("childPolicy") @@ -507,3 +507,20 @@ class SWFResponse(BaseResponse): ) # TODO: make it dynamic when we implement activity tasks cancellation return json.dumps({"cancelRequested": False}) + + def signal_workflow_execution(self): + domain_name = self._params["domain"] + signal_name = self._params["signalName"] + workflow_id = self._params["workflowId"] + _input = self._params["input"] + run_id = self._params["runId"] + + self._check_string(domain_name) + self._check_string(signal_name) + self._check_string(workflow_id) + self._check_none_or_string(_input) + self._check_none_or_string(run_id) + + self.swf_backend.signal_workflow_execution( + domain_name, signal_name, workflow_id, _input, run_id) + return "" diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index 5c97c778b..88e3caa75 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -34,6 +34,20 @@ def test_start_workflow_execution(): "test-domain", "uid-abcd1234", "test-workflow", "v1.0") wf.should.contain("runId") +@mock_swf_deprecated +def test_signal_workflow_execution(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + wfe = conn.signal_workflow_execution( + "test-domain", "my_signal", "uid-abcd1234", "my_input", run_id) + + wfe = conn.describe_workflow_execution( + "test-domain", run_id, "uid-abcd1234") + + wfe["openCounts"]["openDecisionTasks"].should.equal(2) @mock_swf_deprecated def test_start_already_started_workflow_execution(): From a4d1319821986536443efa8b0981a3e777ecc963 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Dec 2017 11:06:26 -0800 Subject: [PATCH 013/182] Adding comment inviting a future person to help use bumpversion --- scripts/bump_version | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/bump_version b/scripts/bump_version index 53030700e..fe7ec1970 100755 --- a/scripts/bump_version +++ b/scripts/bump_version @@ -9,7 +9,12 @@ main() { grep version= setup.py return 1 fi + + # TODO: replace this with the bumpversion pip package, I couldn't + # figure out how to use that for these files sed -i '' "s/version=.*$/version='${version}',/g" setup.py + sed -i '' "s/__version__ = .*$/__version__ = '${version}',/g" moto/__init__.py + git checkout -b version-${version} # Commit the new version git commit setup.py -m "bumping to version ${version}" From 59cf81fd568bcb0dee9dd5d8665d27cd42b71730 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Wed, 27 Dec 2017 19:17:59 +0000 Subject: [PATCH 014/182] AWS API raises an exception if both AZ and VPCZoneIdentifier params are empty. mock that exception, fix tests to follow that pattern. --- moto/autoscaling/models.py | 12 +- tests/test_autoscaling/test_autoscaling.py | 144 ++++++++++++++------- tests/test_autoscaling/test_elbv2.py | 22 ++-- tests/test_autoscaling/test_policies.py | 4 + tests/test_autoscaling/utils.py | 17 +++ 5 files changed, 137 insertions(+), 62 deletions(-) create mode 100644 tests/test_autoscaling/utils.py diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index ab99e4119..304671f66 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -7,7 +7,7 @@ from moto.elb import elb_backends from moto.elbv2 import elbv2_backends from moto.elb.exceptions import LoadBalancerNotFoundError from .exceptions import ( - ResourceContentionError, + AutoscalingClientError, ResourceContentionError, ) # http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown @@ -155,14 +155,22 @@ class FakeAutoScalingGroup(BaseModel): autoscaling_backend, tags): self.autoscaling_backend = autoscaling_backend self.name = name + + if not availability_zones and not vpc_zone_identifier: + raise AutoscalingClientError( + "ValidationError", + "At least one Availability Zone or VPC Subnet is required." + ) self.availability_zones = availability_zones + self.vpc_zone_identifier = vpc_zone_identifier + self.max_size = max_size self.min_size = min_size self.launch_config = self.autoscaling_backend.launch_configurations[ launch_config_name] self.launch_config_name = launch_config_name - self.vpc_zone_identifier = vpc_zone_identifier + self.default_cooldown = default_cooldown if default_cooldown else DEFAULT_COOLDOWN self.health_check_period = health_check_period diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 5ed6c3aa5..a02cfb0c9 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -11,10 +11,13 @@ import sure # noqa from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_elb, mock_autoscaling_deprecated, mock_ec2 from tests.helpers import requires_boto_gte +from utils import setup_networking + @mock_autoscaling_deprecated @mock_elb_deprecated def test_create_autoscaling_group(): + mocked_networking = setup_networking() elb_conn = boto.ec2.elb.connect_to_region('us-east-1') elb_conn.create_load_balancer( 'test_lb', zones=[], listeners=[(80, 8080, 'http')]) @@ -39,7 +42,7 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["test_lb"], placement_group="test_placement", - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], termination_policies=["OldestInstance", "NewestInstance"], tags=[Tag( resource_id='tester_group', @@ -59,7 +62,7 @@ def test_create_autoscaling_group(): group.max_size.should.equal(2) group.min_size.should.equal(2) group.instances.should.have.length_of(2) - group.vpc_zone_identifier.should.equal('subnet-1234abcd') + group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.launch_config_name.should.equal('tester') group.default_cooldown.should.equal(60) group.health_check_period.should.equal(100) @@ -80,6 +83,8 @@ def test_create_autoscaling_group(): def test_create_autoscaling_groups_defaults(): """ Test with the minimum inputs and check that all of the proper defaults are assigned for the other attributes """ + + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -93,6 +98,7 @@ def test_create_autoscaling_groups_defaults(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -105,7 +111,7 @@ def test_create_autoscaling_groups_defaults(): # Defaults list(group.availability_zones).should.equal([]) group.desired_capacity.should.equal(2) - group.vpc_zone_identifier.should.equal('') + group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.default_cooldown.should.equal(300) group.health_check_period.should.equal(300) group.health_check_type.should.equal("EC2") @@ -117,6 +123,7 @@ def test_create_autoscaling_groups_defaults(): @mock_autoscaling def test_list_many_autoscaling_groups(): + mocked_networking = setup_networking() conn = boto3.client('autoscaling', region_name='us-east-1') conn.create_launch_configuration(LaunchConfigurationName='TestLC') @@ -124,7 +131,8 @@ def test_list_many_autoscaling_groups(): conn.create_auto_scaling_group(AutoScalingGroupName='TestGroup%d' % i, MinSize=1, MaxSize=2, - LaunchConfigurationName='TestLC') + LaunchConfigurationName='TestLC', + VPCZoneIdentifier=mocked_networking['subnet1']) response = conn.describe_auto_scaling_groups() groups = response["AutoScalingGroups"] @@ -142,6 +150,7 @@ def test_list_many_autoscaling_groups(): @mock_autoscaling @mock_ec2 def test_list_many_autoscaling_groups(): + mocked_networking = setup_networking() conn = boto3.client('autoscaling', region_name='us-east-1') conn.create_launch_configuration(LaunchConfigurationName='TestLC') @@ -155,7 +164,8 @@ def test_list_many_autoscaling_groups(): "PropagateAtLaunch": True, "Key": 'TestTagKey1', "Value": 'TestTagValue1' - }]) + }], + VPCZoneIdentifier=mocked_networking['subnet1']) ec2 = boto3.client('ec2', region_name='us-east-1') instances = ec2.describe_instances() @@ -167,6 +177,7 @@ def test_list_many_autoscaling_groups(): @mock_autoscaling_deprecated def test_autoscaling_group_describe_filter(): + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -180,6 +191,7 @@ def test_autoscaling_group_describe_filter(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) group.name = 'tester_group2' @@ -194,6 +206,7 @@ def test_autoscaling_group_describe_filter(): @mock_autoscaling_deprecated def test_autoscaling_update(): + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -209,12 +222,12 @@ def test_autoscaling_update(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] - group.vpc_zone_identifier.should.equal('subnet-1234abcd') + group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.vpc_zone_identifier = 'subnet-5678efgh' group.update() @@ -225,6 +238,7 @@ def test_autoscaling_update(): @mock_autoscaling_deprecated def test_autoscaling_tags_update(): + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -240,13 +254,13 @@ def test_autoscaling_tags_update(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', tags=[Tag( resource_id='tester_group', key='test_key', value='test_value', propagate_at_launch=True )], + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -267,6 +281,7 @@ def test_autoscaling_tags_update(): @mock_autoscaling_deprecated def test_autoscaling_group_delete(): + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -280,6 +295,7 @@ def test_autoscaling_group_delete(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -292,6 +308,7 @@ def test_autoscaling_group_delete(): @mock_ec2_deprecated @mock_autoscaling_deprecated def test_autoscaling_group_describe_instances(): + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -305,6 +322,7 @@ def test_autoscaling_group_describe_instances(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -326,6 +344,7 @@ def test_autoscaling_group_describe_instances(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_up(): + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -341,7 +360,7 @@ def test_set_desired_capacity_up(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -361,6 +380,7 @@ def test_set_desired_capacity_up(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_down(): + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -376,7 +396,7 @@ def test_set_desired_capacity_down(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -396,6 +416,7 @@ def test_set_desired_capacity_down(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_the_same(): + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -411,7 +432,7 @@ def test_set_desired_capacity_the_same(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -431,6 +452,7 @@ def test_set_desired_capacity_the_same(): @mock_autoscaling_deprecated @mock_elb_deprecated def test_autoscaling_group_with_elb(): + mocked_networking = setup_networking() elb_conn = boto.connect_elb() zones = ['us-east-1a', 'us-east-1b'] ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -451,6 +473,7 @@ def test_autoscaling_group_with_elb(): min_size=2, launch_config=config, load_balancers=["my-lb"], + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] @@ -488,6 +511,7 @@ Boto3 @mock_autoscaling @mock_elb def test_describe_load_balancers(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 elb_client = boto3.client('elb', region_name='us-east-1') @@ -514,16 +538,19 @@ def test_describe_load_balancers(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_load_balancers(AutoScalingGroupName='test_asg') list(response['LoadBalancers']).should.have.length_of(1) response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') + @mock_autoscaling @mock_elb def test_create_elb_and_autoscaling_group_no_relationship(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 ELB_NAME = 'my-elb' @@ -546,6 +573,7 @@ def test_create_elb_and_autoscaling_group_no_relationship(): MinSize=0, MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, + VPCZoneIdentifier=mocked_networking['subnet1'], ) # autoscaling group and elb should have no relationship @@ -562,6 +590,7 @@ def test_create_elb_and_autoscaling_group_no_relationship(): @mock_autoscaling @mock_elb def test_attach_load_balancer(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 elb_client = boto3.client('elb', region_name='us-east-1') @@ -587,7 +616,8 @@ def test_attach_load_balancer(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.attach_load_balancers( @@ -609,6 +639,7 @@ def test_attach_load_balancer(): @mock_autoscaling @mock_elb def test_detach_load_balancer(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 elb_client = boto3.client('elb', region_name='us-east-1') @@ -635,7 +666,8 @@ def test_detach_load_balancer(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.detach_load_balancers( @@ -654,6 +686,7 @@ def test_detach_load_balancer(): @mock_autoscaling def test_create_autoscaling_group_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -676,13 +709,15 @@ def test_create_autoscaling_group_boto3(): 'Key': 'not-propogated-tag-key', 'Value': 'not-propogate-tag-value', 'PropagateAtLaunch': False - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) @mock_autoscaling def test_describe_autoscaling_groups_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -692,7 +727,8 @@ def test_describe_autoscaling_groups_boto3(): LaunchConfigurationName='test_launch_configuration', MinSize=0, MaxSize=20, - DesiredCapacity=5 + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=["test_asg"] @@ -704,6 +740,7 @@ def test_describe_autoscaling_groups_boto3(): @mock_autoscaling def test_update_autoscaling_group_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -713,7 +750,8 @@ def test_update_autoscaling_group_boto3(): LaunchConfigurationName='test_launch_configuration', MinSize=0, MaxSize=20, - DesiredCapacity=5 + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.update_auto_scaling_group( @@ -729,6 +767,7 @@ def test_update_autoscaling_group_boto3(): @mock_autoscaling def test_autoscaling_taqs_update_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -739,14 +778,13 @@ def test_autoscaling_taqs_update_boto3(): MinSize=0, MaxSize=20, DesiredCapacity=5, - Tags=[ - { - "ResourceId": 'test_asg', - "Key": 'test_key', - "Value": 'test_value', - "PropagateAtLaunch": True - }, - ] + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) client.create_or_update_tags(Tags=[{ @@ -769,6 +807,7 @@ def test_autoscaling_taqs_update_boto3(): @mock_autoscaling def test_autoscaling_describe_policies_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -784,7 +823,8 @@ def test_autoscaling_describe_policies_boto3(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) client.put_scaling_policy( @@ -825,6 +865,7 @@ def test_autoscaling_describe_policies_boto3(): @mock_autoscaling @mock_ec2 def test_detach_one_instance_decrement(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -835,13 +876,14 @@ def test_detach_one_instance_decrement(): MinSize=0, MaxSize=2, DesiredCapacity=2, - Tags=[ - {'ResourceId': 'test_asg', - 'ResourceType': 'auto-scaling-group', - 'Key': 'propogated-tag-key', - 'Value': 'propogate-tag-value', - 'PropagateAtLaunch': True - }] + Tags=[{ + 'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] @@ -878,6 +920,7 @@ def test_detach_one_instance_decrement(): @mock_autoscaling @mock_ec2 def test_detach_one_instance(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -888,13 +931,14 @@ def test_detach_one_instance(): MinSize=0, MaxSize=2, DesiredCapacity=2, - Tags=[ - {'ResourceId': 'test_asg', - 'ResourceType': 'auto-scaling-group', - 'Key': 'propogated-tag-key', - 'Value': 'propogate-tag-value', - 'PropagateAtLaunch': True - }] + Tags=[{ + 'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] @@ -930,6 +974,7 @@ def test_detach_one_instance(): @mock_autoscaling @mock_ec2 def test_attach_one_instance(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -940,13 +985,14 @@ def test_attach_one_instance(): MinSize=0, MaxSize=4, DesiredCapacity=2, - Tags=[ - {'ResourceId': 'test_asg', - 'ResourceType': 'auto-scaling-group', - 'Key': 'propogated-tag-key', - 'Value': 'propogate-tag-value', - 'PropagateAtLaunch': True - }] + Tags=[{ + 'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] @@ -969,6 +1015,7 @@ def test_attach_one_instance(): @mock_autoscaling @mock_ec2 def test_describe_instance_health(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -979,6 +1026,7 @@ def test_describe_instance_health(): MinSize=2, MaxSize=4, DesiredCapacity=2, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( @@ -991,6 +1039,7 @@ def test_describe_instance_health(): @mock_autoscaling @mock_ec2 def test_set_instance_health(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -1001,6 +1050,7 @@ def test_set_instance_health(): MinSize=2, MaxSize=4, DesiredCapacity=2, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py index 89ec4a399..00a80f6c6 100644 --- a/tests/test_autoscaling/test_elbv2.py +++ b/tests/test_autoscaling/test_elbv2.py @@ -3,22 +3,21 @@ import boto3 from moto import mock_autoscaling, mock_ec2, mock_elbv2 +from utils import setup_networking + @mock_elbv2 -@mock_ec2 @mock_autoscaling def test_attach_detach_target_groups(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 client = boto3.client('autoscaling', region_name='us-east-1') elbv2_client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') response = elbv2_client.create_target_group( Name='a-target', Protocol='HTTP', Port=8080, - VpcId=vpc.id, + VpcId=mocked_networking['vpc'], HealthCheckProtocol='HTTP', HealthCheckPort='8080', HealthCheckPath='/', @@ -40,7 +39,7 @@ def test_attach_detach_target_groups(): MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, TargetGroupARNs=[target_group_arn], - VPCZoneIdentifier=vpc.id) + VPCZoneIdentifier=mocked_networking['subnet1']) # create asg without attaching to target group client.create_auto_scaling_group( AutoScalingGroupName='test_asg2', @@ -48,7 +47,7 @@ def test_attach_detach_target_groups(): MinSize=0, MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, - VPCZoneIdentifier=vpc.id) + VPCZoneIdentifier=mocked_networking['subnet2']) response = client.describe_load_balancer_target_groups( AutoScalingGroupName='test_asg') @@ -74,21 +73,18 @@ def test_attach_detach_target_groups(): list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT) @mock_elbv2 -@mock_ec2 @mock_autoscaling def test_detach_all_target_groups(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 client = boto3.client('autoscaling', region_name='us-east-1') elbv2_client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') response = elbv2_client.create_target_group( Name='a-target', Protocol='HTTP', Port=8080, - VpcId=vpc.id, + VpcId=mocked_networking['vpc'], HealthCheckProtocol='HTTP', HealthCheckPort='8080', HealthCheckPath='/', @@ -109,7 +105,7 @@ def test_detach_all_target_groups(): MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, TargetGroupARNs=[target_group_arn], - VPCZoneIdentifier=vpc.id) + VPCZoneIdentifier=mocked_networking['vpc']) response = client.describe_load_balancer_target_groups( AutoScalingGroupName='test_asg') diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py index 54c64b749..49edb34db 100644 --- a/tests/test_autoscaling/test_policies.py +++ b/tests/test_autoscaling/test_policies.py @@ -7,8 +7,11 @@ import sure # noqa from moto import mock_autoscaling_deprecated +from utils import setup_networking + def setup_autoscale_group(): + mocked_networking = setup_networking() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -22,6 +25,7 @@ def setup_autoscale_group(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) return group diff --git a/tests/test_autoscaling/utils.py b/tests/test_autoscaling/utils.py new file mode 100644 index 000000000..4844ce026 --- /dev/null +++ b/tests/test_autoscaling/utils.py @@ -0,0 +1,17 @@ +import boto3 +from moto import mock_ec2 + + +@mock_ec2 +def setup_networking(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.11.0.0/16') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='10.11.1.0/24', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='10.11.2.0/24', + AvailabilityZone='us-east-1b') + return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} From f7d8e3beb1983d2c61a236e51e66e84640422f9b Mon Sep 17 00:00:00 2001 From: captainkerk Date: Wed, 27 Dec 2017 20:22:26 +0000 Subject: [PATCH 015/182] flake8 fix --- moto/autoscaling/models.py | 1 - 1 file changed, 1 deletion(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 304671f66..af65c2a56 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -171,7 +171,6 @@ class FakeAutoScalingGroup(BaseModel): launch_config_name] self.launch_config_name = launch_config_name - self.default_cooldown = default_cooldown if default_cooldown else DEFAULT_COOLDOWN self.health_check_period = health_check_period self.health_check_type = health_check_type if health_check_type else "EC2" From fe1293ee5ba65569d322d4173bb1350053da2498 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Wed, 27 Dec 2017 20:31:57 +0000 Subject: [PATCH 016/182] remove unnecessary empty line --- tests/test_autoscaling/test_autoscaling.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index a02cfb0c9..597bbc375 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -546,7 +546,6 @@ def test_describe_load_balancers(): list(response['LoadBalancers']).should.have.length_of(1) response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') - @mock_autoscaling @mock_elb def test_create_elb_and_autoscaling_group_no_relationship(): From 24f83e91f26f95c27877a7049ecceda422f0944a Mon Sep 17 00:00:00 2001 From: Waldemar Hummer Date: Wed, 27 Dec 2017 22:58:24 -0500 Subject: [PATCH 017/182] return 404 error on missing action --- moto/core/responses.py | 3 +++ moto/core/utils.py | 2 ++ 2 files changed, 5 insertions(+) diff --git a/moto/core/responses.py b/moto/core/responses.py index 52be602f6..ae91cdc02 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -272,6 +272,9 @@ class BaseResponse(_TemplateEnvironmentMixin): headers['status'] = str(headers['status']) return status, headers, body + if not action: + return 404, headers, '' + raise NotImplementedError( "The {0} action has not been implemented".format(action)) diff --git a/moto/core/utils.py b/moto/core/utils.py index 43f05672e..86e7632b0 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -18,6 +18,8 @@ def camelcase_to_underscores(argument): python underscore variable like the_new_attribute''' result = '' prev_char_title = True + if not argument: + return argument for index, char in enumerate(argument): try: next_char_title = argument[index + 1].istitle() From 6da22f9fa41b1f5d2c661f4d69369270b86b5ee7 Mon Sep 17 00:00:00 2001 From: Gordon Irving Date: Thu, 28 Dec 2017 19:04:37 +0000 Subject: [PATCH 018/182] fix adding tags to vpc created by cloudformation --- moto/ec2/models.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 932f535a1..1f372b57a 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2004,6 +2004,11 @@ class VPC(TaggedEC2Resource): cidr_block=properties['CidrBlock'], instance_tenancy=properties.get('InstanceTenancy', 'default') ) + for tag in properties.get("Tags", []): + tag_key = tag["Key"] + tag_value = tag["Value"] + vpc.add_tag(tag_key, tag_value) + return vpc @property From 5fed6988da49516d8214722a477c760876c2f5ee Mon Sep 17 00:00:00 2001 From: Gordon Irving Date: Thu, 28 Dec 2017 17:16:49 +0000 Subject: [PATCH 019/182] describe_regions: handle region-names parameter --- moto/ec2/models.py | 11 +++++++++-- moto/ec2/responses/availability_zones_and_regions.py | 3 ++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 932f535a1..b9759099b 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1261,8 +1261,15 @@ class RegionsAndZonesBackend(object): (region, [Zone(region + c, region) for c in 'abc']) for region in [r.name for r in regions]) - def describe_regions(self): - return self.regions + def describe_regions(self, region_names=[]): + if len(region_names) == 0: + return self.regions + ret = [] + for name in region_names: + for region in self.regions: + if region.name == name: + ret.append(region) + return ret def describe_availability_zones(self): return self.zones[self.region_name] diff --git a/moto/ec2/responses/availability_zones_and_regions.py b/moto/ec2/responses/availability_zones_and_regions.py index 3d0a5ab05..a6e35a89c 100644 --- a/moto/ec2/responses/availability_zones_and_regions.py +++ b/moto/ec2/responses/availability_zones_and_regions.py @@ -10,7 +10,8 @@ class AvailabilityZonesAndRegions(BaseResponse): return template.render(zones=zones) def describe_regions(self): - regions = self.ec2_backend.describe_regions() + region_names = self._get_multi_param('RegionName') + regions = self.ec2_backend.describe_regions(region_names) template = self.response_template(DESCRIBE_REGIONS_RESPONSE) return template.render(regions=regions) From e9b81bb3253cd7375617a733a5761752d645ee66 Mon Sep 17 00:00:00 2001 From: Gordon Irving Date: Thu, 28 Dec 2017 19:27:53 +0000 Subject: [PATCH 020/182] add test for vpc tags --- .../test_cloudformation_stack_integration.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 051d8bed7..3a7525585 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -752,6 +752,9 @@ def test_vpc_single_instance_in_subnet(): security_group.vpc_id.should.equal(vpc.id) stack = conn.describe_stacks()[0] + + vpc.tags.should.have.key('Application').which.should.equal(stack.stack_id) + resources = stack.describe_resources() vpc_resource = [ resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] From 4d9833b972440cb003a042906a1789fb20ec2fde Mon Sep 17 00:00:00 2001 From: Gordon Irving Date: Thu, 28 Dec 2017 21:02:58 +0000 Subject: [PATCH 021/182] add test for descrie_regions with args --- tests/test_ec2/test_availability_zones_and_regions.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index 7226cacaf..c64f075ca 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -36,6 +36,11 @@ def test_boto3_describe_regions(): for rec in resp['Regions']: rec['Endpoint'].should.contain(rec['RegionName']) + test_region = 'us-east-1' + resp = ec2.describe_regions(RegionNames=[test_region]) + resp['Regions'].should.have.length_of(1) + resp['Regions'][0].should.have.key('RegionName').which.should.equal(test_region) + @mock_ec2 def test_boto3_availability_zones(): From 144611ff99d190f579d8baf927ba9c447caa6ce7 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Fri, 29 Dec 2017 03:00:53 +0000 Subject: [PATCH 022/182] define setup_networking_deprecated() method to create supporting resources for tests that use deprecated methods --- moto/autoscaling/exceptions.py | 2 +- tests/test_autoscaling/test_autoscaling.py | 24 +++++++++++----------- tests/test_autoscaling/test_policies.py | 4 ++-- tests/test_autoscaling/utils.py | 12 ++++++++++- 4 files changed, 26 insertions(+), 16 deletions(-) diff --git a/moto/autoscaling/exceptions.py b/moto/autoscaling/exceptions.py index 15b2e4f4a..05af25980 100644 --- a/moto/autoscaling/exceptions.py +++ b/moto/autoscaling/exceptions.py @@ -3,7 +3,7 @@ from moto.core.exceptions import RESTError class AutoscalingClientError(RESTError): - code = 500 + code = 400 class ResourceContentionError(AutoscalingClientError): diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 597bbc375..453d14096 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -11,13 +11,13 @@ import sure # noqa from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_elb, mock_autoscaling_deprecated, mock_ec2 from tests.helpers import requires_boto_gte -from utils import setup_networking +from utils import setup_networking, setup_networking_deprecated @mock_autoscaling_deprecated @mock_elb_deprecated def test_create_autoscaling_group(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() elb_conn = boto.ec2.elb.connect_to_region('us-east-1') elb_conn.create_load_balancer( 'test_lb', zones=[], listeners=[(80, 8080, 'http')]) @@ -84,7 +84,7 @@ def test_create_autoscaling_groups_defaults(): """ Test with the minimum inputs and check that all of the proper defaults are assigned for the other attributes """ - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -177,7 +177,7 @@ def test_list_many_autoscaling_groups(): @mock_autoscaling_deprecated def test_autoscaling_group_describe_filter(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -206,7 +206,7 @@ def test_autoscaling_group_describe_filter(): @mock_autoscaling_deprecated def test_autoscaling_update(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -238,7 +238,7 @@ def test_autoscaling_update(): @mock_autoscaling_deprecated def test_autoscaling_tags_update(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -281,7 +281,7 @@ def test_autoscaling_tags_update(): @mock_autoscaling_deprecated def test_autoscaling_group_delete(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -308,7 +308,7 @@ def test_autoscaling_group_delete(): @mock_ec2_deprecated @mock_autoscaling_deprecated def test_autoscaling_group_describe_instances(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -344,7 +344,7 @@ def test_autoscaling_group_describe_instances(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_up(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -380,7 +380,7 @@ def test_set_desired_capacity_up(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_down(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -416,7 +416,7 @@ def test_set_desired_capacity_down(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_the_same(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -452,7 +452,7 @@ def test_set_desired_capacity_the_same(): @mock_autoscaling_deprecated @mock_elb_deprecated def test_autoscaling_group_with_elb(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() elb_conn = boto.connect_elb() zones = ['us-east-1a', 'us-east-1b'] ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py index 49edb34db..e6b01163f 100644 --- a/tests/test_autoscaling/test_policies.py +++ b/tests/test_autoscaling/test_policies.py @@ -7,11 +7,11 @@ import sure # noqa from moto import mock_autoscaling_deprecated -from utils import setup_networking +from utils import setup_networking_deprecated def setup_autoscale_group(): - mocked_networking = setup_networking() + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', diff --git a/tests/test_autoscaling/utils.py b/tests/test_autoscaling/utils.py index 4844ce026..b167ba5f5 100644 --- a/tests/test_autoscaling/utils.py +++ b/tests/test_autoscaling/utils.py @@ -1,5 +1,6 @@ +import boto import boto3 -from moto import mock_ec2 +from moto import mock_ec2, mock_ec2_deprecated @mock_ec2 @@ -15,3 +16,12 @@ def setup_networking(): CidrBlock='10.11.2.0/24', AvailabilityZone='us-east-1b') return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} + +@mock_ec2_deprecated +def setup_networking_deprecated(): + conn = boto.connect_vpc() + vpc = conn.create_vpc("10.11.0.0/16") + subnet1 = conn.create_subnet(vpc.id, "10.11.1.0/24") + subnet2 = conn.create_subnet(vpc.id, "10.11.2.0/24") + return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} + From cd96de4903cec7ba95a66301ef75fc4eee8878b7 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Fri, 29 Dec 2017 03:32:25 +0000 Subject: [PATCH 023/182] ResourceContentionError is indeed a 500 --- moto/autoscaling/exceptions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/autoscaling/exceptions.py b/moto/autoscaling/exceptions.py index 05af25980..7dd81e0d6 100644 --- a/moto/autoscaling/exceptions.py +++ b/moto/autoscaling/exceptions.py @@ -6,7 +6,8 @@ class AutoscalingClientError(RESTError): code = 400 -class ResourceContentionError(AutoscalingClientError): +class ResourceContentionError(RESTError): + code = 500 def __init__(self): super(ResourceContentionError, self).__init__( From b855fee2e4109eab04a9d55699ffeffb1ca51df6 Mon Sep 17 00:00:00 2001 From: Mike Bjerkness Date: Sat, 30 Dec 2017 20:39:23 -0600 Subject: [PATCH 024/182] Add batch_get_image support for ECR (#1406) * Add batch_get_image for ECR * Add tests for batch_get_image * Add tests for batch_get_image * Undo local commits * Undo local commits * Adding object representation for batch_get_image * Update responses. Add a couple more tests. --- moto/ecr/models.py | 52 ++++++++++++-- moto/ecr/responses.py | 10 ++- tests/test_ecr/test_ecr_boto3.py | 116 ++++++++++++++++++++++++++++++- 3 files changed, 170 insertions(+), 8 deletions(-) diff --git a/moto/ecr/models.py b/moto/ecr/models.py index f5b6f24e4..e20c550c9 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -1,14 +1,14 @@ from __future__ import unicode_literals -# from datetime import datetime + +import hashlib +from copy import copy from random import random from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends -from copy import copy -import hashlib - from moto.ecr.exceptions import ImageNotFoundException, RepositoryNotFoundException +from botocore.exceptions import ParamValidationError DEFAULT_REGISTRY_ID = '012345678910' @@ -145,6 +145,17 @@ class Image(BaseObject): response_object['imagePushedAt'] = '2017-05-09' return response_object + @property + def response_batch_get_image(self): + response_object = {} + response_object['imageId'] = {} + response_object['imageId']['imageTag'] = self.image_tag + response_object['imageId']['imageDigest'] = self.get_image_digest() + response_object['imageManifest'] = self.image_manifest + response_object['repositoryName'] = self.repository + response_object['registryId'] = self.registry_id + return response_object + class ECRBackend(BaseBackend): @@ -245,6 +256,39 @@ class ECRBackend(BaseBackend): repository.images.append(image) return image + def batch_get_image(self, repository_name, registry_id=None, image_ids=None, accepted_media_types=None): + if repository_name in self.repositories: + repository = self.repositories[repository_name] + else: + raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) + + if not image_ids: + raise ParamValidationError(msg='Missing required parameter in input: "imageIds"') + + response = { + 'images': [], + 'failures': [], + } + + for image_id in image_ids: + found = False + for image in repository.images: + if (('imageDigest' in image_id and image.get_image_digest() == image_id['imageDigest']) or + ('imageTag' in image_id and image.image_tag == image_id['imageTag'])): + found = True + response['images'].append(image.response_batch_get_image) + + if not found: + response['failures'].append({ + 'imageId': { + 'imageTag': image_id.get('imageTag', 'null') + }, + 'failureCode': 'ImageNotFound', + 'failureReason': 'Requested image not found' + }) + + return response + ecr_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index 6207de4eb..ca45c63c9 100644 --- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py @@ -89,9 +89,13 @@ class ECRResponse(BaseResponse): 'ECR.batch_delete_image is not yet implemented') def batch_get_image(self): - if self.is_not_dryrun('BatchGetImage'): - raise NotImplementedError( - 'ECR.batch_get_image is not yet implemented') + repository_str = self._get_param('repositoryName') + registry_id = self._get_param('registryId') + image_ids = self._get_param('imageIds') + accepted_media_types = self._get_param('acceptedMediaTypes') + + response = self.ecr_backend.batch_get_image(repository_str, registry_id, image_ids, accepted_media_types) + return json.dumps(response) def can_paginate(self): if self.is_not_dryrun('CanPaginate'): diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 00628e22f..b4497ef60 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -9,7 +9,7 @@ import re import sure # noqa import boto3 -from botocore.exceptions import ClientError +from botocore.exceptions import ClientError, ParamValidationError from dateutil.tz import tzlocal from moto import mock_ecr @@ -445,3 +445,117 @@ def test_get_authorization_token_explicit_regions(): } ]) + + +@mock_ecr +def test_batch_get_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v2' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(1) + + response['images'][0]['imageManifest'].should.contain("vnd.docker.distribution.manifest.v2+json") + response['images'][0]['registryId'].should.equal("012345678910") + response['images'][0]['repositoryName'].should.equal("test_repository") + + response['images'][0]['imageId']['imageTag'].should.equal("v2") + response['images'][0]['imageId']['imageDigest'].should.contain("sha") + + type(response['failures']).should.be(list) + len(response['failures']).should.be(0) + + +@mock_ecr +def test_batch_get_image_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v5' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(0) + + type(response['failures']).should.be(list) + len(response['failures']).should.be(1) + response['failures'][0]['failureReason'].should.equal("Requested image not found") + response['failures'][0]['failureCode'].should.equal("ImageNotFound") + response['failures'][0]['imageId']['imageTag'].should.equal("v5") + + +@mock_ecr +def test_batch_get_image_no_tags(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + error_msg = re.compile( + r".*Missing required parameter in input: \"imageIds\".*", + re.MULTILINE) + + client.batch_get_image.when.called_with( + repositoryName='test_repository').should.throw( + ParamValidationError, error_msg) From 633decc6c06744d73706946be97619fbeb8e45f7 Mon Sep 17 00:00:00 2001 From: Boris Gvozdev Date: Tue, 2 Jan 2018 11:30:39 +1100 Subject: [PATCH 025/182] SNS: do not duplicate subscriptions --- moto/sns/models.py | 10 ++++++++++ tests/test_sns/test_subscriptions_boto3.py | 17 +++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/moto/sns/models.py b/moto/sns/models.py index 3d6f6507e..70587d980 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -247,11 +247,21 @@ class SNSBackend(BaseBackend): setattr(topic, attribute_name, attribute_value) def subscribe(self, topic_arn, endpoint, protocol): + # AWS doesn't create duplicates + old_subscription = self._find_subscription(topic_arn, endpoint, protocol) + if old_subscription: + return old_subscription topic = self.get_topic(topic_arn) subscription = Subscription(topic, endpoint, protocol) self.subscriptions[subscription.arn] = subscription return subscription + def _find_subscription(self, topic_arn, endpoint, protocol): + for subscription in self.subscriptions.values(): + if subscription.topic.arn == topic_arn and subscription.endpoint == endpoint and subscription.protocol == protocol: + return subscription + return None + def unsubscribe(self, subscription_arn): self.subscriptions.pop(subscription_arn) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index 4446febfc..59cef221f 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -25,6 +25,23 @@ def test_subscribe_sms(): ) resp.should.contain('SubscriptionArn') +@mock_sns +def test_double_subscription(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + do_subscribe_sqs = lambda sqs_arn: client.subscribe( + TopicArn=arn, + Protocol='sqs', + Endpoint=sqs_arn + ) + resp1 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + resp2 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + + resp1['SubscriptionArn'].should.equal(resp2['SubscriptionArn']) + @mock_sns def test_subscribe_bad_sms(): From 770281aef2132de8b4d8abb7ec3325a3038b6f09 Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Tue, 2 Jan 2018 23:47:57 -0500 Subject: [PATCH 026/182] Added put_bucket_logging support (#1401) - Also added put acl for XML - Put logging will also verify that the destination bucket exists in the same region with the proper ACLs attached. --- moto/s3/exceptions.py | 27 +++++ moto/s3/models.py | 39 ++++++ moto/s3/responses.py | 163 +++++++++++++++++++++++-- tests/test_s3/test_s3.py | 254 ++++++++++++++++++++++++++++++++++++++- 4 files changed, 468 insertions(+), 15 deletions(-) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 24704e7ef..08dd02313 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -111,3 +111,30 @@ class MalformedXML(S3ClientError): "MalformedXML", "The XML you provided was not well-formed or did not validate against our published schema", *args, **kwargs) + + +class MalformedACLError(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(MalformedACLError, self).__init__( + "MalformedACLError", + "The XML you provided was not well-formed or did not validate against our published schema", + *args, **kwargs) + + +class InvalidTargetBucketForLogging(S3ClientError): + code = 400 + + def __init__(self, msg): + super(InvalidTargetBucketForLogging, self).__init__("InvalidTargetBucketForLogging", msg) + + +class CrossLocationLoggingProhibitted(S3ClientError): + code = 403 + + def __init__(self): + super(CrossLocationLoggingProhibitted, self).__init__( + "CrossLocationLoggingProhibitted", + "Cross S3 location logging not allowed." + ) diff --git a/moto/s3/models.py b/moto/s3/models.py index 91d3c1e2d..7eb89531f 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -347,6 +347,7 @@ class FakeBucket(BaseModel): self.acl = get_canned_acl('private') self.tags = FakeTagging() self.cors = [] + self.logging = {} @property def location(self): @@ -422,6 +423,40 @@ class FakeBucket(BaseModel): def tagging(self): return self.tags + def set_logging(self, logging_config, bucket_backend): + if not logging_config: + self.logging = {} + else: + from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted + # Target bucket must exist in the same account (assuming all moto buckets are in the same account): + if not bucket_backend.buckets.get(logging_config["TargetBucket"]): + raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.") + + # Does the target bucket have the log-delivery WRITE and READ_ACP permissions? + write = read_acp = False + for grant in bucket_backend.buckets[logging_config["TargetBucket"]].acl.grants: + # Must be granted to: http://acs.amazonaws.com/groups/s3/LogDelivery + for grantee in grant.grantees: + if grantee.uri == "http://acs.amazonaws.com/groups/s3/LogDelivery": + if "WRITE" in grant.permissions or "FULL_CONTROL" in grant.permissions: + write = True + + if "READ_ACP" in grant.permissions or "FULL_CONTROL" in grant.permissions: + read_acp = True + + break + + if not write or not read_acp: + raise InvalidTargetBucketForLogging("You must give the log-delivery group WRITE and READ_ACP" + " permissions to the target bucket") + + # Buckets must also exist within the same region: + if bucket_backend.buckets[logging_config["TargetBucket"]].region_name != self.region_name: + raise CrossLocationLoggingProhibitted() + + # Checks pass -- set the logging config: + self.logging = logging_config + def set_website_configuration(self, website_configuration): self.website_configuration = website_configuration @@ -608,6 +643,10 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.set_cors(cors_rules) + def put_bucket_logging(self, bucket_name, logging_config): + bucket = self.get_bucket(bucket_name) + bucket.set_logging(logging_config, self) + def delete_bucket_cors(self, bucket_name): bucket = self.get_bucket(bucket_name) bucket.delete_cors() diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 6abb4f2d1..8d2caf098 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -11,11 +11,13 @@ import xmltodict from moto.packages.httpretty.core import HTTPrettyRequest from moto.core.responses import _TemplateEnvironmentMixin -from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys +from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, \ + parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys - -from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder -from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, FakeTag +from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder, MalformedXML, \ + MalformedACLError +from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \ + FakeTag from .utils import bucket_name_from_url, metadata_from_headers from xml.dom import minidom @@ -70,8 +72,9 @@ class ResponseObject(_TemplateEnvironmentMixin): match = re.match(r'^\[(.+)\](:\d+)?$', host) if match: - match = re.match(r'^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z', - match.groups()[0], re.IGNORECASE) + match = re.match( + r'^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z', + match.groups()[0], re.IGNORECASE) if match: return False @@ -229,6 +232,13 @@ class ResponseObject(_TemplateEnvironmentMixin): return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_TAGGING_RESPONSE) return template.render(bucket=bucket) + elif 'logging' in querystring: + bucket = self.backend.get_bucket(bucket_name) + if not bucket.logging: + template = self.response_template(S3_NO_LOGGING_CONFIG) + return 200, {}, template.render() + template = self.response_template(S3_LOGGING_CONFIG) + return 200, {}, template.render(logging=bucket.logging) elif "cors" in querystring: bucket = self.backend.get_bucket(bucket_name) if len(bucket.cors) == 0: @@ -324,8 +334,7 @@ class ResponseObject(_TemplateEnvironmentMixin): limit = continuation_token or start_after result_keys = self._get_results_from_token(result_keys, limit) - result_keys, is_truncated, \ - next_continuation_token = self._truncate_result(result_keys, max_keys) + result_keys, is_truncated, next_continuation_token = self._truncate_result(result_keys, max_keys) return template.render( bucket=bucket, @@ -380,8 +389,11 @@ class ResponseObject(_TemplateEnvironmentMixin): self.backend.set_bucket_policy(bucket_name, body) return 'True' elif 'acl' in querystring: - # TODO: Support the XML-based ACL format - self.backend.set_bucket_acl(bucket_name, self._acl_from_headers(request.headers)) + # Headers are first. If not set, then look at the body (consistent with the documentation): + acls = self._acl_from_headers(request.headers) + if not acls: + acls = self._acl_from_xml(body) + self.backend.set_bucket_acl(bucket_name, acls) return "" elif "tagging" in querystring: tagging = self._bucket_tagging_from_xml(body) @@ -391,12 +403,18 @@ class ResponseObject(_TemplateEnvironmentMixin): self.backend.set_bucket_website_configuration(bucket_name, body) return "" elif "cors" in querystring: - from moto.s3.exceptions import MalformedXML try: self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body)) return "" except KeyError: raise MalformedXML() + elif "logging" in querystring: + try: + self.backend.put_bucket_logging(bucket_name, self._logging_from_xml(body)) + return "" + except KeyError: + raise MalformedXML() + else: if body: try: @@ -515,6 +533,7 @@ class ResponseObject(_TemplateEnvironmentMixin): def toint(i): return int(i) if i else None + begin, end = map(toint, rspec.split('-')) if begin is not None: # byte range end = last if end is None else min(end, last) @@ -731,6 +750,58 @@ class ResponseObject(_TemplateEnvironmentMixin): else: return 404, response_headers, "" + def _acl_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + if not parsed_xml.get("AccessControlPolicy"): + raise MalformedACLError() + + # The owner is needed for some reason... + if not parsed_xml["AccessControlPolicy"].get("Owner"): + # TODO: Validate that the Owner is actually correct. + raise MalformedACLError() + + # If empty, then no ACLs: + if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None: + return [] + + if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"): + raise MalformedACLError() + + permissions = [ + "READ", + "WRITE", + "READ_ACP", + "WRITE_ACP", + "FULL_CONTROL" + ] + + if not isinstance(parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list): + parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = \ + [parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]] + + grants = self._get_grants_from_xml(parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], + MalformedACLError, permissions) + return FakeAcl(grants) + + def _get_grants_from_xml(self, grant_list, exception_type, permissions): + grants = [] + for grant in grant_list: + if grant.get("Permission", "") not in permissions: + raise exception_type() + + if grant["Grantee"].get("@xsi:type", "") not in ["CanonicalUser", "AmazonCustomerByEmail", "Group"]: + raise exception_type() + + # TODO: Verify that the proper grantee data is supplied based on the type. + + grants.append(FakeGrant( + [FakeGrantee(id=grant["Grantee"].get("ID", ""), display_name=grant["Grantee"].get("DisplayName", ""), + uri=grant["Grantee"].get("URI", ""))], + [grant["Permission"]]) + ) + + return grants + def _acl_from_headers(self, headers): canned_acl = headers.get('x-amz-acl', '') if canned_acl: @@ -814,6 +885,42 @@ class ResponseObject(_TemplateEnvironmentMixin): return [parsed_xml["CORSConfiguration"]["CORSRule"]] + def _logging_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"): + return {} + + if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"): + raise MalformedXML() + + if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"): + parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = "" + + # Get the ACLs: + if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"): + permissions = [ + "READ", + "WRITE", + "FULL_CONTROL" + ] + if not isinstance(parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"], list): + target_grants = self._get_grants_from_xml( + [parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"]], + MalformedXML, + permissions + ) + else: + target_grants = self._get_grants_from_xml( + parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"], + MalformedXML, + permissions + ) + + parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"] = target_grants + + return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"] + def _key_response_delete(self, bucket_name, query, key_name, headers): if query.get('uploadId'): upload_id = query['uploadId'][0] @@ -1322,3 +1429,37 @@ S3_NO_CORS_CONFIG = """ 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= """ + +S3_LOGGING_CONFIG = """ + + + {{ logging["TargetBucket"] }} + {{ logging["TargetPrefix"] }} + {% if logging.get("TargetGrants") %} + + {% for grant in logging["TargetGrants"] %} + + + {% if grant.grantees[0].uri %} + {{ grant.grantees[0].uri }} + {% endif %} + {% if grant.grantees[0].id %} + {{ grant.grantees[0].id }} + {% endif %} + {% if grant.grantees[0].display_name %} + {{ grant.grantees[0].display_name }} + {% endif %} + + {{ grant.permissions[0] }} + + {% endfor %} + + {% endif %} + + +""" + +S3_NO_LOGGING_CONFIG = """ + +""" diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 829941d79..33752af60 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -50,6 +50,7 @@ def reduced_min_part_size(f): return f(*args, **kwargs) finally: s3model.UPLOAD_PART_MIN_SIZE = orig_size + return wrapped @@ -883,11 +884,12 @@ def test_s3_object_in_public_bucket(): s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() exc.exception.response['Error']['Code'].should.equal('403') - params = {'Bucket': 'test-bucket','Key': 'file.txt'} + params = {'Bucket': 'test-bucket', 'Key': 'file.txt'} presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) response = requests.get(presigned_url) assert response.status_code == 200 + @mock_s3 def test_s3_object_in_private_bucket(): s3 = boto3.resource('s3') @@ -1102,6 +1104,7 @@ def test_boto3_key_etag(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"') + @mock_s3 def test_website_redirect_location(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1116,6 +1119,7 @@ def test_website_redirect_location(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['WebsiteRedirectLocation'].should.equal(url) + @mock_s3 def test_boto3_list_keys_xml_escaped(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1627,7 +1631,7 @@ def test_boto3_put_bucket_cors(): }) e = err.exception e.response["Error"]["Code"].should.equal("InvalidRequest") - e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " + e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " "Unsupported method is NOTREAL") with assert_raises(ClientError) as err: @@ -1732,6 +1736,249 @@ def test_boto3_delete_bucket_cors(): e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") +@mock_s3 +def test_put_bucket_acl_body(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + } + ], + "Owner": bucket_owner + }) + + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 2 + for g in result["Grants"]: + assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery" + assert g["Grantee"]["Type"] == "Group" + assert g["Permission"] in ["WRITE", "READ_ACP"] + + # With one: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ], + "Owner": bucket_owner + }) + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 1 + + # With no owner: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ] + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # With incorrect permission: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "lskjflkasdjflkdsjfalisdjflkdsjf" + } + ], + "Owner": bucket_owner + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # Clear the ACLs: + result = s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner}) + assert not result.get("Grants") + + +@mock_s3 +def test_boto3_put_bucket_logging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + log_bucket = "logbucket" + wrong_region_bucket = "wrongregionlogbucket" + s3.create_bucket(Bucket=bucket_name) + s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later... + s3.create_bucket(Bucket=wrong_region_bucket, CreateBucketConfiguration={"LocationConstraint": "us-west-2"}) + + # No logging config: + result = s3.get_bucket_logging(Bucket=bucket_name) + assert not result.get("LoggingEnabled") + + # A log-bucket that doesn't exist: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": "IAMNOTREAL", + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + + # A log-bucket that's missing the proper ACLs for LogDelivery: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + assert "log-delivery" in err.exception.response["Error"]["Message"] + + # Add the proper "log-delivery" ACL to the log buckets: + bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"] + for bucket in [log_bucket, wrong_region_bucket]: + s3.put_bucket_acl(Bucket=bucket, AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + }, + { + "Grantee": { + "Type": "CanonicalUser", + "ID": bucket_owner["ID"] + }, + "Permission": "FULL_CONTROL" + } + ], + "Owner": bucket_owner + }) + + # A log-bucket that's in the wrong region: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": wrong_region_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" + + # Correct logging: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name) + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert result["LoggingEnabled"]["TargetBucket"] == log_bucket + assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name) + assert not result["LoggingEnabled"].get("TargetGrants") + + # And disabling: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={}) + assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled") + + # And enabling with multiple target grants: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + }, + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "WRITE" + } + ] + } + }) + + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 2 + assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \ + "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" + + # Test with just 1 grant: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + } + ] + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 + + # With an invalid grant: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "NOTAREALPERM" + } + ] + } + }) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + @mock_s3 def test_boto3_put_object_tagging(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1939,11 +2186,10 @@ def test_get_stream_gzipped(): Bucket='moto-tests', Key='keyname', ) - res = zlib.decompress(obj['Body'].read(), 16+zlib.MAX_WBITS) + res = zlib.decompress(obj['Body'].read(), 16 + zlib.MAX_WBITS) assert res == payload - TEST_XML = """\ From 71af9317f236a5fb884fa6aec1a31e8abced26b8 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 4 Jan 2018 18:59:37 +0900 Subject: [PATCH 027/182] Add group features to iot (#1402) * Add thing group features * thing thing-group relation * clean up comments --- moto/iot/exceptions.py | 12 ++- moto/iot/models.py | 150 ++++++++++++++++++++++++++++- moto/iot/responses.py | 136 ++++++++++++++++++++++++-- tests/test_iot/test_iot.py | 189 +++++++++++++++++++++++++++++++++++++ 4 files changed, 475 insertions(+), 12 deletions(-) diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index 4bb01c095..47435eeb5 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -16,9 +16,17 @@ class ResourceNotFoundException(IoTClientError): class InvalidRequestException(IoTClientError): - def __init__(self): + def __init__(self, msg=None): self.code = 400 super(InvalidRequestException, self).__init__( "InvalidRequestException", - "The request is not valid." + msg or "The request is not valid." + ) + + +class VersionConflictException(IoTClientError): + def __init__(self, name): + self.code = 409 + super(VersionConflictException, self).__init__( + 'The version for thing %s does not match the expected version.' % name ) diff --git a/moto/iot/models.py b/moto/iot/models.py index 1efa6690e..77b0dde08 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -9,7 +9,8 @@ from moto.core import BaseBackend, BaseModel from collections import OrderedDict from .exceptions import ( ResourceNotFoundException, - InvalidRequestException + InvalidRequestException, + VersionConflictException ) @@ -44,6 +45,7 @@ class FakeThingType(BaseModel): self.region_name = region_name self.thing_type_name = thing_type_name self.thing_type_properties = thing_type_properties + self.thing_type_id = str(uuid.uuid4()) # I don't know the rule of id t = time.time() self.metadata = { 'deprecated': False, @@ -54,11 +56,37 @@ class FakeThingType(BaseModel): def to_dict(self): return { 'thingTypeName': self.thing_type_name, + 'thingTypeId': self.thing_type_id, 'thingTypeProperties': self.thing_type_properties, 'thingTypeMetadata': self.metadata } +class FakeThingGroup(BaseModel): + def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name): + self.region_name = region_name + self.thing_group_name = thing_group_name + self.thing_group_id = str(uuid.uuid4()) # I don't know the rule of id + self.version = 1 # TODO: tmp + self.parent_group_name = parent_group_name + self.thing_group_properties = thing_group_properties or {} + t = time.time() + self.metadata = { + 'creationData': int(t * 1000) / 1000.0 + } + self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name) + self.things = OrderedDict() + + def to_dict(self): + return { + 'thingGroupName': self.thing_group_name, + 'thingGroupId': self.thing_group_id, + 'version': self.version, + 'thingGroupProperties': self.thing_group_properties, + 'thingGroupMetadata': self.metadata + } + + class FakeCertificate(BaseModel): def __init__(self, certificate_pem, status, region_name): m = hashlib.sha256() @@ -137,6 +165,7 @@ class IoTBackend(BaseBackend): self.region_name = region_name self.things = OrderedDict() self.thing_types = OrderedDict() + self.thing_groups = OrderedDict() self.certificates = OrderedDict() self.policies = OrderedDict() self.principal_policies = OrderedDict() @@ -359,6 +388,125 @@ class IoTBackend(BaseBackend): principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name] return principals + def describe_thing_group(self, thing_group_name): + thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name] + if len(thing_groups) == 0: + raise ResourceNotFoundException() + return thing_groups[0] + + def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties): + thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name) + self.thing_groups[thing_group.arn] = thing_group + return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id + + def delete_thing_group(self, thing_group_name, expected_version): + thing_group = self.describe_thing_group(thing_group_name) + del self.thing_groups[thing_group.arn] + + def list_thing_groups(self, parent_group, name_prefix_filter, recursive): + thing_groups = self.thing_groups.values() + return thing_groups + + def update_thing_group(self, thing_group_name, thing_group_properties, expected_version): + thing_group = self.describe_thing_group(thing_group_name) + if expected_version and expected_version != thing_group.version: + raise VersionConflictException(thing_group_name) + attribute_payload = thing_group_properties.get('attributePayload', None) + if attribute_payload is not None and 'attributes' in attribute_payload: + do_merge = attribute_payload.get('merge', False) + attributes = attribute_payload['attributes'] + if not do_merge: + thing_group.thing_group_properties['attributePayload']['attributes'] = attributes + else: + thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes) + elif attribute_payload is not None and 'attributes' not in attribute_payload: + thing_group.attributes = {} + thing_group.version = thing_group.version + 1 + return thing_group.version + + def _identify_thing_group(self, thing_group_name, thing_group_arn): + # identify thing group + if thing_group_name is None and thing_group_arn is None: + raise InvalidRequestException( + ' Both thingGroupArn and thingGroupName are empty. Need to specify at least one of them' + ) + if thing_group_name is not None: + thing_group = self.describe_thing_group(thing_group_name) + if thing_group_arn and thing_group.arn != thing_group_arn: + raise InvalidRequestException( + 'ThingGroupName thingGroupArn does not match specified thingGroupName in request' + ) + elif thing_group_arn is not None: + if thing_group_arn not in self.thing_groups: + raise InvalidRequestException() + thing_group = self.thing_groups[thing_group_arn] + return thing_group + + def _identify_thing(self, thing_name, thing_arn): + # identify thing + if thing_name is None and thing_arn is None: + raise InvalidRequestException( + 'Both thingArn and thingName are empty. Need to specify at least one of them' + ) + if thing_name is not None: + thing = self.describe_thing(thing_name) + if thing_arn and thing.arn != thing_arn: + raise InvalidRequestException( + 'ThingName thingArn does not match specified thingName in request' + ) + elif thing_arn is not None: + if thing_arn not in self.things: + raise InvalidRequestException() + thing = self.things[thing_arn] + return thing + + def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn in thing_group.things: + # aws ignores duplicate registration + return + thing_group.things[thing.arn] = thing + + def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn not in thing_group.things: + # aws ignores non-registered thing + return + del thing_group.things[thing.arn] + + def list_things_in_thing_group(self, thing_group_name, recursive): + thing_group = self.describe_thing_group(thing_group_name) + return thing_group.things.values() + + def list_thing_groups_for_thing(self, thing_name): + thing = self.describe_thing(thing_name) + all_thing_groups = self.list_thing_groups(None, None, None) + ret = [] + for thing_group in all_thing_groups: + if thing.arn in thing_group.things: + ret.append({ + 'groupName': thing_group.thing_group_name, + 'groupArn': thing_group.arn + }) + return ret + + def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove): + thing = self.describe_thing(thing_name) + for thing_group_name in thing_groups_to_add: + thing_group = self.describe_thing_group(thing_group_name) + self.add_thing_to_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + ) + for thing_group_name in thing_groups_to_remove: + thing_group = self.describe_thing_group(thing_group_name) + self.remove_thing_from_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + ) + available_regions = boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index bbe2bb016..f59c105da 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -38,8 +38,7 @@ class IoTResponse(BaseResponse): thing_types = self.iot_backend.list_thing_types( thing_type_name=thing_type_name ) - - # TODO: support next_token and max_results + # TODO: implement pagination in the future next_token = None return json.dumps(dict(thingTypes=[_.to_dict() for _ in thing_types], nextToken=next_token)) @@ -54,7 +53,7 @@ class IoTResponse(BaseResponse): attribute_value=attribute_value, thing_type_name=thing_type_name, ) - # TODO: support next_token and max_results + # TODO: implement pagination in the future next_token = None return json.dumps(dict(things=[_.to_dict() for _ in things], nextToken=next_token)) @@ -63,7 +62,6 @@ class IoTResponse(BaseResponse): thing = self.iot_backend.describe_thing( thing_name=thing_name, ) - print(thing.to_dict(include_default_client_id=True)) return json.dumps(thing.to_dict(include_default_client_id=True)) def describe_thing_type(self): @@ -135,7 +133,7 @@ class IoTResponse(BaseResponse): # marker = self._get_param("marker") # ascending_order = self._get_param("ascendingOrder") certificates = self.iot_backend.list_certificates() - # TODO: handle pagination + # TODO: implement pagination in the future return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) def update_certificate(self): @@ -162,7 +160,7 @@ class IoTResponse(BaseResponse): # ascending_order = self._get_param("ascendingOrder") policies = self.iot_backend.list_policies() - # TODO: handle pagination + # TODO: implement pagination in the future return json.dumps(dict(policies=[_.to_dict() for _ in policies])) def get_policy(self): @@ -205,7 +203,7 @@ class IoTResponse(BaseResponse): policies = self.iot_backend.list_principal_policies( principal_arn=principal ) - # TODO: handle pagination + # TODO: implement pagination in the future next_marker = None return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) @@ -217,7 +215,7 @@ class IoTResponse(BaseResponse): principals = self.iot_backend.list_policy_principals( policy_name=policy_name, ) - # TODO: handle pagination + # TODO: implement pagination in the future next_marker = None return json.dumps(dict(principals=principals, nextMarker=next_marker)) @@ -246,7 +244,7 @@ class IoTResponse(BaseResponse): things = self.iot_backend.list_principal_things( principal_arn=principal, ) - # TODO: handle pagination + # TODO: implement pagination in the future next_token = None return json.dumps(dict(things=things, nextToken=next_token)) @@ -256,3 +254,123 @@ class IoTResponse(BaseResponse): thing_name=thing_name, ) return json.dumps(dict(principals=principals)) + + def describe_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group = self.iot_backend.describe_thing_group( + thing_group_name=thing_group_name, + ) + return json.dumps(thing_group.to_dict()) + + def create_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + parent_group_name = self._get_param("parentGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( + thing_group_name=thing_group_name, + parent_group_name=parent_group_name, + thing_group_properties=thing_group_properties, + ) + return json.dumps(dict( + thingGroupName=thing_group_name, + thingGroupArn=thing_group_arn, + thingGroupId=thing_group_id) + ) + + def delete_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing_group( + thing_group_name=thing_group_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def list_thing_groups(self): + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + parent_group = self._get_param("parentGroup") + name_prefix_filter = self._get_param("namePrefixFilter") + recursive = self._get_param("recursive") + thing_groups = self.iot_backend.list_thing_groups( + parent_group=parent_group, + name_prefix_filter=name_prefix_filter, + recursive=recursive, + ) + next_token = None + rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=rets, nextToken=next_token)) + + def update_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + expected_version = self._get_param("expectedVersion") + version = self.iot_backend.update_thing_group( + thing_group_name=thing_group_name, + thing_group_properties=thing_group_properties, + expected_version=expected_version, + ) + return json.dumps(dict(version=version)) + + def add_thing_to_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.add_thing_to_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def remove_thing_from_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.remove_thing_from_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def list_things_in_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + recursive = self._get_param("recursive") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + things = self.iot_backend.list_things_in_thing_group( + thing_group_name=thing_group_name, + recursive=recursive, + ) + next_token = None + thing_names = [_.thing_name for _ in things] + # TODO: implement pagination in the future + return json.dumps(dict(things=thing_names, nextToken=next_token)) + + def list_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + thing_groups = self.iot_backend.list_thing_groups_for_thing( + thing_name=thing_name + ) + next_token = None + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) + + def update_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] + thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] + self.iot_backend.update_thing_groups_for_thing( + thing_name=thing_name, + thing_groups_to_add=thing_groups_to_add, + thing_groups_to_remove=thing_groups_to_remove, + ) + return json.dumps(dict()) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 31631e459..7c01934d3 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -177,3 +177,192 @@ def test_principal_thing(): res.should.have.key('things').which.should.have.length_of(0) res = client.list_thing_principals(thingName=thing_name) res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_thing_groups(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(1) + for thing_group in res['thingGroups']: + thing_group.should.have.key('groupName').which.should_not.be.none + thing_group.should.have.key('groupArn').which.should_not.be.none + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupProperties') + thing_group.should.have.key('thingGroupMetadata') + thing_group.should.have.key('version') + + # delete thing group + client.delete_thing_group(thingGroupName=group_name) + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(0) + + # props create test + props = { + 'thingGroupDescription': 'my first thing group', + 'attributePayload': { + 'attributes': { + 'key1': 'val01', + 'Key02': 'VAL2' + } + } + } + thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties')\ + .which.should.have.key('attributePayload')\ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + # props update test with merge + new_props = { + 'attributePayload': { + 'attributes': { + 'k3': 'v3' + }, + 'merge': True + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties')\ + .which.should.have.key('attributePayload')\ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + res_props.should.have.key('k3').which.should.equal('v3') + + # props update test + new_props = { + 'attributePayload': { + 'attributes': { + 'k4': 'v4' + } + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties')\ + .which.should.have.key('attributePayload')\ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('k4').which.should.equal('v4') + res_props.should_not.have.key('key1') + + +@mock_iot +def test_thing_group_relations(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # add in 4 way + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + thing_groups = client.list_thing_groups_for_thing( + thingName=name + ) + thing_groups.should.have.key('thingGroups') + thing_groups['thingGroups'].should.have.length_of(1) + + # remove in 4 way + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(0) + + # update thing group for thing + client.update_thing_groups_for_thing( + thingName=name, + thingGroupsToAdd=[ + group_name + ] + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + client.update_thing_groups_for_thing( + thingName=name, + thingGroupsToRemove=[ + group_name + ] + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(0) From 56ce26a72809a7e7b56c56002e843e406b2dfd46 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Thu, 4 Jan 2018 15:31:17 +0530 Subject: [PATCH 028/182] Added support for filtering AMIs by self (#1398) * Added support for filtering AMIs by self Closes: https://github.com/spulec/moto/issues/1396 * Adjusted regex to also match signature v4 and fixed py3 compatibility --- moto/core/responses.py | 16 ++++++++++++++++ moto/ec2/models.py | 15 +++++++++++---- moto/ec2/responses/amis.py | 5 +++-- tests/test_ec2/test_amis.py | 14 ++++++++++++++ 4 files changed, 44 insertions(+), 6 deletions(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index ae91cdc02..5afe5e168 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -108,6 +108,7 @@ class BaseResponse(_TemplateEnvironmentMixin): # to extract region, use [^.] region_regex = re.compile(r'\.(?P[a-z]{2}-[a-z]+-\d{1})\.amazonaws\.com') param_list_regex = re.compile(r'(.*)\.(\d+)\.') + access_key_regex = re.compile(r'AWS.*(?P(? Date: Mon, 8 Jan 2018 13:18:50 +0100 Subject: [PATCH 029/182] Change name of 'state' attribute of 'FakeAlarm' CloudWatch model to 'state_value'. This ensures that the 'StateValue' returned by 'describe_alarms' is correct. The 'DESCRIBE_ALARMS_TEMPLATE' response template references a 'state_value' attribute on the 'FakeAlarm' model which does not exist; it is named 'state'. This commit updates the attribute to be called 'state_value', in-line with the naming convention used elsewhere. --- moto/cloudwatch/models.py | 8 ++++---- tests/test_cloudwatch/test_cloudwatch_boto3.py | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 395f4f0ba..ba6569981 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -74,18 +74,18 @@ class FakeAlarm(BaseModel): self.state_reason = '' self.state_reason_data = '{}' - self.state = 'OK' + self.state_value = 'OK' self.state_updated_timestamp = datetime.utcnow() def update_state(self, reason, reason_data, state_value): # History type, that then decides what the rest of the items are, can be one of ConfigurationUpdate | StateUpdate | Action self.history.append( - ('StateUpdate', self.state_reason, self.state_reason_data, self.state, self.state_updated_timestamp) + ('StateUpdate', self.state_reason, self.state_reason_data, self.state_value, self.state_updated_timestamp) ) self.state_reason = reason self.state_reason_data = reason_data - self.state = state_value + self.state_value = state_value self.state_updated_timestamp = datetime.utcnow() @@ -221,7 +221,7 @@ class CloudWatchBackend(BaseBackend): ] def get_alarms_by_state_value(self, target_state): - return filter(lambda alarm: alarm.state == target_state, self.alarms.values()) + return filter(lambda alarm: alarm.state_value == target_state, self.alarms.values()) def delete_alarms(self, alarm_names): for alarm_name in alarm_names: diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index b2c44cd51..5fbf75749 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -127,12 +127,14 @@ def test_alarm_state(): ) len(resp['MetricAlarms']).should.equal(1) resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1') + resp['MetricAlarms'][0]['StateValue'].should.equal('ALARM') resp = client.describe_alarms( StateValue='OK' ) len(resp['MetricAlarms']).should.equal(1) resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2') + resp['MetricAlarms'][0]['StateValue'].should.equal('OK') # Just for sanity resp = client.describe_alarms() From 350cf9257e6d1c84e971070cdbebc471a54d5b7f Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 10 Jan 2018 15:29:08 -0800 Subject: [PATCH 030/182] Add update_access_key endpoint (#1423) --- moto/iam/models.py | 12 ++++++++++++ moto/iam/responses.py | 8 ++++++++ tests/test_iam/test_iam.py | 18 ++++++++++++++++++ 3 files changed, 38 insertions(+) diff --git a/moto/iam/models.py b/moto/iam/models.py index 57d24826d..32ca144c3 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -349,6 +349,14 @@ class User(BaseModel): raise IAMNotFoundException( "Key {0} not found".format(access_key_id)) + def update_access_key(self, access_key_id, status): + for key in self.access_keys: + if key.access_key_id == access_key_id: + key.status = status + break + else: + raise IAMNotFoundException("The Access Key with id {0} cannot be found".format(access_key_id)) + def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == 'Arn': @@ -817,6 +825,10 @@ class IAMBackend(BaseBackend): key = user.create_access_key() return key + def update_access_key(self, user_name, access_key_id, status): + user = self.get_user(user_name) + user.update_access_key(access_key_id, status) + def get_all_access_keys(self, user_name, marker=None, max_items=None): user = self.get_user(user_name) keys = user.get_all_access_keys() diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 0e11c09d5..9931cb8d0 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -440,6 +440,14 @@ class IamResponse(BaseResponse): template = self.response_template(CREATE_ACCESS_KEY_TEMPLATE) return template.render(key=key) + def update_access_key(self): + user_name = self._get_param('UserName') + access_key_id = self._get_param('AccessKeyId') + status = self._get_param('Status') + iam_backend.update_access_key(user_name, access_key_id, status) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name='UpdateAccessKey') + def list_access_keys(self): user_name = self._get_param('UserName') diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index d50f6999e..b4dfe532d 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -651,3 +651,21 @@ def test_attach_detach_user_policy(): resp = client.list_attached_user_policies(UserName=user.name) resp['AttachedPolicies'].should.have.length_of(0) + + +@mock_iam +def test_update_access_key(): + iam = boto3.resource('iam', region_name='us-east-1') + client = iam.meta.client + username = 'test-user' + iam.create_user(UserName=username) + with assert_raises(ClientError): + client.update_access_key(UserName=username, + AccessKeyId='non-existent-key', + Status='Inactive') + key = client.create_access_key(UserName=username)['AccessKey'] + client.update_access_key(UserName=username, + AccessKeyId=key['AccessKeyId'], + Status='Inactive') + resp = client.list_access_keys(UserName=username) + resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive') From 681726b82679dadefbbba2f30f267ac0c754eeb4 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Dec 2017 11:08:09 -0800 Subject: [PATCH 031/182] Including the in-source version number --- moto/__init__.py | 2 +- scripts/bump_version | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/__init__.py b/moto/__init__.py index 3508dfeda..9d292a3e1 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.0.1' +__version__ = '1.2.0', from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa diff --git a/scripts/bump_version b/scripts/bump_version index fe7ec1970..b5dc43562 100755 --- a/scripts/bump_version +++ b/scripts/bump_version @@ -17,7 +17,7 @@ main() { git checkout -b version-${version} # Commit the new version - git commit setup.py -m "bumping to version ${version}" + git commit setup.py moto/__init__.py -m "bumping to version ${version}" # Commit an updated IMPLEMENTATION_COVERAGE.md make implementation_coverage || true # Open a PR From fbaca6a130a232d9c1b49b37633b334675befd30 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Dec 2017 11:12:47 -0800 Subject: [PATCH 032/182] Updating CHANGELOG for 1.2.0 --- CHANGELOG.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b10967f64..15ddbec45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,17 @@ Moto Changelog =================== -Latest +1.2.0 ------ + * Implemented signal_workflow_execution for SWF * Wired SWF backend to the moto server - * Fixed incorrect handling of task list parameter on start_workflow_execution + * Revamped lambda function storage to do versioning + * IOT improvements + * RDS improvements + * Implemented CloudWatch get_metric_statistics + * Improved Cloudformation EC2 support + * Implemented Cloudformation change_set endpoints 1.1.25 ----- From c348fd25018589d7a964ea011e108c80e3775203 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 10 Jan 2018 15:01:40 -0800 Subject: [PATCH 033/182] Adding .bumpversion.cfg --- .bumpversion.cfg | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .bumpversion.cfg diff --git a/.bumpversion.cfg b/.bumpversion.cfg new file mode 100644 index 000000000..b775ca46c --- /dev/null +++ b/.bumpversion.cfg @@ -0,0 +1,8 @@ +[bumpversion] +current_version = 1.1.25 + +[bumpversion:file:setup.py] + +[bumpversion:file:moto/__init__.py] + +[bumpversion:file:setup.cfg] From 38711b398cc148c350a2eeb0545562a80178aaf0 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 10 Jan 2018 15:02:41 -0800 Subject: [PATCH 034/182] bringing old version number into line --- moto/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/__init__.py b/moto/__init__.py index 9d292a3e1..12a4edc6d 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.2.0', +__version__ = '1.1.25', from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa From 85e0e2d2c0b962768a750edb01f7aa27e2074f0e Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 10 Jan 2018 15:03:04 -0800 Subject: [PATCH 035/182] not committing version to setup.cfg --- .bumpversion.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/.bumpversion.cfg b/.bumpversion.cfg index b775ca46c..add9882e0 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -5,4 +5,3 @@ current_version = 1.1.25 [bumpversion:file:moto/__init__.py] -[bumpversion:file:setup.cfg] From 24fee6726af5ebc85bb66e6fd9f390feff41b9d2 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 10 Jan 2018 15:04:32 -0800 Subject: [PATCH 036/182] bumping version to 1.2.0 --- .bumpversion.cfg | 2 +- moto/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.bumpversion.cfg b/.bumpversion.cfg index add9882e0..32a01af8f 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.1.25 +current_version = 1.2.0 [bumpversion:file:setup.py] diff --git a/moto/__init__.py b/moto/__init__.py index 12a4edc6d..9d292a3e1 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.1.25', +__version__ = '1.2.0', from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa diff --git a/setup.py b/setup.py index 201622627..27c635944 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ else: setup( name='moto', - version='1.1.25', + version='1.2.0', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 58c37c6fdfc1cabca09f956792caa5237164f4f5 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 10 Jan 2018 15:06:32 -0800 Subject: [PATCH 037/182] using bumpversion package for scripts/bumpversion --- scripts/bump_version | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/scripts/bump_version b/scripts/bump_version index b5dc43562..5315f26f0 100755 --- a/scripts/bump_version +++ b/scripts/bump_version @@ -10,10 +10,8 @@ main() { return 1 fi - # TODO: replace this with the bumpversion pip package, I couldn't - # figure out how to use that for these files - sed -i '' "s/version=.*$/version='${version}',/g" setup.py - sed -i '' "s/__version__ = .*$/__version__ = '${version}',/g" moto/__init__.py + &>/dev/null which bumpversion || pip install bumpversion + bumpversion --new-version ${version} patch git checkout -b version-${version} # Commit the new version From 021303a2af40fc648975cdfc3e2393178ab70add Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 10 Jan 2018 15:07:40 -0800 Subject: [PATCH 038/182] simplifying committing of changed versioned files --- scripts/bump_version | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/bump_version b/scripts/bump_version index 5315f26f0..d1af3a84b 100755 --- a/scripts/bump_version +++ b/scripts/bump_version @@ -1,6 +1,8 @@ #!/bin/bash main() { + set -euo pipefail # Bash safemode + local version=$1 if [[ -z "${version}" ]]; then echo "USAGE: $0 1.3.2" @@ -15,7 +17,7 @@ main() { git checkout -b version-${version} # Commit the new version - git commit setup.py moto/__init__.py -m "bumping to version ${version}" + git commit -a -m "bumping to version ${version}" # Commit an updated IMPLEMENTATION_COVERAGE.md make implementation_coverage || true # Open a PR From 738dc083c813857d565ea823ba0ca7754a0e4632 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 10 Jan 2018 15:32:16 -0800 Subject: [PATCH 039/182] updating CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15ddbec45..4dac737b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ Moto Changelog 1.2.0 ------ + * Supports filtering AMIs by self * Implemented signal_workflow_execution for SWF * Wired SWF backend to the moto server * Revamped lambda function storage to do versioning From da4a6fe6162fb981b82199f35dacff7fc8973959 Mon Sep 17 00:00:00 2001 From: Waldemar Hummer Date: Wed, 10 Jan 2018 19:57:49 -0500 Subject: [PATCH 040/182] implement Fn::GetAZs function in CloudFormation --- moto/cloudformation/parsing.py | 10 +++++++ .../test_cloudformation/test_stack_parsing.py | 27 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index e617fa9f8..81f47f4a3 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -106,6 +106,8 @@ NULL_MODELS = [ "AWS::CloudFormation::WaitConditionHandle", ] +DEFAULT_REGION = 'us-east-1' + logger = logging.getLogger("moto") @@ -203,6 +205,14 @@ def clean_json(resource_json, resources_map): if any(values): return values[0] + if 'Fn::GetAZs' in resource_json: + region = resource_json.get('Fn::GetAZs') or DEFAULT_REGION + result = [] + # TODO: make this configurable, to reflect the real AWS AZs + for az in ('a', 'b', 'c', 'd'): + result.append('%s%s' % (region, az)) + return result + cleaned_json = {} for key, value in resource_json.items(): cleaned_val = clean_json(value, resources_map) diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index d9fe4d80d..af7e608db 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -75,6 +75,14 @@ get_attribute_output = { } } +get_availability_zones_output = { + "Outputs": { + "Output1": { + "Value": {"Fn::GetAZs": ""} + } + } +} + split_select_template = { "AWSTemplateFormatVersion": "2010-09-09", "Resources": { @@ -146,6 +154,8 @@ bad_outputs_template = dict( list(dummy_template.items()) + list(bad_output.items())) get_attribute_outputs_template = dict( list(dummy_template.items()) + list(get_attribute_output.items())) +get_availability_zones_template = dict( + list(dummy_template.items()) + list(get_availability_zones_output.items())) dummy_template_json = json.dumps(dummy_template) name_type_template_json = json.dumps(name_type_template) @@ -153,6 +163,8 @@ output_type_template_json = json.dumps(outputs_template) bad_output_template_json = json.dumps(bad_outputs_template) get_attribute_outputs_template_json = json.dumps( get_attribute_outputs_template) +get_availability_zones_template_json = json.dumps( + get_availability_zones_template) split_select_template_json = json.dumps(split_select_template) sub_template_json = json.dumps(sub_template) export_value_template_json = json.dumps(export_value_template) @@ -243,6 +255,21 @@ def test_parse_stack_with_get_attribute_outputs(): output.value.should.equal("my-queue") +def test_parse_stack_with_get_availability_zones(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=get_availability_zones_template_json, + parameters={}, + region_name='us-east-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('Output1') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + output.value.should.equal([ "us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d" ]) + + def test_parse_stack_with_bad_get_attribute_outputs(): FakeStack.when.called_with( "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError) From 00a4249b74c1816bc468b480ae1ff6ff27351ff8 Mon Sep 17 00:00:00 2001 From: William Richard Date: Tue, 5 Dec 2017 15:47:04 -0500 Subject: [PATCH 041/182] Make test_amis not executable, so nose runs it In trying to debug changes to the ami mock introduced in 1.1.25, I noticed that the ami tests were not running. Turns out that nose does not run test files that are executable. http://nose.readthedocs.io/en/latest/finding_tests.html The ami test file was the only test file I could find that had the executable bit set. --- tests/test_ec2/test_amis.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 tests/test_ec2/test_amis.py diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py old mode 100755 new mode 100644 From e0d4728c5d2a149c8b6cd74d4d8a3cf0c294bc73 Mon Sep 17 00:00:00 2001 From: William Richard Date: Tue, 5 Dec 2017 16:53:30 -0500 Subject: [PATCH 042/182] Fix ami tests - missing and malformed image ids - test_ami_filters - test_ami_copy tests - test_ami_create_and_delete test - test_ami_filter_wildcard test - the rest of the tests by using the non-deprecated mock_ec2 --- moto/ec2/models.py | 71 +++++++++++------------ moto/ec2/responses/amis.py | 4 +- tests/test_ec2/test_amis.py | 110 +++++++++++++++++++++--------------- 3 files changed, 102 insertions(+), 83 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index f22e39b8b..edf6087b9 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -48,7 +48,6 @@ from .exceptions import ( InvalidRouteError, InvalidInstanceIdError, InvalidAMIIdError, - MalformedAMIIdError, InvalidAMIAttributeItemValueError, InvalidSnapshotIdError, InvalidVolumeIdError, @@ -68,8 +67,8 @@ from .exceptions import ( InvalidCustomerGatewayIdError, RulesPerSecurityGroupLimitExceededError, MotoNotImplementedError, - FilterNotImplementedError -) + FilterNotImplementedError, + MalformedAMIIdError) from .utils import ( EC2_RESOURCE_TO_PREFIX, EC2_PREFIX_TO_RESOURCE, @@ -1032,11 +1031,11 @@ class TagBackend(object): class Ami(TaggedEC2Resource): def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None, - name=None, description=None, owner_id=None, + name=None, description=None, owner_id=111122223333, public=False, virtualization_type=None, architecture=None, state='available', creation_date=None, platform=None, image_type='machine', image_location=None, hypervisor=None, - root_device_type=None, root_device_name=None, sriov='simple', + root_device_type='standard', root_device_name='/dev/sda1', sriov='simple', region_name='us-east-1a' ): self.ec2_backend = ec2_backend @@ -1137,14 +1136,13 @@ class AmiBackend(object): ami_id = ami['ami_id'] self.amis[ami_id] = Ami(self, **ami) - def create_image(self, instance_id, name=None, description=None, - context=None): + def create_image(self, instance_id, name=None, description=None, context=None): # TODO: check that instance exists and pull info from it. ami_id = random_ami_id() instance = self.get_instance(instance_id) ami = Ami(self, ami_id, instance=instance, source_ami=None, name=name, description=description, - owner_id=context.get_current_user() if context else None) + owner_id=context.get_current_user() if context else '111122223333') self.amis[ami_id] = ami return ami @@ -1161,36 +1159,39 @@ class AmiBackend(object): context=None): images = self.amis.values() - # Limit images by launch permissions - if exec_users: - tmp_images = [] - for ami in images: - for user_id in exec_users: - if user_id in ami.launch_permission_users: - tmp_images.append(ami) - images = tmp_images + if len(ami_ids): + # boto3 seems to default to just searching based on ami ids if that parameter is passed + # and if no images are found, it raises an errors + malformed_ami_ids = [ami_id for ami_id in ami_ids if not ami_id.startswith('ami-')] + if malformed_ami_ids: + raise MalformedAMIIdError(malformed_ami_ids) - # Limit by owner ids - if owners: - # support filtering by Owners=['self'] - owners = list(map( - lambda o: context.get_current_user() - if context and o == 'self' else o, - owners)) - images = [ami for ami in images if ami.owner_id in owners] - - if ami_ids: images = [ami for ami in images if ami.id in ami_ids] - if len(ami_ids) > len(images): - unknown_ids = set(ami_ids) - set(images) - for id in unknown_ids: - if not self.AMI_REGEX.match(id): - raise MalformedAMIIdError(id) - raise InvalidAMIIdError(unknown_ids) + if len(images) == 0: + raise InvalidAMIIdError(ami_ids) + else: + # Limit images by launch permissions + if exec_users: + tmp_images = [] + for ami in images: + for user_id in exec_users: + if user_id in ami.launch_permission_users: + tmp_images.append(ami) + images = tmp_images + + # Limit by owner ids + if owners: + # support filtering by Owners=['self'] + owners = list(map( + lambda o: context.get_current_user() + if context and o == 'self' else o, + owners)) + images = [ami for ami in images if ami.owner_id in owners] + + # Generic filters + if filters: + return generic_filter(filters, images) - # Generic filters - if filters: - return generic_filter(filters, images) return images def deregister_image(self, ami_id): diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index 12ab3e6be..17e1e228d 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -113,12 +113,12 @@ DESCRIBE_IMAGES_RESPONSE = """ Date: Thu, 11 Jan 2018 14:51:42 -0500 Subject: [PATCH 043/182] Fix tests that were introduced in PR #1398 --- moto/core/responses.py | 2 +- moto/ec2/models.py | 1 + tests/test_ec2/test_amis.py | 33 ++++++++++++++++++++------------- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index 5afe5e168..d254d1f85 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -192,7 +192,7 @@ class BaseResponse(_TemplateEnvironmentMixin): return self.querystring.get('AWSAccessKeyId') else: # Should we raise an unauthorized exception instead? - return None + return '111122223333' def _dispatch(self, request, full_url, headers): self.setup_class(request, full_url, headers) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index edf6087b9..f877d3772 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1140,6 +1140,7 @@ class AmiBackend(object): # TODO: check that instance exists and pull info from it. ami_id = random_ami_id() instance = self.get_instance(instance_id) + ami = Ami(self, ami_id, instance=instance, source_ami=None, name=name, description=description, owner_id=context.get_current_user() if context else '111122223333') diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index d3a36e423..3f21ca20b 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -7,6 +7,7 @@ from boto.exception import EC2ResponseError from botocore.exceptions import ClientError # Ensure 'assert_raises' context manager support for Python 2.6 from nose.tools import assert_raises +import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 from tests.helpers import requires_boto_gte @@ -695,16 +696,20 @@ def test_ami_describe_non_existent(): @mock_ec2 def test_ami_filter_wildcard(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - instance = ec2.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] - image = instance.create_image(Name='test-image') + ec2_resource = boto3.resource('ec2', region_name='us-west-1') + ec2_client = boto3.client('ec2', region_name='us-west-1') + + instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] + instance.create_image(Name='test-image') # create an image with the same owner but will not match the filter instance.create_image(Name='not-matching-image') - filter_result = list( - ec2.images.filter(Owners=['111122223333'], Filters=[{'Name': 'name', 'Values': ['test*']}])) - filter_result.should.equal([image]) + my_images = ec2_client.describe_images( + Owners=['111122223333'], + Filters=[{'Name': 'name', 'Values': ['test*']}] + )['Images'] + my_images.should.have.length_of(1) @mock_ec2 @@ -724,16 +729,18 @@ def test_ami_filter_by_owner_id(): # Check we actually have a subset of images assert len(ubuntu_ids) < len(all_ids) + @mock_ec2 def test_ami_filter_by_self(): - client = boto3.client('ec2', region_name='us-east-1') + ec2_resource = boto3.resource('ec2', region_name='us-west-1') + ec2_client = boto3.client('ec2', region_name='us-west-1') - my_images = client.describe_images(Owners=['self']) - assert len(my_images) == 0 + my_images = ec2_client.describe_images(Owners=['self'])['Images'] + my_images.should.have.length_of(0) # Create a new image - instance = ec2.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] - image = instance.create_image(Name='test-image') + instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] + instance.create_image(Name='test-image') - my_images = client.describe_images(Owners=['self']) - assert len(my_images) == 1 + my_images = ec2_client.describe_images(Owners=['self'])['Images'] + my_images.should.have.length_of(1) From 663283a8f04fc8d22f3ed0735698c929cbe253f4 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Sun, 14 Jan 2018 14:35:53 +0900 Subject: [PATCH 044/182] add logs exceptions --- moto/logs/exceptions.py | 33 +++++++++++++++++++++++++ moto/logs/models.py | 42 +++++++++++++++++++++----------- tests/test_logs/test_logs.py | 47 ++++++++++++++++++++++++++++++++++++ 3 files changed, 108 insertions(+), 14 deletions(-) create mode 100644 moto/logs/exceptions.py diff --git a/moto/logs/exceptions.py b/moto/logs/exceptions.py new file mode 100644 index 000000000..cc83452ea --- /dev/null +++ b/moto/logs/exceptions.py @@ -0,0 +1,33 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class LogsClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(LogsClientError): + def __init__(self): + self.code = 400 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The specified resource does not exist" + ) + + +class InvalidParameterException(LogsClientError): + def __init__(self, msg=None): + self.code = 400 + super(InvalidParameterException, self).__init__( + "InvalidParameterException", + msg or "A parameter is specified incorrectly." + ) + + +class ResourceAlreadyExistsException(LogsClientError): + def __init__(self): + self.code = 400 + super(ResourceAlreadyExistsException, self).__init__( + 'ResourceAlreadyExistsException', + 'The specified resource already exists.' + ) diff --git a/moto/logs/models.py b/moto/logs/models.py index 09dcb3645..9e25a4d6a 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -1,6 +1,10 @@ from moto.core import BaseBackend import boto.logs from moto.core.utils import unix_time_millis +from .exceptions import ( + ResourceNotFoundException, + ResourceAlreadyExistsException +) class LogEvent: @@ -126,11 +130,13 @@ class LogGroup: self.streams = dict() # {name: LogStream} def create_log_stream(self, log_stream_name): - assert log_stream_name not in self.streams + if log_stream_name in self.streams: + raise ResourceAlreadyExistsException() self.streams[log_stream_name] = LogStream(self.region, self.name, log_stream_name) def delete_log_stream(self, log_stream_name): - assert log_stream_name in self.streams + if log_stream_name not in self.streams: + raise ResourceNotFoundException() del self.streams[log_stream_name] def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): @@ -151,18 +157,18 @@ class LogGroup: return log_streams_page, new_token def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): - assert log_stream_name in self.streams + if log_stream_name not in self.streams: + raise ResourceNotFoundException() stream = self.streams[log_stream_name] return stream.put_log_events(log_group_name, log_stream_name, log_events, sequence_token) def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): - assert log_stream_name in self.streams + if log_stream_name not in self.streams: + raise ResourceNotFoundException() stream = self.streams[log_stream_name] return stream.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved): - assert not filter_pattern # TODO: impl - streams = [stream for name, stream in self.streams.items() if not log_stream_names or name in log_stream_names] events = [] @@ -195,7 +201,8 @@ class LogsBackend(BaseBackend): self.__init__(region_name) def create_log_group(self, log_group_name, tags): - assert log_group_name not in self.groups + if log_group_name in self.groups: + raise ResourceAlreadyExistsException() self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) def ensure_log_group(self, log_group_name, tags): @@ -204,37 +211,44 @@ class LogsBackend(BaseBackend): self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) def delete_log_group(self, log_group_name): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() del self.groups[log_group_name] def create_log_stream(self, log_group_name, log_stream_name): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.create_log_stream(log_stream_name) def delete_log_stream(self, log_group_name, log_stream_name): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.delete_log_stream(log_stream_name) def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.describe_log_streams(descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by) def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): # TODO: add support for sequence_tokens - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.put_log_events(log_group_name, log_stream_name, log_events, sequence_token) def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 392b3f7e9..1b2f5f75e 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,7 +1,9 @@ import boto3 import sure # noqa +from botocore.exceptions import ClientError from moto import mock_logs, settings +from nose.tools import assert_raises _logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' @@ -12,3 +14,48 @@ def test_log_group_create(): log_group_name = 'dummy' response = conn.create_log_group(logGroupName=log_group_name) response = conn.delete_log_group(logGroupName=log_group_name) + + +@mock_logs +def test_exceptions(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'dummp-stream' + conn.create_log_group(logGroupName=log_group_name) + with assert_raises(ClientError): + conn.create_log_group(logGroupName=log_group_name) + + # descrine_log_groups is not implemented yet + + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + with assert_raises(ClientError): + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + with assert_raises(ClientError): + conn.put_log_events( + logGroupName=log_group_name, + logStreamName="invalid-stream", + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) From 056a4e4672ece0fce71d4c3ea95abf92a7501c2c Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Sun, 14 Jan 2018 14:39:24 +0900 Subject: [PATCH 045/182] Fix iot exception definition --- moto/iot/exceptions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index 47435eeb5..7bbdb706d 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -28,5 +28,6 @@ class VersionConflictException(IoTClientError): def __init__(self, name): self.code = 409 super(VersionConflictException, self).__init__( + 'VersionConflictException', 'The version for thing %s does not match the expected version.' % name ) From 4f445a3db582c251f88d48866393947a57ae9bb7 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Sun, 14 Jan 2018 14:36:17 +0900 Subject: [PATCH 046/182] Handle describe-streams error when log events does not exist. --- moto/logs/models.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/moto/logs/models.py b/moto/logs/models.py index 9e25a4d6a..6ff7f93bf 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -53,23 +53,29 @@ class LogStream: self.__class__._log_ids += 1 def _update(self): - self.firstEventTimestamp = min([x.timestamp for x in self.events]) - self.lastEventTimestamp = max([x.timestamp for x in self.events]) + # events can be empty when stream is described soon after creation + self.firstEventTimestamp = min([x.timestamp for x in self.events]) if self.events else None + self.lastEventTimestamp = max([x.timestamp for x in self.events]) if self.events else None def to_describe_dict(self): # Compute start and end times self._update() - return { + res = { "arn": self.arn, "creationTime": self.creationTime, - "firstEventTimestamp": self.firstEventTimestamp, - "lastEventTimestamp": self.lastEventTimestamp, - "lastIngestionTime": self.lastIngestionTime, "logStreamName": self.logStreamName, "storedBytes": self.storedBytes, - "uploadSequenceToken": str(self.uploadSequenceToken), } + if self.events: + rest = { + "firstEventTimestamp": self.firstEventTimestamp, + "lastEventTimestamp": self.lastEventTimestamp, + "lastIngestionTime": self.lastIngestionTime, + "uploadSequenceToken": str(self.uploadSequenceToken), + } + res.update(rest) + return res def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): # TODO: ensure sequence_token @@ -140,10 +146,12 @@ class LogGroup: del self.streams[log_stream_name] def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): + # responses only logStreamName, creationTime, arn, storedBytes when no events are stored. + log_streams = [(name, stream.to_describe_dict()) for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)] def sorter(item): - return item[0] if order_by == 'logStreamName' else item[1]['lastEventTimestamp'] + return item[0] if order_by == 'logStreamName' else item[1].get('lastEventTimestamp', 0) if next_token is None: next_token = 0 From ef9b229acc0da428c5fc3f8a588cb3cae2bed213 Mon Sep 17 00:00:00 2001 From: Ciprian Radulescu Date: Sun, 14 Jan 2018 18:49:47 +0200 Subject: [PATCH 047/182] url decode x-amz-copy-source as per s3 nodejs documentation --- moto/s3/responses.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 6abb4f2d1..1cd505cc8 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -4,7 +4,7 @@ import re import six from moto.core.utils import str_to_rfc_1123_datetime -from six.moves.urllib.parse import parse_qs, urlparse +from six.moves.urllib.parse import parse_qs, urlparse, unquote import xmltodict @@ -631,7 +631,7 @@ class ResponseObject(_TemplateEnvironmentMixin): upload_id = query['uploadId'][0] part_number = int(query['partNumber'][0]) if 'x-amz-copy-source' in request.headers: - src = request.headers.get("x-amz-copy-source").lstrip("/") + src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/") src_bucket, src_key = src.split("/", 1) src_range = request.headers.get( 'x-amz-copy-source-range', '').split("bytes=")[-1] @@ -673,7 +673,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'x-amz-copy-source' in request.headers: # Copy key - src_key_parsed = urlparse(request.headers.get("x-amz-copy-source")) + src_key_parsed = urlparse(unquote(request.headers.get("x-amz-copy-source"))) src_bucket, src_key = src_key_parsed.path.lstrip("/").split("/", 1) src_version_id = parse_qs(src_key_parsed.query).get( 'versionId', [None])[0] From 055bc0f5d7279d4a85c74f945d2cd8752b65fe39 Mon Sep 17 00:00:00 2001 From: Ciprian Radulescu Date: Sun, 14 Jan 2018 19:57:45 +0200 Subject: [PATCH 048/182] updated changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b10967f64..4e4632562 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ Latest * Implemented signal_workflow_execution for SWF * Wired SWF backend to the moto server * Fixed incorrect handling of task list parameter on start_workflow_execution + * Added url decoding to x-amz-copy-source header for copying S3 files 1.1.25 ----- From 2beb004006666950d92b78a32bd18e1b24f796c5 Mon Sep 17 00:00:00 2001 From: Ciprian Radulescu Date: Sun, 14 Jan 2018 20:02:49 +0200 Subject: [PATCH 049/182] updated changelog for pull request --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e4632562..e8c6abb11 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,6 @@ Latest ------ * Implemented signal_workflow_execution for SWF * Wired SWF backend to the moto server - * Fixed incorrect handling of task list parameter on start_workflow_execution * Added url decoding to x-amz-copy-source header for copying S3 files 1.1.25 From 597676c59c229819b3da1abaaeb48a3020374e13 Mon Sep 17 00:00:00 2001 From: dbfr3qs Date: Mon, 15 Jan 2018 21:52:32 +1300 Subject: [PATCH 050/182] add tags when creating ebs volume --- moto/ec2/responses/elastic_block_store.py | 13 ++++++++++++ tests/test_ec2/test_tags.py | 26 ++++++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index 37b3e9a07..333642247 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -32,10 +32,13 @@ class ElasticBlockStore(BaseResponse): size = self._get_param('Size') zone = self._get_param('AvailabilityZone') snapshot_id = self._get_param('SnapshotId') + tags = self._parse_tag_specification("TagSpecification") + volume_tags = tags.get('image', {}) encrypted = self._get_param('Encrypted', if_none=False) if self.is_not_dryrun('CreateVolume'): volume = self.ec2_backend.create_volume( size, zone, snapshot_id, encrypted) + volume.add_tags(volume_tags) template = self.response_template(CREATE_VOLUME_RESPONSE) return template.render(volume=volume) @@ -139,6 +142,16 @@ CREATE_VOLUME_RESPONSE = """ Date: Thu, 18 Jan 2018 19:40:24 +1300 Subject: [PATCH 051/182] hack tests now that boto get_templates returns ordered dicts --- .../test_cloudformation_stack_crud_boto3.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 1f3bfdec7..781e89e2b 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -160,7 +160,7 @@ def test_boto3_create_stack(): TemplateBody=dummy_template_json, ) - cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( + json.loads(json.dumps(cf_conn.get_template(StackName="test_stack")['TemplateBody'])).should.equal( dummy_template) @@ -270,9 +270,10 @@ def test_create_stack_from_s3_url(): StackName='stack_from_url', TemplateURL=key_url, ) - - cf_conn.get_template(StackName="stack_from_url")[ - 'TemplateBody'].should.equal(dummy_template) + # from IPython import embed + # embed() + json.loads(json.dumps(cf_conn.get_template(StackName="stack_from_url")[ + 'TemplateBody'])).should.equal(dummy_template) @mock_cloudformation @@ -306,8 +307,8 @@ def test_update_stack_from_s3_url(): TemplateURL=key_url, ) - cf_conn.get_template(StackName="update_stack_from_url")[ - 'TemplateBody'].should.equal(dummy_update_template) + json.loads(json.dumps(cf_conn.get_template(StackName="update_stack_from_url")[ + 'TemplateBody'])).should.equal(dummy_update_template) @mock_cloudformation From 402aa354593c99df4124c058e1e933def00a497a Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sat, 20 Jan 2018 19:35:02 -0800 Subject: [PATCH 052/182] fixing makefile --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 2b90c0ec2..98840ba9b 100644 --- a/Makefile +++ b/Makefile @@ -36,8 +36,7 @@ tag_github_release: git tag `python setup.py --version` git push origin `python setup.py --version` -publish: - upload_pypi_artifact \ +publish: upload_pypi_artifact \ tag_github_release \ push_dockerhub_image From 52f0d0a4e44112800b961f8983ba686087d4a206 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sat, 20 Jan 2018 19:44:22 -0800 Subject: [PATCH 053/182] importing sure in tests that require it --- tests/test_autoscaling/test_elbv2.py | 1 + .../test_cloudformation/test_cloudformation_stack_crud_boto3.py | 1 + tests/test_swf/models/test_activity_task.py | 1 + tests/test_swf/models/test_domain.py | 1 + tests/test_swf/models/test_generic_type.py | 1 + tests/test_swf/models/test_history_event.py | 1 + tests/test_swf/models/test_timeout.py | 1 + tests/test_swf/responses/test_activity_tasks.py | 1 + tests/test_swf/responses/test_activity_types.py | 1 + tests/test_swf/responses/test_decision_tasks.py | 1 + tests/test_swf/responses/test_domains.py | 1 + tests/test_swf/responses/test_timeouts.py | 1 + tests/test_swf/test_exceptions.py | 1 + 13 files changed, 13 insertions(+) diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py index 89ec4a399..9aff981f1 100644 --- a/tests/test_autoscaling/test_elbv2.py +++ b/tests/test_autoscaling/test_elbv2.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import boto3 +import sure # noqa from moto import mock_autoscaling, mock_ec2, mock_elbv2 @mock_elbv2 diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 1f3bfdec7..4e85453b9 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -4,6 +4,7 @@ import json import boto3 from botocore.exceptions import ClientError +import sure # noqa # Ensure 'assert_raises' context manager support for Python 2.6 from nose.tools import assert_raises diff --git a/tests/test_swf/models/test_activity_task.py b/tests/test_swf/models/test_activity_task.py index 5dddab975..41c88cafe 100644 --- a/tests/test_swf/models/test_activity_task.py +++ b/tests/test_swf/models/test_activity_task.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto.swf.exceptions import SWFWorkflowExecutionClosedError from moto.swf.models import ( diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py index 57f66c830..1a8a1268d 100644 --- a/tests/test_swf/models/test_domain.py +++ b/tests/test_swf/models/test_domain.py @@ -1,4 +1,5 @@ from collections import namedtuple +import sure # noqa from moto.swf.exceptions import SWFUnknownResourceFault from moto.swf.models import Domain diff --git a/tests/test_swf/models/test_generic_type.py b/tests/test_swf/models/test_generic_type.py index d7410f395..294df9f84 100644 --- a/tests/test_swf/models/test_generic_type.py +++ b/tests/test_swf/models/test_generic_type.py @@ -1,4 +1,5 @@ from moto.swf.models import GenericType +import sure # noqa # Tests for GenericType (ActivityType, WorkflowType) diff --git a/tests/test_swf/models/test_history_event.py b/tests/test_swf/models/test_history_event.py index 43592aa6c..b869408ce 100644 --- a/tests/test_swf/models/test_history_event.py +++ b/tests/test_swf/models/test_history_event.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto.swf.models import HistoryEvent diff --git a/tests/test_swf/models/test_timeout.py b/tests/test_swf/models/test_timeout.py index d685bca8e..fb52652fd 100644 --- a/tests/test_swf/models/test_timeout.py +++ b/tests/test_swf/models/test_timeout.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto.swf.models import Timeout diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index 3511d4e56..c0b8897b9 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -1,5 +1,6 @@ from boto.swf.exceptions import SWFResponseError from freezegun import freeze_time +import sure # noqa from moto import mock_swf_deprecated from moto.swf import swf_backend diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index b283d3448..95d8a3733 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -1,5 +1,6 @@ import boto from boto.swf.exceptions import SWFResponseError +import sure # noqa from moto import mock_swf_deprecated diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index 466e1a2ae..972b1053b 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -1,5 +1,6 @@ from boto.swf.exceptions import SWFResponseError from freezegun import freeze_time +import sure # noqa from moto import mock_swf_deprecated from moto.swf import swf_backend diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index 3fa12d665..8edc76432 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -1,5 +1,6 @@ import boto from boto.swf.exceptions import SWFResponseError +import sure # noqa from moto import mock_swf_deprecated diff --git a/tests/test_swf/responses/test_timeouts.py b/tests/test_swf/responses/test_timeouts.py index 5bd0ead96..f49c597a4 100644 --- a/tests/test_swf/responses/test_timeouts.py +++ b/tests/test_swf/responses/test_timeouts.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto import mock_swf_deprecated diff --git a/tests/test_swf/test_exceptions.py b/tests/test_swf/test_exceptions.py index a23a14e66..8617242b9 100644 --- a/tests/test_swf/test_exceptions.py +++ b/tests/test_swf/test_exceptions.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +import sure # noqa import json From 89bad393132da86421368ff823138497ab51d771 Mon Sep 17 00:00:00 2001 From: Dan W Anderson Date: Thu, 18 Jan 2018 15:23:27 -0800 Subject: [PATCH 054/182] add redrivepolicy attribute to sqs --- moto/sqs/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 85b69ab0e..dbc170387 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -166,6 +166,7 @@ class Queue(BaseModel): 'MessageRetentionPeriod', 'QueueArn', 'ReceiveMessageWaitTimeSeconds', + 'RedrivePolicy', 'VisibilityTimeout', 'WaitTimeSeconds'] ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', 'GetQueueAttributes', From 8959643e5666872bda234efc1c3c94244a5183c4 Mon Sep 17 00:00:00 2001 From: Dan W Anderson Date: Thu, 18 Jan 2018 15:58:11 -0800 Subject: [PATCH 055/182] return redrivepolicy attribute as string --- moto/sqs/models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index dbc170387..0a268e9eb 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -286,6 +286,8 @@ class Queue(BaseModel): attr = getattr(self, camelcase_to_underscores(attribute)) if isinstance(attr, bool): attr = str(attr).lower() + elif attribute == 'RedrivePolicy': + attr = json.dumps(attr) result[attribute] = attr return result From 616095602ae0c2e7301687529c1e96723ff20f65 Mon Sep 17 00:00:00 2001 From: Dan W Anderson Date: Thu, 18 Jan 2018 15:58:20 -0800 Subject: [PATCH 056/182] test for redrive policy --- tests/test_sqs/test_sqs.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index c761ec8d9..b91fd7bc7 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -932,3 +932,27 @@ def test_queue_with_dlq(): resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1) resp['queueUrls'][0].should.equal(queue_url2) + +@mock_sqs +def test_redrive_policy_available(): + sqs = boto3.client('sqs', region_name='us-east-1') + + resp = sqs.create_queue(QueueName='test-deadletter') + queue_url1 = resp['QueueUrl'] + queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] + redrive_policy = { + 'deadLetterTargetArn': queue_arn1, + 'maxReceiveCount': 1, + } + + resp = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'RedrivePolicy': json.dumps(redrive_policy) + } + ) + + queue_url2 = resp['QueueUrl'] + attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes'] + assert 'RedrivePolicy' in attributes + assert json.loads(attributes['RedrivePolicy']) == redrive_policy From e4cf58d6fa20e3cf1996caf9911cbac284641517 Mon Sep 17 00:00:00 2001 From: Koudai Aono Date: Thu, 7 Dec 2017 11:40:42 +0900 Subject: [PATCH 057/182] cast MaxRecords to int on describe_db_instances --- moto/rds2/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index 3e093221d..f610c1506 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -123,7 +123,7 @@ class RDS2Response(BaseResponse): start = all_ids.index(marker) + 1 else: start = 0 - page_size = self._get_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier + page_size = int(self._get_param('MaxRecords', 50)) # the default is 100, but using 50 to make testing easier instances_resp = all_instances[start:start + page_size] next_marker = None if len(all_instances) > start + page_size: From 518282dbd3e9161c98def7b66310b0742c8019f7 Mon Sep 17 00:00:00 2001 From: Koudai Aono Date: Thu, 18 Jan 2018 13:05:07 +0900 Subject: [PATCH 058/182] change get_param method to get_int_param --- moto/rds2/responses.py | 2 +- tests/test_rds2/test_rds2.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index f610c1506..eddb0042b 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -123,7 +123,7 @@ class RDS2Response(BaseResponse): start = all_ids.index(marker) + 1 else: start = 0 - page_size = int(self._get_param('MaxRecords', 50)) # the default is 100, but using 50 to make testing easier + page_size = self._get_int_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier instances_resp = all_instances[start:start + page_size] next_marker = None if len(all_instances) > start + page_size: diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 183a183b1..ea0ab378f 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -197,6 +197,8 @@ def test_get_databases_paginated(): resp2 = conn.describe_db_instances(Marker=resp["Marker"]) resp2["DBInstances"].should.have.length_of(1) + resp3 = conn.describe_db_instances(MaxRecords=100) + resp3["DBInstances"].should.have.length_of(51) @mock_rds2 def test_describe_non_existant_database(): From 9ef271fafa4d47a2161948a6679337af6c76f1d5 Mon Sep 17 00:00:00 2001 From: Koudai Aono Date: Thu, 25 Jan 2018 01:34:27 +0900 Subject: [PATCH 059/182] Remove unneeded comments --- .../test_cloudformation/test_cloudformation_stack_crud_boto3.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 781e89e2b..b9ee60d6b 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -270,8 +270,6 @@ def test_create_stack_from_s3_url(): StackName='stack_from_url', TemplateURL=key_url, ) - # from IPython import embed - # embed() json.loads(json.dumps(cf_conn.get_template(StackName="stack_from_url")[ 'TemplateBody'])).should.equal(dummy_template) From 0eb02613a022ac25a331513974d25d47383e2abb Mon Sep 17 00:00:00 2001 From: Koudai Aono Date: Thu, 25 Jan 2018 02:37:51 +0900 Subject: [PATCH 060/182] fix get_template tests to support OrderedDict --- .../test_cloudformation_stack_crud_boto3.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index b9ee60d6b..e4625fe69 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import json +from collections import OrderedDict import boto3 from botocore.exceptions import ClientError @@ -160,8 +161,8 @@ def test_boto3_create_stack(): TemplateBody=dummy_template_json, ) - json.loads(json.dumps(cf_conn.get_template(StackName="test_stack")['TemplateBody'])).should.equal( - dummy_template) + cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( + json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) @mock_cloudformation @@ -270,8 +271,8 @@ def test_create_stack_from_s3_url(): StackName='stack_from_url', TemplateURL=key_url, ) - json.loads(json.dumps(cf_conn.get_template(StackName="stack_from_url")[ - 'TemplateBody'])).should.equal(dummy_template) + cf_conn.get_template(StackName="stack_from_url")['TemplateBody'].should.equal( + json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) @mock_cloudformation @@ -305,8 +306,8 @@ def test_update_stack_from_s3_url(): TemplateURL=key_url, ) - json.loads(json.dumps(cf_conn.get_template(StackName="update_stack_from_url")[ - 'TemplateBody'])).should.equal(dummy_update_template) + cf_conn.get_template(StackName="update_stack_from_url")[ 'TemplateBody'].should.equal( + json.loads(dummy_update_template_json, object_pairs_hook=OrderedDict)) @mock_cloudformation From 53184208fd8d84c620abab75ac70d22ed8ffeedf Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Thu, 25 Jan 2018 14:49:59 +1300 Subject: [PATCH 061/182] add elasticloadbalancer:loadbalancer to resourcegroupstaggingapi.get_resources --- moto/resourcegroupstaggingapi/models.py | 35 ++++++++--- .../test_resourcegroupstaggingapi.py | 63 ++++++++++++++++++- 2 files changed, 88 insertions(+), 10 deletions(-) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index fbc54454b..4aec63aa6 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -119,15 +119,17 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): def tag_filter(tag_list): result = [] + if tag_filters: + for tag in tag_list: + temp_result = [] + for f in filters: + f_result = f(tag['Key'], tag['Value']) + temp_result.append(f_result) + result.append(all(temp_result)) - for tag in tag_list: - temp_result = [] - for f in filters: - f_result = f(tag['Key'], tag['Value']) - temp_result.append(f_result) - result.append(all(temp_result)) - - return any(result) + return any(result) + else: + return True # Do S3, resource type s3 if not resource_type_filters or 's3' in resource_type_filters: @@ -210,6 +212,23 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): # TODO add these to the keys and values functions / combine functions # ELB + def get_elbv2_tags(arn): + result = [] + for key, value in self.elbv2_backend.load_balancers[elb.arn].tags.items(): + result.append({'Key': key, 'Value': value}) + return result + + if not resource_type_filters or 'elasticloadbalancer' in resource_type_filters or 'elasticloadbalancer:loadbalancer' in resource_type_filters: + for elb in self.elbv2_backend.load_balancers.values(): + tags = get_elbv2_tags(elb.arn) + # if 'elasticloadbalancer:loadbalancer' in resource_type_filters: + # from IPython import embed + # embed() + if not tag_filter(tags): # Skip if no tags, or invalid filter + continue + + yield {'ResourceARN': '{0}'.format(elb.arn), 'Tags': tags} + # EMR Cluster # Glacier Vault diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index cce0f1b99..759063329 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals import boto3 import sure # noqa -from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2 +from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2, mock_elbv2 @mock_s3 @@ -223,4 +223,63 @@ def test_get_tag_values_ec2(): resp['TagValues'].should.contain('MY_VALUE1') resp['TagValues'].should.contain('MY_VALUE4') - # TODO test pagenation \ No newline at end of file +@mock_ec2 +@mock_elbv2 +@mock_resourcegroupstaggingapi +def test_get_resources_elbv2(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[ + { + 'Key': 'key_name', + 'Value': 'a_value' + }, + { + 'Key': 'key_2', + 'Value': 'val2' + } + ] + ) + + conn.create_load_balancer( + Name='my-other-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + ) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='us-east-1') + + resp = rtapi.get_resources(ResourceTypeFilters=['elasticloadbalancer:loadbalancer']) + + resp['ResourceTagMappingList'].should.have.length_of(2) + resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('loadbalancer/') + resp = rtapi.get_resources( + ResourceTypeFilters=['elasticloadbalancer:loadbalancer'], + TagFilters=[{ + 'Key': 'key_name' + }] + ) + + resp['ResourceTagMappingList'].should.have.length_of(1) + resp['ResourceTagMappingList'][0]['Tags'].should.contain({'Key': 'key_name', 'Value': 'a_value'}) + + # TODO test pagenation From 3ce57644cdda3aacc06aab0c9188fbf9c8d8a1bb Mon Sep 17 00:00:00 2001 From: grahamlyons Date: Fri, 26 Jan 2018 11:12:50 +0000 Subject: [PATCH 062/182] Change `'image'` for `'volume'` when getting tags The AWS docs say that: "Currently, the resource types that support tagging on creation are instance and volume." Calling `create_volume` and passing `image` as the resource type in tag specifications causes an `InvalidParameterValue` error. --- moto/ec2/responses/elastic_block_store.py | 2 +- tests/test_ec2/test_tags.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index 333642247..31831c18b 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -33,7 +33,7 @@ class ElasticBlockStore(BaseResponse): zone = self._get_param('AvailabilityZone') snapshot_id = self._get_param('SnapshotId') tags = self._parse_tag_specification("TagSpecification") - volume_tags = tags.get('image', {}) + volume_tags = tags.get('volume', {}) encrypted = self._get_param('Encrypted', if_none=False) if self.is_not_dryrun('CreateVolume'): volume = self.ec2_backend.create_volume( diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 8f52da6f3..d78fe24c3 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -397,7 +397,7 @@ def test_create_volume_with_tags(): Size=40, TagSpecifications=[ { - 'ResourceType': 'image', + 'ResourceType': 'volume', 'Tags': [ { 'Key': 'TEST_TAG', From aedfebed98b7dc027f052ee37c2c876478cd452a Mon Sep 17 00:00:00 2001 From: Rasmus Larsen Date: Fri, 26 Jan 2018 14:48:11 +0100 Subject: [PATCH 063/182] Add support for CidrIpv6 in Security Ingress rules. --- moto/ec2/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index f877d3772..352960fd5 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1709,6 +1709,7 @@ class SecurityGroupIngress(object): group_id = properties.get('GroupId') ip_protocol = properties.get("IpProtocol") cidr_ip = properties.get("CidrIp") + cidr_ipv6 = properties.get("CidrIpv6" from_port = properties.get("FromPort") source_security_group_id = properties.get("SourceSecurityGroupId") source_security_group_name = properties.get("SourceSecurityGroupName") @@ -1717,7 +1718,7 @@ class SecurityGroupIngress(object): to_port = properties.get("ToPort") assert group_id or group_name - assert source_security_group_name or cidr_ip or source_security_group_id + assert source_security_group_name or cidr_ip or cidr_ipv6 or source_security_group_id assert ip_protocol if source_security_group_id: From 59c49ca34f911da15b9385743b7d2a5124247fd5 Mon Sep 17 00:00:00 2001 From: Rasmus Larsen Date: Fri, 26 Jan 2018 14:55:53 +0100 Subject: [PATCH 064/182] Fix typo. --- moto/ec2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 352960fd5..74bc9d166 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1709,7 +1709,7 @@ class SecurityGroupIngress(object): group_id = properties.get('GroupId') ip_protocol = properties.get("IpProtocol") cidr_ip = properties.get("CidrIp") - cidr_ipv6 = properties.get("CidrIpv6" + cidr_ipv6 = properties.get("CidrIpv6") from_port = properties.get("FromPort") source_security_group_id = properties.get("SourceSecurityGroupId") source_security_group_name = properties.get("SourceSecurityGroupName") From 796145d62d2b7db9e84d00d6119488a1d44a27da Mon Sep 17 00:00:00 2001 From: Rasmus Larsen Date: Fri, 26 Jan 2018 15:32:22 +0100 Subject: [PATCH 065/182] Make DestinationCidrBlock optional (ipv6). --- moto/ec2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 74bc9d166..f80861c10 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2591,7 +2591,7 @@ class Route(object): ec2_backend = ec2_backends[region_name] route_table = ec2_backend.create_route( route_table_id=route_table_id, - destination_cidr_block=properties['DestinationCidrBlock'], + destination_cidr_block=properties.get('DestinationCidrBlock'), gateway_id=gateway_id, instance_id=instance_id, interface_id=interface_id, From d8b124fbf4a641508cb54ecc77c53c0b2cf8e622 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Sun, 28 Jan 2018 03:06:57 +0000 Subject: [PATCH 066/182] added: enable/disable/modify redshift snapshot copy methods --- moto/redshift/exceptions.py | 19 ++++++++ moto/redshift/models.py | 47 +++++++++++++++++- moto/redshift/responses.py | 55 +++++++++++++++++++++ tests/test_redshift/test_redshift.py | 72 ++++++++++++++++++++++++++++ 4 files changed, 191 insertions(+), 2 deletions(-) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index a89ed5a04..138afd442 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -93,3 +93,22 @@ class ResourceNotFoundFaultError(RedshiftClientError): msg = message super(ResourceNotFoundFaultError, self).__init__( 'ResourceNotFoundFault', msg) + + +class SnapshotCopyDisabledFaultError(RedshiftClientError): + def __init__(self, cluster_identifier): + super(SnapshotCopyDisabledFaultError, self).__init__( + 'SnapshotCopyDisabledFault', + "Cannot modify retention period because snapshot copy is disabled on Cluster {0}.".format(cluster_identifier)) + +class SnapshotCopyAlreadyDisabledFaultError(RedshiftClientError): + def __init__(self, cluster_identifier): + super(SnapshotCopyAlreadyDisabledFaultError, self).__init__( + 'SnapshotCopyAlreadyDisabledFault', + "Snapshot Copy is already disabled on Cluster {0}.".format(cluster_identifier)) + +class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError): + def __init__(self, cluster_identifier): + super(SnapshotCopyAlreadyEnabledFaultError, self).__init__( + 'SnapshotCopyAlreadyEnabledFault', + "Snapshot Copy is already enabled on Cluster {0}.".format(cluster_identifier)) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index fa642ef01..2bab77f66 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -17,7 +17,10 @@ from .exceptions import ( ClusterSubnetGroupNotFoundError, InvalidParameterValueError, InvalidSubnetError, - ResourceNotFoundFaultError + ResourceNotFoundFaultError, + SnapshotCopyDisabledFaultError, + SnapshotCopyAlreadyDisabledFaultError, + SnapshotCopyAlreadyEnabledFaultError, ) @@ -80,6 +83,7 @@ class Cluster(TaggableResourceMixin, BaseModel): self.cluster_subnet_group_name = cluster_subnet_group_name self.publicly_accessible = publicly_accessible self.encrypted = encrypted + self.cluster_snapshot_copy_status = {} self.allow_version_upgrade = allow_version_upgrade if allow_version_upgrade is not None else True self.cluster_version = cluster_version if cluster_version else "1.0" @@ -194,7 +198,7 @@ class Cluster(TaggableResourceMixin, BaseModel): return self.cluster_identifier def to_json(self): - return { + json_response = { "MasterUsername": self.master_username, "MasterUserPassword": "****", "ClusterVersion": self.cluster_version, @@ -223,6 +227,7 @@ class Cluster(TaggableResourceMixin, BaseModel): "NodeType": self.node_type, "ClusterIdentifier": self.cluster_identifier, "AllowVersionUpgrade": self.allow_version_upgrade, + "Endpoint": { "Address": self.endpoint, "Port": self.port @@ -231,6 +236,10 @@ class Cluster(TaggableResourceMixin, BaseModel): "Tags": self.tags } + if self.cluster_snapshot_copy_status: + json_response['ClusterSnapshotCopyStatus'] = self.cluster_snapshot_copy_status + return json_response + class SubnetGroup(TaggableResourceMixin, BaseModel): @@ -417,6 +426,40 @@ class RedshiftBackend(BaseBackend): self.__dict__ = {} self.__init__(ec2_backend, region_name) + def enable_snapshot_copy(self, **kwargs): + cluster_identifier = kwargs['cluster_identifier'] + cluster = self.clusters[cluster_identifier] + if not cluster.cluster_snapshot_copy_status: + status = { + 'DestinationRegion': kwargs['destination_region'], + 'RetentionPeriod': kwargs['retention_period'], + 'SnapshotCopyGrantName': kwargs['snapshot_copy_grant_name'], + } + cluster.cluster_snapshot_copy_status = status + return cluster + + else: + raise SnapshotCopyAlreadyEnabledFaultError(cluster_identifier) + + + def disable_snapshot_copy(self, **kwargs): + cluster_identifier = kwargs['cluster_identifier'] + cluster = self.clusters[cluster_identifier] + if cluster.cluster_snapshot_copy_status: + cluster.cluster_snapshot_copy_status = {} + else: + raise SnapshotCopyAlreadyDisabledFaultError(cluster_identifier) + return cluster + + + def modify_snapshot_copy_retention_period(self, cluster_identifier, retention_period): + cluster = self.clusters[cluster_identifier] + if cluster.cluster_snapshot_copy_status: + cluster.cluster_snapshot_copy_status['RetentionPeriod'] = retention_period + else: + raise SnapshotCopyDisabledFaultError(cluster_identifier) + return cluster + def create_cluster(self, **cluster_kwargs): cluster_identifier = cluster_kwargs['cluster_identifier'] cluster = Cluster(self, **cluster_kwargs) diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index a320f9cae..bd7223c8c 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -501,3 +501,58 @@ class RedshiftResponse(BaseResponse): } } }) + + def enable_snapshot_copy(self): + snapshot_copy_kwargs = { + 'cluster_identifier': self._get_param('ClusterIdentifier'), + 'destination_region': self._get_param('DestinationRegion'), + 'retention_period': self._get_param('RetentionPeriod'), + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + } + cluster = self.redshift_backend.enable_snapshot_copy(**snapshot_copy_kwargs) + + return self.get_response({ + "EnableSnapshotCopyResponse": { + "EnableSnapshotCopyResult": { + "Cluster": cluster.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def disable_snapshot_copy(self): + snapshot_copy_kwargs = { + 'cluster_identifier': self._get_param('ClusterIdentifier'), + } + cluster = self.redshift_backend.disable_snapshot_copy(**snapshot_copy_kwargs) + + return self.get_response({ + "DisableSnapshotCopyResponse": { + "DisableSnapshotCopyResult": { + "Cluster": cluster.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def modify_snapshot_copy_retention_period(self): + snapshot_copy_kwargs = { + 'cluster_identifier': self._get_param('ClusterIdentifier'), + 'retention_period': self._get_param('RetentionPeriod'), + } + cluster = self.redshift_backend.modify_snapshot_copy_retention_period(**snapshot_copy_kwargs) + + return self.get_response({ + "ModifySnapshotCopyRetentionPeriodResponse": { + "ModifySnapshotCopyRetentionPeriodResult": { + "Clusters": [cluster.to_json()] + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index cebaa3ec7..46400d34e 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -1042,3 +1042,75 @@ def test_tagged_resource_not_found_error(): ResourceName='bad:arn' ).should.throw(ClientError, "Tagging is not supported for this type of resource") + +@mock_redshift +def test_enable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2' + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') + + +@mock_redshift +def test_disable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.disable_snapshot_copy( + ClusterIdentifier='test', + ) + response = client.describe_clusters(ClusterIdentifier='test') + response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') + +@mock_redshift +def test_modify_snapshot_copy_retention_period(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.modify_snapshot_copy_retention_period( + ClusterIdentifier='test', + RetentionPeriod=5, + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5) From 92798b9a9fef52f5a79d5cd0b3ea48d2310d2ee7 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Sun, 28 Jan 2018 03:27:06 +0000 Subject: [PATCH 067/182] improve error handling --- moto/redshift/models.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 2bab77f66..4975868d0 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -83,7 +83,6 @@ class Cluster(TaggableResourceMixin, BaseModel): self.cluster_subnet_group_name = cluster_subnet_group_name self.publicly_accessible = publicly_accessible self.encrypted = encrypted - self.cluster_snapshot_copy_status = {} self.allow_version_upgrade = allow_version_upgrade if allow_version_upgrade is not None else True self.cluster_version = cluster_version if cluster_version else "1.0" @@ -236,8 +235,10 @@ class Cluster(TaggableResourceMixin, BaseModel): "Tags": self.tags } - if self.cluster_snapshot_copy_status: + try: json_response['ClusterSnapshotCopyStatus'] = self.cluster_snapshot_copy_status + except AttributeError: + pass return json_response @@ -429,7 +430,7 @@ class RedshiftBackend(BaseBackend): def enable_snapshot_copy(self, **kwargs): cluster_identifier = kwargs['cluster_identifier'] cluster = self.clusters[cluster_identifier] - if not cluster.cluster_snapshot_copy_status: + if not hasattr(cluster, 'cluster_snapshot_copy_status'): status = { 'DestinationRegion': kwargs['destination_region'], 'RetentionPeriod': kwargs['retention_period'], @@ -437,28 +438,25 @@ class RedshiftBackend(BaseBackend): } cluster.cluster_snapshot_copy_status = status return cluster - else: raise SnapshotCopyAlreadyEnabledFaultError(cluster_identifier) - def disable_snapshot_copy(self, **kwargs): cluster_identifier = kwargs['cluster_identifier'] cluster = self.clusters[cluster_identifier] - if cluster.cluster_snapshot_copy_status: - cluster.cluster_snapshot_copy_status = {} + if hasattr(cluster, 'cluster_snapshot_copy_status'): + del cluster.cluster_snapshot_copy_status + return cluster else: raise SnapshotCopyAlreadyDisabledFaultError(cluster_identifier) - return cluster - def modify_snapshot_copy_retention_period(self, cluster_identifier, retention_period): cluster = self.clusters[cluster_identifier] - if cluster.cluster_snapshot_copy_status: + if hasattr(cluster, 'cluster_snapshot_copy_status'): cluster.cluster_snapshot_copy_status['RetentionPeriod'] = retention_period + return cluster else: raise SnapshotCopyDisabledFaultError(cluster_identifier) - return cluster def create_cluster(self, **cluster_kwargs): cluster_identifier = cluster_kwargs['cluster_identifier'] From ed066582714d071e2db78ac96f3bd014a8b28a4a Mon Sep 17 00:00:00 2001 From: captainkerk Date: Sun, 28 Jan 2018 03:28:49 +0000 Subject: [PATCH 068/182] address spacing issues --- moto/redshift/exceptions.py | 2 ++ moto/redshift/models.py | 1 - moto/redshift/responses.py | 2 +- tests/test_redshift/test_redshift.py | 1 + 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index 138afd442..865aaeab2 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -101,12 +101,14 @@ class SnapshotCopyDisabledFaultError(RedshiftClientError): 'SnapshotCopyDisabledFault', "Cannot modify retention period because snapshot copy is disabled on Cluster {0}.".format(cluster_identifier)) + class SnapshotCopyAlreadyDisabledFaultError(RedshiftClientError): def __init__(self, cluster_identifier): super(SnapshotCopyAlreadyDisabledFaultError, self).__init__( 'SnapshotCopyAlreadyDisabledFault', "Snapshot Copy is already disabled on Cluster {0}.".format(cluster_identifier)) + class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError): def __init__(self, cluster_identifier): super(SnapshotCopyAlreadyEnabledFaultError, self).__init__( diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 4975868d0..44e944c3b 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -226,7 +226,6 @@ class Cluster(TaggableResourceMixin, BaseModel): "NodeType": self.node_type, "ClusterIdentifier": self.cluster_identifier, "AllowVersionUpgrade": self.allow_version_upgrade, - "Endpoint": { "Address": self.endpoint, "Port": self.port diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index bd7223c8c..724a61b68 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -555,4 +555,4 @@ class RedshiftResponse(BaseResponse): "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", } } - }) + }) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 46400d34e..79da9f193 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -1090,6 +1090,7 @@ def test_disable_snapshot_copy(): response = client.describe_clusters(ClusterIdentifier='test') response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') + @mock_redshift def test_modify_snapshot_copy_retention_period(): client = boto3.client('redshift', region_name='us-east-1') From 7130dd5239891a5ae2a9219e690c8c6f1c4e5906 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Sun, 28 Jan 2018 03:53:32 +0000 Subject: [PATCH 069/182] rework to follow spec for encrypted/unencrypted clusters --- moto/redshift/models.py | 7 +++++++ moto/redshift/responses.py | 2 +- tests/test_redshift/test_redshift.py | 26 ++++++++++++++++++++++++-- 3 files changed, 32 insertions(+), 3 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 44e944c3b..f0ea9f5f9 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -4,6 +4,7 @@ import copy import datetime import boto.redshift +from botocore.exceptions import ClientError from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds @@ -430,6 +431,12 @@ class RedshiftBackend(BaseBackend): cluster_identifier = kwargs['cluster_identifier'] cluster = self.clusters[cluster_identifier] if not hasattr(cluster, 'cluster_snapshot_copy_status'): + if cluster.encrypted == 'true' and kwargs['snapshot_copy_grant_name'] is None: + raise ClientError( + 'InvalidParameterValue', + 'SnapshotCopyGrantName is required for Snapshot Copy ' + 'on KMS encrypted clusters.' + ) status = { 'DestinationRegion': kwargs['destination_region'], 'RetentionPeriod': kwargs['retention_period'], diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index 724a61b68..63945c00b 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -506,7 +506,7 @@ class RedshiftResponse(BaseResponse): snapshot_copy_kwargs = { 'cluster_identifier': self._get_param('ClusterIdentifier'), 'destination_region': self._get_param('DestinationRegion'), - 'retention_period': self._get_param('RetentionPeriod'), + 'retention_period': self._get_param('RetentionPeriod', 7), 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), } cluster = self.redshift_backend.enable_snapshot_copy(**snapshot_copy_kwargs) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 79da9f193..32deb74bc 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -1047,12 +1047,13 @@ def test_tagged_resource_not_found_error(): def test_enable_snapshot_copy(): client = boto3.client('redshift', region_name='us-east-1') client.create_cluster( - DBName='test', ClusterIdentifier='test', ClusterType='single-node', - NodeType='ds2.xlarge', + DBName='test', + Encrypted=True, MasterUsername='user', MasterUserPassword='password', + NodeType='ds2.xlarge', ) client.enable_snapshot_copy( ClusterIdentifier='test', @@ -1067,6 +1068,27 @@ def test_enable_snapshot_copy(): cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') +@mock_redshift +def test_enable_snapshot_copy_unencrypted(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + + @mock_redshift def test_disable_snapshot_copy(): client = boto3.client('redshift', region_name='us-east-1') From e514b98747121e71a63e38012c64008c23bdb002 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Sun, 28 Jan 2018 22:58:28 +0000 Subject: [PATCH 070/182] redshift: add copy grant functionality --- moto/redshift/exceptions.py | 15 +++++++++ moto/redshift/models.py | 50 +++++++++++++++++++++++++++- moto/redshift/responses.py | 49 +++++++++++++++++++++++++++ tests/test_redshift/test_redshift.py | 39 ++++++++++++++++++++++ 4 files changed, 152 insertions(+), 1 deletion(-) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index a89ed5a04..f287b9a5f 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -58,6 +58,21 @@ class InvalidSubnetError(RedshiftClientError): "Subnet {0} not found.".format(subnet_identifier)) +class SnapshotCopyGrantAlreadyExistsFaultError(RedshiftClientError): + def __init__(self, snapshot_copy_grant_name): + super(SnapshotCopyGrantAlreadyExistsFaultError, self).__init__( + 'SnapshotCopyGrantAlreadyExistsFault', + "Cannot create the snapshot copy grant because a grant " + "with the identifier '{0}' already exists".format(snapshot_copy_grant_name)) + + +class SnapshotCopyGrantNotFoundFaultError(RedshiftClientError): + def __init__(self, snapshot_copy_grant_name): + super(SnapshotCopyGrantNotFoundFaultError, self).__init__( + 'SnapshotCopyGrantNotFoundFault', + "Snapshot copy grant not found: {0}".format(snapshot_copy_grant_name)) + + class ClusterSnapshotNotFoundError(RedshiftClientError): def __init__(self, snapshot_identifier): super(ClusterSnapshotNotFoundError, self).__init__( diff --git a/moto/redshift/models.py b/moto/redshift/models.py index fa642ef01..fd7c8b759 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -17,7 +17,9 @@ from .exceptions import ( ClusterSubnetGroupNotFoundError, InvalidParameterValueError, InvalidSubnetError, - ResourceNotFoundFaultError + ResourceNotFoundFaultError, + SnapshotCopyGrantAlreadyExistsFaultError, + SnapshotCopyGrantNotFoundFaultError ) @@ -231,6 +233,22 @@ class Cluster(TaggableResourceMixin, BaseModel): "Tags": self.tags } +class SnapshotCopyGrant(TaggableResourceMixin, BaseModel): + + resource_type = 'snapshotcopygrant' + + def __init__(self, snapshot_copy_grant_name, kms_key_id, region_name): + self.snapshot_copy_grant_name = snapshot_copy_grant_name + self.kms_key_id = kms_key_id + self.region_name = region_name + + def to_json(self): + return { + "SnapshotCopyGrantName": self.snapshot_copy_grant_name, + "KmsKeyId": self.kms_key_id + } + + class SubnetGroup(TaggableResourceMixin, BaseModel): @@ -410,6 +428,7 @@ class RedshiftBackend(BaseBackend): 'snapshot': self.snapshots, 'subnetgroup': self.subnet_groups } + self.snapshot_copy_grants = {} def reset(self): ec2_backend = self.ec2_backend @@ -568,6 +587,35 @@ class RedshiftBackend(BaseBackend): create_kwargs.update(kwargs) return self.create_cluster(**create_kwargs) + def create_snapshot_copy_grant(self, **kwargs): + snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] + kms_key_id = kwargs['kms_key_id'] + region_name = kwargs['region_name'] + if snapshot_copy_grant_name not in self.snapshot_copy_grants: + snapshot_copy_grant = SnapshotCopyGrant(snapshot_copy_grant_name, + kms_key_id, region_name) + self.snapshot_copy_grants[snapshot_copy_grant_name] = snapshot_copy_grant + return snapshot_copy_grant + + raise SnapshotCopyGrantAlreadyExistsFaultError(snapshot_copy_grant_name) + + def delete_snapshot_copy_grant(self, **kwargs): + snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] + if snapshot_copy_grant_name in self.snapshot_copy_grants: + return self.snapshot_copy_grants.pop(snapshot_copy_grant_name) + + raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name) + + def describe_snapshot_copy_grants(self, **kwargs): + copy_grants = self.snapshot_copy_grants.values() + snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] + if snapshot_copy_grant_name: + if snapshot_copy_grant_name in self.snapshot_copy_grants: + return [self.snapshot_copy_grants[snapshot_copy_grant_name]] + else: + raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name) + return copy_grants + def _get_resource_from_arn(self, arn): try: arn_breakdown = arn.split(':') diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index a320f9cae..ab1a9f424 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -457,6 +457,55 @@ class RedshiftResponse(BaseResponse): } }) + def create_snapshot_copy_grant(self): + copy_grant_kwargs = { + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + 'kms_key_id': self._get_param('KmsKeyId'), + 'region_name': self._get_param('Region'), + } + + copy_grant = self.redshift_backend.create_snapshot_copy_grant(**copy_grant_kwargs) + return self.get_response({ + "CreateSnapshotCopyGrantResponse": { + "CreateSnapshotCopyGrantResult": { + "SnapshotCopyGrant": copy_grant.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def delete_snapshot_copy_grant(self): + copy_grant_kwargs = { + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + } + self.redshift_backend.delete_snapshot_copy_grant(**copy_grant_kwargs) + return self.get_response({ + "DeleteSnapshotCopyGrantResponse": { + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def describe_snapshot_copy_grants(self): + copy_grant_kwargs = { + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + } + + copy_grants = self.redshift_backend.describe_snapshot_copy_grants(**copy_grant_kwargs) + return self.get_response({ + "DescribeSnapshotCopyGrantsResponse": { + "DescribeSnapshotCopyGrantsResult": { + "SnapshotCopyGrants": [copy_grant.to_json() for copy_grant in copy_grants] + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + def create_tags(self): resource_name = self._get_param('ResourceName') tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index cebaa3ec7..8759506ec 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -34,6 +34,45 @@ def test_create_cluster_boto3(): response['Cluster']['NodeType'].should.equal('ds2.xlarge') +@mock_redshift +def test_create_snapshot_copy_grant(): + client = boto3.client('redshift', region_name='us-east-1') + grants = client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + KmsKeyId='fake', + ) + grants['SnapshotCopyGrant']['SnapshotCopyGrantName'].should.equal('test-us-east-1') + grants['SnapshotCopyGrant']['KmsKeyId'].should.equal('fake') + + client.delete_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + ) + + client.describe_snapshot_copy_grants.when.called_with( + SnapshotCopyGrantName='test-us-east-1', + ).should.throw(Exception) + + +@mock_redshift +def test_create_many_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + + for i in range(10): + client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1-{0}'.format(i), + KmsKeyId='fake', + ) + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(10) + + +@mock_redshift +def test_no_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(0) + + @mock_redshift_deprecated def test_create_cluster(): conn = boto.redshift.connect_to_region("us-east-1") From 4d77aef7e5d530e47eba836d7c94805e7fd13893 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Sun, 28 Jan 2018 23:08:03 +0000 Subject: [PATCH 071/182] adjust spacing, remove unnecessary region_name variable from SnapshotCopyGrant init --- moto/redshift/models.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index fd7c8b759..5da68fe67 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -237,10 +237,9 @@ class SnapshotCopyGrant(TaggableResourceMixin, BaseModel): resource_type = 'snapshotcopygrant' - def __init__(self, snapshot_copy_grant_name, kms_key_id, region_name): + def __init__(self, snapshot_copy_grant_name, kms_key_id): self.snapshot_copy_grant_name = snapshot_copy_grant_name self.kms_key_id = kms_key_id - self.region_name = region_name def to_json(self): return { @@ -590,20 +589,16 @@ class RedshiftBackend(BaseBackend): def create_snapshot_copy_grant(self, **kwargs): snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] kms_key_id = kwargs['kms_key_id'] - region_name = kwargs['region_name'] if snapshot_copy_grant_name not in self.snapshot_copy_grants: - snapshot_copy_grant = SnapshotCopyGrant(snapshot_copy_grant_name, - kms_key_id, region_name) + snapshot_copy_grant = SnapshotCopyGrant(snapshot_copy_grant_name, kms_key_id) self.snapshot_copy_grants[snapshot_copy_grant_name] = snapshot_copy_grant return snapshot_copy_grant - raise SnapshotCopyGrantAlreadyExistsFaultError(snapshot_copy_grant_name) def delete_snapshot_copy_grant(self, **kwargs): snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] if snapshot_copy_grant_name in self.snapshot_copy_grants: return self.snapshot_copy_grants.pop(snapshot_copy_grant_name) - raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name) def describe_snapshot_copy_grants(self, **kwargs): From 258c076eef09486d998b5d917ac19a7b713b074a Mon Sep 17 00:00:00 2001 From: captainkerk Date: Sun, 28 Jan 2018 23:13:56 +0000 Subject: [PATCH 072/182] (slow heavy metal music playing) adjust spacing to appease the pep8 gods --- moto/redshift/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 5da68fe67..cf639e3df 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -233,6 +233,7 @@ class Cluster(TaggableResourceMixin, BaseModel): "Tags": self.tags } + class SnapshotCopyGrant(TaggableResourceMixin, BaseModel): resource_type = 'snapshotcopygrant' @@ -248,7 +249,6 @@ class SnapshotCopyGrant(TaggableResourceMixin, BaseModel): } - class SubnetGroup(TaggableResourceMixin, BaseModel): resource_type = 'subnetgroup' From f3debf8f6ff57c023231dedd2ee97e7e34774b85 Mon Sep 17 00:00:00 2001 From: grahamlyons Date: Mon, 29 Jan 2018 13:53:44 +0000 Subject: [PATCH 073/182] Test and fix bug for snapshot searching The logic which contructed a list of values for parameters with multiple values was flawed in that e.g. `Subnet.1` and `Subnet.10` would be have their values counted against `Subnet.1` because they share a prefix. This now checks for a starting `.` before counting that name as having the requested prefix. --- moto/core/responses.py | 4 ++++ tests/test_ec2/test_elastic_block_store.py | 25 +++++++++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index d254d1f85..278a24dc4 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -345,6 +345,10 @@ class BaseResponse(_TemplateEnvironmentMixin): if is_tracked(name) or not name.startswith(param_prefix): continue + if len(name) > len(param_prefix) and \ + not name[len(param_prefix):].startswith('.'): + continue + match = self.param_list_regex.search(name[len(param_prefix):]) if len(name) > len(param_prefix) else None if match: prefix = param_prefix + match.group(1) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 9c07f38d6..fc0677cfe 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -5,10 +5,11 @@ from nose.tools import assert_raises from moto.ec2 import ec2_backends import boto +import boto3 from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated @@ -579,3 +580,25 @@ def test_volume_tag_escaping(): snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] dict(snaps[0].tags).should.equal({'key': ''}) + + +@mock_ec2 +def test_search_for_many_snapshots(): + ec2_client = boto3.client('ec2', region_name='eu-west-1') + + volume_response = ec2_client.create_volume( + AvailabilityZone='eu-west-1a', Size=10 + ) + + snapshot_ids = [] + for i in range(1, 20): + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + snapshot_ids.append(create_snapshot_response['SnapshotId']) + + snapshots_response = ec2_client.describe_snapshots( + SnapshotIds=snapshot_ids + ) + + assert len(snapshots_response['Snapshots']) == len(snapshot_ids) From 363f734e2b80f976aaba0f80ac10849a6d0bde63 Mon Sep 17 00:00:00 2001 From: rhard7 Date: Mon, 29 Jan 2018 12:37:23 -0800 Subject: [PATCH 074/182] fixes apigateway timestamp to match aws --- moto/apigateway/models.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index e7ff98119..cc8696104 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -1,12 +1,11 @@ from __future__ import absolute_import from __future__ import unicode_literals -import datetime import requests +import time from moto.packages.responses import responses from moto.core import BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_with_milliseconds from .utils import create_id from .exceptions import StageNotFoundException @@ -20,8 +19,7 @@ class Deployment(BaseModel, dict): self['id'] = deployment_id self['stageName'] = name self['description'] = description - self['createdDate'] = iso_8601_datetime_with_milliseconds( - datetime.datetime.now()) + self['createdDate'] = int(time.time()) class IntegrationResponse(BaseModel, dict): @@ -300,7 +298,7 @@ class RestAPI(BaseModel): self.region_name = region_name self.name = name self.description = description - self.create_date = datetime.datetime.utcnow() + self.create_date = int(time.time()) self.deployments = {} self.stages = {} @@ -313,7 +311,7 @@ class RestAPI(BaseModel): "id": self.id, "name": self.name, "description": self.description, - "createdDate": iso_8601_datetime_with_milliseconds(self.create_date), + "createdDate": int(time.time()), } def add_child(self, path, parent_id=None): From c7bcbadc6ec1e32ff80cc805834ecb03fabeef42 Mon Sep 17 00:00:00 2001 From: Taro Sato Date: Tue, 30 Jan 2018 13:48:04 -0800 Subject: [PATCH 075/182] Fix the S3 HEAD response body --- moto/s3/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 8d2caf098..1b32698e4 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -172,7 +172,7 @@ class ResponseObject(_TemplateEnvironmentMixin): # HEAD (which the real API responds with), and instead # raises NoSuchBucket, leading to inconsistency in # error response between real and mocked responses. - return 404, {}, "Not Found" + return 404, {}, "" return 200, {}, "" def _bucket_response_get(self, bucket_name, querystring, headers): From 5e70d0ce4c24d8f365b0998fb12535dceb0cf416 Mon Sep 17 00:00:00 2001 From: Taro Sato Date: Tue, 30 Jan 2018 16:10:43 -0800 Subject: [PATCH 076/182] Support both virtual-hosted-style and path-style URLs for region name parsing --- moto/s3/responses.py | 9 +++------ moto/s3/utils.py | 19 +++++++++++++++++++ tests/test_s3/test_s3_utils.py | 20 +++++++++++++++++++- 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 8d2caf098..57c435b30 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -18,10 +18,10 @@ from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, Missi MalformedACLError from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \ FakeTag -from .utils import bucket_name_from_url, metadata_from_headers +from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url from xml.dom import minidom -REGION_URL_REGEX = r'\.s3-(.+?)\.amazonaws\.com' + DEFAULT_REGION_NAME = 'us-east-1' @@ -128,10 +128,7 @@ class ResponseObject(_TemplateEnvironmentMixin): parsed_url = urlparse(full_url) querystring = parse_qs(parsed_url.query, keep_blank_values=True) method = request.method - region_name = DEFAULT_REGION_NAME - region_match = re.search(REGION_URL_REGEX, full_url) - if region_match: - region_name = region_match.groups()[0] + region_name = parse_region_from_url(full_url) bucket_name = self.parse_bucket_name_from_url(request, full_url) if not bucket_name: diff --git a/moto/s3/utils.py b/moto/s3/utils.py index a121eae3a..8968d2ad2 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +import logging from boto.s3.key import Key import re @@ -6,6 +7,10 @@ import six from six.moves.urllib.parse import urlparse, unquote import sys + +log = logging.getLogger(__name__) + + bucket_name_regex = re.compile("(.+).s3(.*).amazonaws.com") @@ -27,6 +32,20 @@ def bucket_name_from_url(url): return None +REGION_URL_REGEX = re.compile( + r'^https?://(s3[-\.](?P.+)\.amazonaws\.com/(.+)|' + r'(.+)\.s3-(?P.+)\.amazonaws\.com)/?') + + +def parse_region_from_url(url): + match = REGION_URL_REGEX.search(url) + if match: + region = match.group('region1') or match.group('region2') + else: + region = 'us-east-1' + return region + + def metadata_from_headers(headers): metadata = {} meta_regex = re.compile( diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index b4f56d89a..f1dfc04d1 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from sure import expect -from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore +from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore, parse_region_from_url def test_base_url(): @@ -53,3 +53,21 @@ def test_versioned_key_store(): d.setlist('key', [[1], [2]]) d['key'].should.have.length_of(1) d.getlist('key').should.be.equal([[1], [2]]) + + +def test_parse_region_from_url(): + expected = 'us-west-2' + for url in ['http://s3-us-west-2.amazonaws.com/bucket', + 'http://s3.us-west-2.amazonaws.com/bucket', + 'http://bucket.s3-us-west-2.amazonaws.com', + 'https://s3-us-west-2.amazonaws.com/bucket', + 'https://s3.us-west-2.amazonaws.com/bucket', + 'https://bucket.s3-us-west-2.amazonaws.com']: + parse_region_from_url(url).should.equal(expected) + + expected = 'us-east-1' + for url in ['http://s3.amazonaws.com/bucket', + 'http://bucket.s3.amazonaws.com', + 'https://s3.amazonaws.com/bucket', + 'https://bucket.s3.amazonaws.com']: + parse_region_from_url(url).should.equal(expected) From d090a8188c8842467acda141a3d25841c9df2dee Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 1 Feb 2018 11:05:19 +0900 Subject: [PATCH 077/182] fixing version number Fixes #1481 --- moto/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/__init__.py b/moto/__init__.py index 9d292a3e1..c38212b42 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.2.0', +__version__ = '1.2.0' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa From 5f61950096e2d33cfb42021d5f661a71ae192357 Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Thu, 1 Feb 2018 17:09:10 -0500 Subject: [PATCH 078/182] Make SpotPrice optional when requesting a spot fleet When price is omitted, AWS will default to the on-demand price --- moto/ec2/models.py | 5 +- moto/ec2/responses/spot_fleets.py | 6 +- requirements-dev.txt | 2 +- .../test_cloudformation_stack_integration.py | 72 +++++++++++++++++++ tests/test_ec2/test_spot_fleet.py | 27 +++++++ 5 files changed, 108 insertions(+), 4 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index f877d3772..bfc672ed7 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2943,7 +2943,7 @@ class SpotFleetRequest(TaggedEC2Resource): 'Properties']['SpotFleetRequestConfigData'] ec2_backend = ec2_backends[region_name] - spot_price = properties['SpotPrice'] + spot_price = properties.get('SpotPrice') target_capacity = properties['TargetCapacity'] iam_fleet_role = properties['IamFleetRole'] allocation_strategy = properties['AllocationStrategy'] @@ -2977,7 +2977,8 @@ class SpotFleetRequest(TaggedEC2Resource): launch_spec_index += 1 else: # lowestPrice cheapest_spec = sorted( - self.launch_specs, key=lambda spec: float(spec.spot_price))[0] + # FIXME: change `+inf` to the on demand price scaled to weighted capacity when it's not present + self.launch_specs, key=lambda spec: float(spec.spot_price or '+inf'))[0] weight_so_far = weight_to_add + (weight_to_add % cheapest_spec.weighted_capacity) weight_map[cheapest_spec] = int( weight_so_far // cheapest_spec.weighted_capacity) diff --git a/moto/ec2/responses/spot_fleets.py b/moto/ec2/responses/spot_fleets.py index 81d1e0146..0366af9d6 100644 --- a/moto/ec2/responses/spot_fleets.py +++ b/moto/ec2/responses/spot_fleets.py @@ -40,7 +40,7 @@ class SpotFleets(BaseResponse): def request_spot_fleet(self): spot_config = self._get_dict_param("SpotFleetRequestConfig.") - spot_price = spot_config['spot_price'] + spot_price = spot_config.get('spot_price') target_capacity = spot_config['target_capacity'] iam_fleet_role = spot_config['iam_fleet_role'] allocation_strategy = spot_config['allocation_strategy'] @@ -78,7 +78,9 @@ DESCRIBE_SPOT_FLEET_TEMPLATE = """ Date: Tue, 13 Feb 2018 17:28:56 +1100 Subject: [PATCH 079/182] Use the TemplateBody by default it exist serverless cli use the PUT + TemplateBody + TemplateURL to upload cloudformation template This fix try to get the TemplateBody from the request first, if not exist, fetch the one from s3 bucket --- AUTHORS.md | 1 + moto/cloudformation/responses.py | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/AUTHORS.md b/AUTHORS.md index 1771d1a78..710cf5dcb 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -49,3 +49,4 @@ Moto is written by Steve Pulec with contributions from: * [Michael van Tellingen](https://github.com/mvantellingen) * [Jessie Nadler](https://github.com/nadlerjessie) * [Alex Morken](https://github.com/alexmorken) +* [Clive Li](https://github.com/cliveli) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 07d263652..73d1d2c2b 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -221,13 +221,12 @@ class CloudFormationResponse(BaseResponse): stack_name = self._get_param('StackName') role_arn = self._get_param('RoleARN') template_url = self._get_param('TemplateURL') + stack_body = self._get_param('TemplateBody') if self._get_param('UsePreviousTemplate') == "true": stack_body = self.cloudformation_backend.get_stack( stack_name).template - elif template_url: + elif not stack_body and template_url: stack_body = self._get_stack_from_s3_url(template_url) - else: - stack_body = self._get_param('TemplateBody') parameters = dict([ (parameter['parameter_key'], parameter['parameter_value']) From bf2ba0b680fc26903549626a1e7864cce765d9d6 Mon Sep 17 00:00:00 2001 From: Loukas Leontopoulos Date: Mon, 19 Feb 2018 15:22:53 +0200 Subject: [PATCH 080/182] Add more exception tests for opsworks --- tests/test_opsworks/test_instances.py | 50 ++++++++++++++++++++++++++- tests/test_opsworks/test_layers.py | 29 ++++++++++++++++ 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index 9c9e20878..f594a87c8 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -23,6 +23,20 @@ def test_create_instance(): Shortname="TestLayerShortName" )['LayerId'] + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + second_layer_id = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="SecondTestLayer", + Shortname="SecondTestLayerShortName" + )['LayerId'] + response = client.create_instance( StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" ) @@ -36,6 +50,14 @@ def test_create_instance(): client.create_instance.when.called_with( StackId=stack_id, LayerIds=["nothere"], InstanceType="t2.micro" ).should.throw(Exception, "nothere") + # ClientError + client.create_instance.when.called_with( + StackId=stack_id, LayerIds=[second_layer_id], InstanceType="t2.micro" + ).should.throw(Exception, "Please only provide layer IDs from the same stack") + # ClientError + client.start_instance.when.called_with( + InstanceId="nothere" + ).should.throw(Exception, "Unable to find instance with ID nothere") @mock_opsworks @@ -131,6 +153,32 @@ def test_describe_instances(): response.should.have.length_of(2) S2L1_i1.should_not.be.within([i["InstanceId"] for i in response]) + # ClientError + client.describe_instances.when.called_with( + StackId=S1, + LayerId=S1L1 + ).should.throw( + Exception, "Please provide either one or more" + ) + # ClientError + client.describe_instances.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + LayerId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + InstanceIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) + @mock_opsworks @mock_ec2 @@ -155,7 +203,7 @@ def test_ec2_integration(): )['LayerId'] instance_id = opsworks.create_instance( - StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" + StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro", SshKeyName="testSSH" )['InstanceId'] ec2 = boto3.client('ec2', region_name='us-east-1') diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index 03224feb0..9c640dfc3 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -62,6 +62,15 @@ def test_create_layer_response(): Exception, re.compile( r'already a layer with shortname "TestLayerShortName"') ) + # ClientError + client.create_layer.when.called_with( + StackId="nothere", + Type="custom", + Name="TestLayer", + Shortname="_" + ).should.throw( + Exception, "nothere" + ) @freeze_time("2015-01-01") @@ -86,3 +95,23 @@ def test_describe_layers(): rv1['Layers'].should.equal(rv2['Layers']) rv1['Layers'][0]['Name'].should.equal("TestLayer") + + # ClientError + client.describe_layers.when.called_with( + StackId=stack_id, + LayerIds=[layer_id] + ).should.throw( + Exception, "Please provide one or more layer IDs or a stack ID" + ) + # ClientError + client.describe_layers.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_layers.when.called_with( + LayerIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) From 29061ec0f83a6517492ccd856fb5305f04887f69 Mon Sep 17 00:00:00 2001 From: Chris Wolfe Date: Mon, 19 Feb 2018 09:10:52 -0600 Subject: [PATCH 081/182] add a basic test to start --- tests/test_ssm/test_ssm_boto3.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index ff8e5e8a4..c9be673ac 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -458,3 +458,20 @@ def test_add_remove_list_tags_for_resource(): ResourceType='Parameter' ) len(response['TagList']).should.equal(0) + + +@mock_ssm +def test_send_command(): + ssm_document = 'AWS-RunShellScript' + script = '#!/bin/bash\necho \'hello world\'' + + client = boto3.client('ssm', region_name='us-east-1') + response = client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document, + TimeoutSeconds=60, + Parameters={'commands': [script]}, + OutputS3BucketName='the-bucket' + ) + + assert response['Command'] From 3d7f584f6453c27f9435d3a7b10b120b93650093 Mon Sep 17 00:00:00 2001 From: Loukas Leontopoulos Date: Mon, 19 Feb 2018 17:28:35 +0200 Subject: [PATCH 082/182] Change lambda backend to support docker changes --- moto/awslambda/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 947691bcf..3c3d3ea66 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -104,7 +104,7 @@ class _DockerDataVolumeContext: # It doesn't exist so we need to create it self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(self._lambda_func.code_sha_256) - container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes={self.name: '/tmp/data'}, detach=True) + container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes={self.name: {'bind': '/tmp/data', 'mode': 'rw'}}, detach=True) try: tar_bytes = zip2tar(self._lambda_func.code_bytes) container.put_archive('/tmp/data', tar_bytes) @@ -309,7 +309,7 @@ class LambdaFunction(BaseModel): finally: if container: try: - exit_code = container.wait(timeout=300) + exit_code = container.wait(timeout=300)['StatusCode'] except requests.exceptions.ReadTimeout: exit_code = -1 container.stop() From 99d336241715afd3a3438b049972e713008d527b Mon Sep 17 00:00:00 2001 From: Chris Wolfe Date: Mon, 19 Feb 2018 09:39:29 -0600 Subject: [PATCH 083/182] add code to respond to SSM send_command --- moto/ssm/models.py | 39 +++++++++++++++++++++++++++++++++++++++ moto/ssm/responses.py | 5 +++++ 2 files changed, 44 insertions(+) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index d8dc10a4b..c15a2047a 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -5,7 +5,9 @@ from collections import defaultdict from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends +import datetime import time +import uuid class Parameter(BaseModel): @@ -138,6 +140,43 @@ class SimpleSystemManagerBackend(BaseBackend): def list_tags_for_resource(self, resource_type, resource_id): return self._resource_tags[resource_type][resource_id] + def send_command(self, **kwargs): + instances = kwargs['InstanceIds'] + now = datetime.datetime.now() + expires_after = now + datetime.timedelta(0, int(kwargs['TimeoutSeconds'])) + return { + 'Command': { + 'CommandId': str(uuid.uuid4()), + 'DocumentName': kwargs['DocumentName'], + 'Comment': kwargs.get('Comment'), + 'ExpiresAfter': expires_after.isoformat(), + 'Parameters': { + 'string': [ + 'string', + ] + }, + 'InstanceIds': kwargs['InstanceIds'], + 'Targets': kwargs.get('targets'), + 'RequestedDateTime': now.isoformat(), + 'Status': 'Success', + 'StatusDetails': 'string', + 'OutputS3Region': 'string', + 'OutputS3BucketName': 'string', + 'OutputS3KeyPrefix': 'string', + 'MaxConcurrency': 'string', + 'MaxErrors': 'string', + 'TargetCount': len(instances), + 'CompletedCount': len(instances), + 'ErrorCount': 0, + 'ServiceRole': kwargs.get('ServiceRoleArn'), + 'NotificationConfig': { + 'NotificationArn': 'string', + 'NotificationEvents': ['Success'], + 'NotificationType': 'Command' + } + } + } + ssm_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 0b4ca3b65..757bf0317 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -190,3 +190,8 @@ class SimpleSystemManagerResponse(BaseResponse): tag_list = [{'Key': k, 'Value': v} for (k, v) in tags.items()] response = {'TagList': tag_list} return json.dumps(response) + + def send_command(self): + return json.dumps( + self.ssm_backend.send_command(**self.request_params) + ) From 8ac4ff1e99c68c751046932ea360ca2c10d8a9e7 Mon Sep 17 00:00:00 2001 From: Chris Wolfe Date: Mon, 19 Feb 2018 09:58:46 -0600 Subject: [PATCH 084/182] greater granularity --- moto/ssm/models.py | 12 ++++-------- tests/test_ssm/test_ssm_boto3.py | 20 ++++++++++++++++---- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index c15a2047a..0f75599c3 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -150,19 +150,15 @@ class SimpleSystemManagerBackend(BaseBackend): 'DocumentName': kwargs['DocumentName'], 'Comment': kwargs.get('Comment'), 'ExpiresAfter': expires_after.isoformat(), - 'Parameters': { - 'string': [ - 'string', - ] - }, + 'Parameters': kwargs['Parameters'], 'InstanceIds': kwargs['InstanceIds'], 'Targets': kwargs.get('targets'), 'RequestedDateTime': now.isoformat(), 'Status': 'Success', 'StatusDetails': 'string', - 'OutputS3Region': 'string', - 'OutputS3BucketName': 'string', - 'OutputS3KeyPrefix': 'string', + 'OutputS3Region': kwargs.get('OutputS3Region'), + 'OutputS3BucketName': kwargs.get('OutputS3BucketName'), + 'OutputS3KeyPrefix': kwargs.get('OutputS3KeyPrefix'), 'MaxConcurrency': 'string', 'MaxErrors': 'string', 'TargetCount': len(instances), diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index c9be673ac..5d6588732 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -463,15 +463,27 @@ def test_add_remove_list_tags_for_resource(): @mock_ssm def test_send_command(): ssm_document = 'AWS-RunShellScript' - script = '#!/bin/bash\necho \'hello world\'' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} client = boto3.client('ssm', region_name='us-east-1') + before = datetime.datetime.now() response = client.send_command( InstanceIds=['i-123456'], DocumentName=ssm_document, TimeoutSeconds=60, - Parameters={'commands': [script]}, - OutputS3BucketName='the-bucket' + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref' ) + cmd = response['Command'] - assert response['Command'] + cmd['CommandId'].should_not.be(None) + cmd['DocumentName'].should.equal(ssm_document) + cmd['Parameters'].should.equal(params) + + cmd['OutputS3Region'].should.equal('us-east-2') + cmd['OutputS3BucketName'].should.equal('the-bucket') + cmd['OutputS3KeyPrefix'].should.equal('pref') + + cmd['ExpiresAfter'].should.be.greater_than(before) From 7a6987a7f17804902ba5e470b1a93cf7bdef978d Mon Sep 17 00:00:00 2001 From: Chris Wolfe Date: Mon, 19 Feb 2018 09:59:52 -0600 Subject: [PATCH 085/182] note --- tests/test_ssm/test_ssm_boto3.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 5d6588732..0e8a770b3 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -466,7 +466,9 @@ def test_send_command(): params = {'commands': ['#!/bin/bash\necho \'hello world\'']} client = boto3.client('ssm', region_name='us-east-1') + # note the timeout is determined server side, so this is a simpler check. before = datetime.datetime.now() + response = client.send_command( InstanceIds=['i-123456'], DocumentName=ssm_document, From afe922bfb60a195370d042772dd9b5d7cfe0eda3 Mon Sep 17 00:00:00 2001 From: Evan Stachowiak Date: Wed, 21 Feb 2018 13:11:26 +0100 Subject: [PATCH 086/182] Fix MaxRecords compare issue --- moto/autoscaling/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index d3f9ca483..787198dfa 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -166,7 +166,7 @@ class AutoScalingResponse(BaseResponse): start = all_names.index(token) + 1 else: start = 0 - max_records = self._get_param("MaxRecords", 50) + max_records = int(self._get_param("MaxRecords", 50)) if max_records > 100: raise ValueError groups = all_groups[start:start + max_records] From 6ab416724a594e545af42e14a21a4b00db7e38d7 Mon Sep 17 00:00:00 2001 From: andrew Date: Thu, 22 Feb 2018 14:58:19 -0500 Subject: [PATCH 087/182] WIP: add iam roles to redshift --- moto/redshift/models.py | 5 ++++- moto/redshift/responses.py | 11 ++++++++++- tests/test_redshift/test_redshift.py | 18 ++++++++++++++++++ 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index fa642ef01..7062b521c 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -67,7 +67,7 @@ class Cluster(TaggableResourceMixin, BaseModel): preferred_maintenance_window, cluster_parameter_group_name, automated_snapshot_retention_period, port, cluster_version, allow_version_upgrade, number_of_nodes, publicly_accessible, - encrypted, region_name, tags=None): + encrypted, region_name, tags=None, iam_roles=None): super(Cluster, self).__init__(region_name, tags) self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier @@ -112,6 +112,9 @@ class Cluster(TaggableResourceMixin, BaseModel): else: self.number_of_nodes = 1 + if iam_roles: + self.iam_roles = iam_roles + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): redshift_backend = redshift_backends[region_name] diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index a320f9cae..54cd51744 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -99,6 +99,12 @@ class RedshiftResponse(BaseResponse): vpc_security_group_ids = self._get_multi_param('VpcSecurityGroupIds.VpcSecurityGroupId') return vpc_security_group_ids + def _get_iam_roles(self): + iam_roles = self._get_multi_param('IamRoles.member') + if not iam_roles: + iam_roles = self._get_multi_param('IamRoles.IamRoleArn') + return iam_roles + def _get_subnet_ids(self): subnet_ids = self._get_multi_param('SubnetIds.member') if not subnet_ids: @@ -127,7 +133,8 @@ class RedshiftResponse(BaseResponse): "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), "region_name": self.region, - "tags": self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + "tags": self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')), + "iam_roles": self._get_iam_roles(), } cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json() cluster['ClusterStatus'] = 'creating' @@ -162,6 +169,7 @@ class RedshiftResponse(BaseResponse): "automated_snapshot_retention_period": self._get_int_param( 'AutomatedSnapshotRetentionPeriod'), "region_name": self.region, + "iam_roles": self._get_iam_roles(), } cluster = self.redshift_backend.restore_from_cluster_snapshot(**restore_kwargs).to_json() cluster['ClusterStatus'] = 'creating' @@ -209,6 +217,7 @@ class RedshiftResponse(BaseResponse): "number_of_nodes": self._get_int_param('NumberOfNodes'), "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), + "iam_roles": self._get_iam_roles(), } cluster_kwargs = {} # We only want parameters that were actually passed in, otherwise diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index cebaa3ec7..3267b3acf 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -294,6 +294,24 @@ def test_create_cluster_with_vpc_security_groups_boto3(): list(group_ids).should.equal([security_group.id]) +@mock_redshift +def test_create_cluster_with_iam_roles(): + iam_role = 'arn:aws:iam:::role/my-iam-role' + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_id, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + IamRoles=[iam_role], + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] + list(iam_roles).should.equal([iam_role.arn]) + + @mock_redshift_deprecated def test_create_cluster_with_parameter_group(): conn = boto.connect_redshift() From 7d3af65f2233770a4dfb2ca46c127959bb53505d Mon Sep 17 00:00:00 2001 From: andrew Date: Fri, 23 Feb 2018 14:42:49 -0500 Subject: [PATCH 088/182] fix errors --- moto/redshift/models.py | 27 +++++++++++++++++++++------ moto/redshift/responses.py | 6 +++--- tests/test_redshift/test_redshift.py | 6 +++--- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 7062b521c..a7096d956 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -8,6 +8,7 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2 import ec2_backends +from moto.iam import iam_backends from .exceptions import ( ClusterNotFoundError, ClusterParameterGroupNotFoundError, @@ -67,7 +68,7 @@ class Cluster(TaggableResourceMixin, BaseModel): preferred_maintenance_window, cluster_parameter_group_name, automated_snapshot_retention_period, port, cluster_version, allow_version_upgrade, number_of_nodes, publicly_accessible, - encrypted, region_name, tags=None, iam_roles=None): + encrypted, region_name, tags=None, iam_roles_arn=[]): super(Cluster, self).__init__(region_name, tags) self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier @@ -112,8 +113,7 @@ class Cluster(TaggableResourceMixin, BaseModel): else: self.number_of_nodes = 1 - if iam_roles: - self.iam_roles = iam_roles + self.iam_roles_arn = iam_roles_arn @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): @@ -196,6 +196,14 @@ class Cluster(TaggableResourceMixin, BaseModel): def resource_id(self): return self.cluster_identifier + @property + def iam_roles(self): + return [ + iam_role for iam_role + in self.redshift_backend.iam_backend.get_roles() + if iam_role.arn in self.iam_roles_arn + ] + def to_json(self): return { "MasterUsername": self.master_username, @@ -231,7 +239,11 @@ class Cluster(TaggableResourceMixin, BaseModel): "Port": self.port }, "PendingModifiedValues": [], - "Tags": self.tags + "Tags": self.tags, + "IamRoles": [{ + "ApplyStatus": "in-sync", + "IamRoleArn": iam_role.arn + } for iam_role in self.iam_roles] } @@ -354,7 +366,7 @@ class Snapshot(TaggableResourceMixin, BaseModel): resource_type = 'snapshot' - def __init__(self, cluster, snapshot_identifier, region_name, tags=None): + def __init__(self, cluster, snapshot_identifier, region_name, tags=None, iam_roles_arn=[]): super(Snapshot, self).__init__(region_name, tags) self.cluster = copy.copy(cluster) self.snapshot_identifier = snapshot_identifier @@ -362,6 +374,7 @@ class Snapshot(TaggableResourceMixin, BaseModel): self.status = 'available' self.create_time = iso_8601_datetime_with_milliseconds( datetime.datetime.now()) + self.iam_roles_arn = iam_roles_arn @property def resource_id(self): @@ -383,7 +396,8 @@ class Snapshot(TaggableResourceMixin, BaseModel): 'NodeType': self.cluster.node_type, 'NumberOfNodes': self.cluster.number_of_nodes, 'DBName': self.cluster.db_name, - 'Tags': self.tags + 'Tags': self.tags, + 'IamRoles': self.iam_roles_arn } @@ -413,6 +427,7 @@ class RedshiftBackend(BaseBackend): 'snapshot': self.snapshots, 'subnetgroup': self.subnet_groups } + self.iam_backend = iam_backends['global'] def reset(self): ec2_backend = self.ec2_backend diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index 54cd51744..34f116bd5 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -134,7 +134,7 @@ class RedshiftResponse(BaseResponse): "encrypted": self._get_param("Encrypted"), "region_name": self.region, "tags": self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')), - "iam_roles": self._get_iam_roles(), + "iam_roles_arn": self._get_iam_roles(), } cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json() cluster['ClusterStatus'] = 'creating' @@ -169,7 +169,7 @@ class RedshiftResponse(BaseResponse): "automated_snapshot_retention_period": self._get_int_param( 'AutomatedSnapshotRetentionPeriod'), "region_name": self.region, - "iam_roles": self._get_iam_roles(), + "iam_roles_arn": self._get_iam_roles(), } cluster = self.redshift_backend.restore_from_cluster_snapshot(**restore_kwargs).to_json() cluster['ClusterStatus'] = 'creating' @@ -217,7 +217,7 @@ class RedshiftResponse(BaseResponse): "number_of_nodes": self._get_int_param('NumberOfNodes'), "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), - "iam_roles": self._get_iam_roles(), + "iam_roles_arn": self._get_iam_roles(), } cluster_kwargs = {} # We only want parameters that were actually passed in, otherwise diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 3267b3acf..b617ac797 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -296,7 +296,7 @@ def test_create_cluster_with_vpc_security_groups_boto3(): @mock_redshift def test_create_cluster_with_iam_roles(): - iam_role = 'arn:aws:iam:::role/my-iam-role' + iam_roles_arn = ['arn:aws:iam:::role/my-iam-role',] client = boto3.client('redshift', region_name='us-east-1') cluster_id = 'my_cluster' client.create_cluster( @@ -304,12 +304,12 @@ def test_create_cluster_with_iam_roles(): NodeType="dw.hs1.xlarge", MasterUsername="username", MasterUserPassword="password", - IamRoles=[iam_role], + IamRoles=iam_roles_arn ) response = client.describe_clusters(ClusterIdentifier=cluster_id) cluster = response['Clusters'][0] iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] - list(iam_roles).should.equal([iam_role.arn]) + iam_roles_arn.should.equal(iam_roles) @mock_redshift_deprecated From 894906e0ee8332c6e9dc3d78c5e7da016b5c70ae Mon Sep 17 00:00:00 2001 From: andrew Date: Fri, 23 Feb 2018 14:52:22 -0500 Subject: [PATCH 089/182] remove reliance on iam_backends --- moto/redshift/models.py | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index a7096d956..9a5850faf 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -8,7 +8,6 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2 import ec2_backends -from moto.iam import iam_backends from .exceptions import ( ClusterNotFoundError, ClusterParameterGroupNotFoundError, @@ -196,14 +195,6 @@ class Cluster(TaggableResourceMixin, BaseModel): def resource_id(self): return self.cluster_identifier - @property - def iam_roles(self): - return [ - iam_role for iam_role - in self.redshift_backend.iam_backend.get_roles() - if iam_role.arn in self.iam_roles_arn - ] - def to_json(self): return { "MasterUsername": self.master_username, @@ -242,8 +233,8 @@ class Cluster(TaggableResourceMixin, BaseModel): "Tags": self.tags, "IamRoles": [{ "ApplyStatus": "in-sync", - "IamRoleArn": iam_role.arn - } for iam_role in self.iam_roles] + "IamRoleArn": iam_role_arn + } for iam_role_arn in self.iam_roles_arn] } @@ -397,7 +388,10 @@ class Snapshot(TaggableResourceMixin, BaseModel): 'NumberOfNodes': self.cluster.number_of_nodes, 'DBName': self.cluster.db_name, 'Tags': self.tags, - 'IamRoles': self.iam_roles_arn + "IamRoles": [{ + "ApplyStatus": "in-sync", + "IamRoleArn": iam_role_arn + } for iam_role_arn in self.iam_roles_arn] } @@ -427,7 +421,6 @@ class RedshiftBackend(BaseBackend): 'snapshot': self.snapshots, 'subnetgroup': self.subnet_groups } - self.iam_backend = iam_backends['global'] def reset(self): ec2_backend = self.ec2_backend From 73a1c03580fd0ba383e6130d264844b1c90ad524 Mon Sep 17 00:00:00 2001 From: Dave Golombek Date: Mon, 5 Mar 2018 16:52:56 -0500 Subject: [PATCH 090/182] Fix handling of PageSize for ELB/ELBv2 Resolves #1497 --- moto/elb/responses.py | 2 +- moto/elbv2/responses.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/elb/responses.py b/moto/elb/responses.py index b1980c9b2..40d6ec2f9 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -61,7 +61,7 @@ class ELBResponse(BaseResponse): start = all_names.index(marker) + 1 else: start = 0 - page_size = self._get_param('PageSize', 50) # the default is 400, but using 50 to make testing easier + page_size = self._get_int_param('PageSize', 50) # the default is 400, but using 50 to make testing easier load_balancers_resp = all_load_balancers[start:start + page_size] next_marker = None if len(all_load_balancers) > start + page_size: diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 7c71ce78a..1814f1273 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -242,7 +242,7 @@ class ELBV2Response(BaseResponse): start = all_names.index(marker) + 1 else: start = 0 - page_size = self._get_param('PageSize', 50) # the default is 400, but using 50 to make testing easier + page_size = self._get_int_param('PageSize', 50) # the default is 400, but using 50 to make testing easier load_balancers_resp = all_load_balancers[start:start + page_size] next_marker = None if len(all_load_balancers) > start + page_size: @@ -468,7 +468,7 @@ class ELBV2Response(BaseResponse): def describe_account_limits(self): # Supports paging but not worth implementing yet # marker = self._get_param('Marker') - # page_size = self._get_param('PageSize') + # page_size = self._get_int_param('PageSize') limits = { 'application-load-balancers': 20, @@ -489,7 +489,7 @@ class ELBV2Response(BaseResponse): names = self._get_multi_param('Names.member.') # Supports paging but not worth implementing yet # marker = self._get_param('Marker') - # page_size = self._get_param('PageSize') + # page_size = self._get_int_param('PageSize') policies = SSL_POLICIES if names: From 4f05aa725cd12a8cef8eae6b257e2a6facef39ea Mon Sep 17 00:00:00 2001 From: Srikanth Raju Date: Tue, 6 Mar 2018 01:50:22 -0800 Subject: [PATCH 091/182] S3: Do not attempt to return deleted files in bucket listing --- moto/s3/models.py | 1 + tests/test_s3/test_s3.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/moto/s3/models.py b/moto/s3/models.py index 7eb89531f..b8a6a99cc 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -722,6 +722,7 @@ class S3Backend(BaseBackend): else: key_results.add(key) + key_results = filter(lambda key: not isinstance(key, FakeDeleteMarker), key_results) key_results = sorted(key_results, key=lambda key: key.name) folder_results = [folder_name for folder_name in sorted( folder_results, key=lambda key: key)] diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 33752af60..0d6b691a9 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1390,6 +1390,21 @@ def test_boto3_copy_object_with_versioning(): obj2_version_new.should_not.equal(obj2_version) +@mock_s3 +def test_boto3_deleted_versionings_list(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah') + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + client.delete_objects(Bucket='blah', Delete={'Objects': [{'Key': 'test1'}]}) + + listed = client.list_objects_v2(Bucket='blah') + assert len(listed['Contents']) == 1 + + @mock_s3 def test_boto3_head_object_if_modified_since(): s3 = boto3.client('s3', region_name='us-east-1') From 63be8a6c3869675a0c8f20ef6e3763bc5c037890 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 6 Mar 2018 08:06:12 -0500 Subject: [PATCH 092/182] Fix test coverage report. --- setup.cfg | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/setup.cfg b/setup.cfg index 3c6e79cf3..fb04c16a8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,8 @@ +[nosetests] +verbosity=1 +detailed-errors=1 +with-coverage=1 +cover-package=moto + [bdist_wheel] universal=1 From 9a8b36debc633b29b68c2f5c313632cc4ecb6b57 Mon Sep 17 00:00:00 2001 From: Dave Golombek Date: Tue, 6 Mar 2018 16:56:15 -0500 Subject: [PATCH 093/182] ELBv2.create_listener links TargetGroup to LB In order to search target_groups by LB, we need this link in place. Resolves #1500 --- moto/elbv2/models.py | 4 ++++ tests/test_elbv2/test_elbv2.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 726b1a164..8921581d3 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -486,6 +486,10 @@ class ELBv2Backend(BaseBackend): arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self)) listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions) balancer.listeners[listener.arn] = listener + for action in default_actions: + if action['target_group_arn'] in self.target_groups.keys(): + target_group = self.target_groups[action['target_group_arn']] + target_group.load_balancer_arns.append(load_balancer_arn) return listener def describe_load_balancers(self, arns, names): diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 4fb527525..ce092976a 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -340,6 +340,10 @@ def test_create_target_group_and_listeners(): 'Type': 'forward'}]) http_listener_arn = listener.get('ListenerArn') + response = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn, + Names=['a-target']) + response.get('TargetGroups').should.have.length_of(1) + # And another with SSL response = conn.create_listener( LoadBalancerArn=load_balancer_arn, From 31eac49e1555c5345021a252cb0c95043197ea16 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 6 Mar 2018 17:48:23 -0500 Subject: [PATCH 094/182] Lock down version of aws-xray-sdk See https://travis-ci.org/spulec/moto/jobs/350056229 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 27c635944..57140401b 100755 --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ install_requires = [ "mock", "docker>=2.5.1", "jsondiff==1.1.1", - "aws-xray-sdk>=0.93", + "aws-xray-sdk<0.96,>=0.93", ] extras_require = { From e2e1c7347b3421adec88e76afbb89b1cad6783ea Mon Sep 17 00:00:00 2001 From: andrew Date: Wed, 7 Mar 2018 08:38:07 -0500 Subject: [PATCH 095/182] default to None --- moto/redshift/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 9a5850faf..c8a4e3531 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -67,7 +67,7 @@ class Cluster(TaggableResourceMixin, BaseModel): preferred_maintenance_window, cluster_parameter_group_name, automated_snapshot_retention_period, port, cluster_version, allow_version_upgrade, number_of_nodes, publicly_accessible, - encrypted, region_name, tags=None, iam_roles_arn=[]): + encrypted, region_name, tags=None, iam_roles_arn=None): super(Cluster, self).__init__(region_name, tags) self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier @@ -112,7 +112,7 @@ class Cluster(TaggableResourceMixin, BaseModel): else: self.number_of_nodes = 1 - self.iam_roles_arn = iam_roles_arn + self.iam_roles_arn = iam_roles_arn or [] @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): @@ -357,7 +357,7 @@ class Snapshot(TaggableResourceMixin, BaseModel): resource_type = 'snapshot' - def __init__(self, cluster, snapshot_identifier, region_name, tags=None, iam_roles_arn=[]): + def __init__(self, cluster, snapshot_identifier, region_name, tags=None, iam_roles_arn=None): super(Snapshot, self).__init__(region_name, tags) self.cluster = copy.copy(cluster) self.snapshot_identifier = snapshot_identifier @@ -365,7 +365,7 @@ class Snapshot(TaggableResourceMixin, BaseModel): self.status = 'available' self.create_time = iso_8601_datetime_with_milliseconds( datetime.datetime.now()) - self.iam_roles_arn = iam_roles_arn + self.iam_roles_arn = iam_roles_arn or [] @property def resource_id(self): From ee353961739f004cfa5999fbcbba7d6e6d1c2288 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 7 Mar 2018 09:24:18 -0500 Subject: [PATCH 096/182] Cleanup param parsing. --- moto/autoscaling/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 787198dfa..9e11299ce 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -166,7 +166,7 @@ class AutoScalingResponse(BaseResponse): start = all_names.index(token) + 1 else: start = 0 - max_records = int(self._get_param("MaxRecords", 50)) + max_records = self._get_int_param("MaxRecords", 50) if max_records > 100: raise ValueError groups = all_groups[start:start + max_records] From bfeea007749a771248ffafc6c6d9693b36577f10 Mon Sep 17 00:00:00 2001 From: Jim Shields Date: Wed, 13 Dec 2017 10:15:40 -0500 Subject: [PATCH 097/182] Fix #1370: Implement suspend_processes in AutoScaling service --- moto/autoscaling/models.py | 5 ++++ moto/autoscaling/responses.py | 22 +++++++++++++++- tests/test_autoscaling/test_autoscaling.py | 29 ++++++++++++++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index af65c2a56..0ebc4c465 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -179,6 +179,7 @@ class FakeAutoScalingGroup(BaseModel): self.placement_group = placement_group self.termination_policies = termination_policies + self.suspended_processes = [] self.instance_states = [] self.tags = tags if tags else [] self.set_desired_capacity(desired_capacity) @@ -621,6 +622,10 @@ class AutoScalingBackend(BaseBackend): asg_targets = [{'id': x.instance.id} for x in group.instance_states] self.elbv2_backend.deregister_targets(target_group, (asg_targets)) + def suspend_processes(self, group_name, scaling_processes): + group = self.autoscaling_groups[group_name] + group.suspended_processes = scaling_processes or [] + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 9e11299ce..c7170e17e 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -283,6 +283,13 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE) return template.render() + def suspend_processes(self): + autoscaling_group_name = self._get_param('AutoScalingGroupName') + scaling_processes = self._get_multi_param('ScalingProcesses.member') + self.autoscaling_backend.suspend_processes(autoscaling_group_name, scaling_processes) + template = self.response_template(SUSPEND_PROCESSES_TEMPLATE) + return template.render() + CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """ @@ -463,7 +470,14 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {% endfor %} - + + {% for suspended_process in group.suspended_processes %} + + {{suspended_process}} + + + {% endfor %} + {{ group.name }} {{ group.health_check_type }} 2013-05-06T17:47:15.107Z @@ -644,6 +658,12 @@ DETACH_LOAD_BALANCERS_TEMPLATE = """ + + 7c6e177f-f082-11e1-ac58-3714bEXAMPLE + +""" + SET_INSTANCE_HEALTH_TEMPLATE = """ diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 453d14096..f2ee16221 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -1067,3 +1067,32 @@ def test_set_instance_health(): instance1 = response['AutoScalingGroups'][0]['Instances'][0] instance1['HealthStatus'].should.equal('Unhealthy') + +@mock_autoscaling +def test_asg(): + client = boto3.client('autoscaling') + client.create_launch_configuration( + LaunchConfigurationName='lc', + ) + client.create_auto_scaling_group( + LaunchConfigurationName='lc', + AutoScalingGroupName='test-asg', + MinSize=1, + MaxSize=1, + ) + + # Testing something that calls the below... + client.suspend_processes( + AutoScalingGroupName='test-asg', + ScalingProcesses=['Launch'] + ) + + res = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test-asg'] + ) + launch_suspended = False + for proc in res['AutoScalingGroups'][0]['SuspendedProcesses']: + if proc.get('ProcessName') == 'Launch': + launch_suspended = True + + assert launch_suspended is True From baedbfa8ca058cef407540965af31c46b5a7b569 Mon Sep 17 00:00:00 2001 From: Jim Shields Date: Fri, 9 Mar 2018 17:22:57 -0500 Subject: [PATCH 098/182] Fix test_suspend_processes * Add `region_name` to the client to be consistent with other tests * Add `VPCZoneIdentifier` to the ASG creation (AZ or VPC is required) * Add myself as a contributor --- AUTHORS.md | 1 + tests/test_autoscaling/test_autoscaling.py | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/AUTHORS.md b/AUTHORS.md index 710cf5dcb..5152e5471 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -50,3 +50,4 @@ Moto is written by Steve Pulec with contributions from: * [Jessie Nadler](https://github.com/nadlerjessie) * [Alex Morken](https://github.com/alexmorken) * [Clive Li](https://github.com/cliveli) +* [Jim Shields](https://github.com/jimjshields) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index f2ee16221..f86ca2b81 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -1069,8 +1069,9 @@ def test_set_instance_health(): instance1['HealthStatus'].should.equal('Unhealthy') @mock_autoscaling -def test_asg(): - client = boto3.client('autoscaling') +def test_suspend_processes(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') client.create_launch_configuration( LaunchConfigurationName='lc', ) @@ -1079,9 +1080,10 @@ def test_asg(): AutoScalingGroupName='test-asg', MinSize=1, MaxSize=1, + VPCZoneIdentifier=mocked_networking['subnet1'], ) - # Testing something that calls the below... + # When we suspend the 'Launch' process on the ASG client client.suspend_processes( AutoScalingGroupName='test-asg', ScalingProcesses=['Launch'] @@ -1090,6 +1092,8 @@ def test_asg(): res = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test-asg'] ) + + # The 'Launch' process should, in fact, be suspended launch_suspended = False for proc in res['AutoScalingGroups'][0]['SuspendedProcesses']: if proc.get('ProcessName') == 'Launch': From 6dce7dcb18491a5630f7454ac1a596f2f91f58e6 Mon Sep 17 00:00:00 2001 From: Iain Bullard Date: Wed, 21 Mar 2018 15:48:08 +0000 Subject: [PATCH 099/182] Improve SQS Compatibility with AWS (#1520) * Return correct error code when fetching a queue that does not exist * Improve SQS Queue get and set attributes * Queue creation and set_attributes uses the same code path - ensure bool/int values are cast correctly * RedrivePolicy is handled properly with set_attributes - _setup_dlq is called - is json decoded, so that returned RedrivePolicy is not json encoded twice * As per AWS not all attributes are returned when they are not set, for example RedrivePolicy, FifoQueue, Policy, Kms* * WaitTimeSeconds is not a queue attribute switch to ReceiveMessageWaitTimeSeconds --- moto/sqs/models.py | 140 ++++++++++++++++++++++++------------- moto/sqs/responses.py | 15 ++-- tests/test_sqs/test_sqs.py | 75 +++++++++++++++++++- 3 files changed, 170 insertions(+), 60 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 0a268e9eb..044759e4f 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -152,64 +152,86 @@ class Message(BaseModel): class Queue(BaseModel): - camelcase_attributes = ['ApproximateNumberOfMessages', - 'ApproximateNumberOfMessagesDelayed', - 'ApproximateNumberOfMessagesNotVisible', - 'ContentBasedDeduplication', - 'CreatedTimestamp', - 'DelaySeconds', - 'FifoQueue', - 'KmsDataKeyReusePeriodSeconds', - 'KmsMasterKeyId', - 'LastModifiedTimestamp', - 'MaximumMessageSize', - 'MessageRetentionPeriod', - 'QueueArn', - 'ReceiveMessageWaitTimeSeconds', - 'RedrivePolicy', - 'VisibilityTimeout', - 'WaitTimeSeconds'] - ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', 'GetQueueAttributes', - 'GetQueueUrl', 'ReceiveMessage', 'SendMessage') + base_attributes = ['ApproximateNumberOfMessages', + 'ApproximateNumberOfMessagesDelayed', + 'ApproximateNumberOfMessagesNotVisible', + 'CreatedTimestamp', + 'DelaySeconds', + 'LastModifiedTimestamp', + 'MaximumMessageSize', + 'MessageRetentionPeriod', + 'QueueArn', + 'ReceiveMessageWaitTimeSeconds', + 'VisibilityTimeout'] + fifo_attributes = ['FifoQueue', + 'ContentBasedDeduplication'] + kms_attributes = ['KmsDataKeyReusePeriodSeconds', + 'KmsMasterKeyId'] + ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', + 'GetQueueAttributes', 'GetQueueUrl', + 'ReceiveMessage', 'SendMessage') def __init__(self, name, region, **kwargs): self.name = name - self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30)) self.region = region self.tags = {} + self.permissions = {} self._messages = [] now = unix_time() - - # kwargs can also have: - # [Policy, RedrivePolicy] - self.fifo_queue = kwargs.get('FifoQueue', 'false') == 'true' - self.content_based_deduplication = kwargs.get('ContentBasedDeduplication', 'false') == 'true' - self.kms_master_key_id = kwargs.get('KmsMasterKeyId', 'alias/aws/sqs') - self.kms_data_key_reuse_period_seconds = int(kwargs.get('KmsDataKeyReusePeriodSeconds', 300)) self.created_timestamp = now - self.delay_seconds = int(kwargs.get('DelaySeconds', 0)) - self.last_modified_timestamp = now - self.maximum_message_size = int(kwargs.get('MaximumMessageSize', 64 << 10)) - self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days - self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name) - self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0)) - self.permissions = {} - - # wait_time_seconds will be set to immediate return messages - self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0)) - - self.redrive_policy = {} + self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, + self.name) self.dead_letter_queue = None - if 'RedrivePolicy' in kwargs: - self._setup_dlq(kwargs['RedrivePolicy']) + # default settings for a non fifo queue + defaults = { + 'ContentBasedDeduplication': 'false', + 'DelaySeconds': 0, + 'FifoQueue': 'false', + 'KmsDataKeyReusePeriodSeconds': 300, # five minutes + 'KmsMasterKeyId': None, + 'MaximumMessageSize': int(64 << 10), + 'MessageRetentionPeriod': 86400 * 4, # four days + 'Policy': None, + 'ReceiveMessageWaitTimeSeconds': 0, + 'RedrivePolicy': None, + 'VisibilityTimeout': 30, + } + + defaults.update(kwargs) + self._set_attributes(defaults, now) # Check some conditions if self.fifo_queue and not self.name.endswith('.fifo'): raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues') + def _set_attributes(self, attributes, now=None): + if not now: + now = unix_time() + + integer_fields = ('DelaySeconds', 'KmsDataKeyreusePeriodSeconds', + 'MaximumMessageSize', 'MessageRetentionPeriod', + 'ReceiveMessageWaitTime', 'VisibilityTimeout') + bool_fields = ('ContentBasedDeduplication', 'FifoQueue') + + for key, value in six.iteritems(attributes): + if key in integer_fields: + value = int(value) + if key in bool_fields: + value = value == "true" + + if key == 'RedrivePolicy' and value is not None: + continue + + setattr(self, camelcase_to_underscores(key), value) + + if attributes.get('RedrivePolicy', None): + self._setup_dlq(attributes['RedrivePolicy']) + + self.last_modified_timestamp = now + def _setup_dlq(self, policy_json): try: self.redrive_policy = json.loads(policy_json) @@ -252,8 +274,8 @@ class Queue(BaseModel): if 'VisibilityTimeout' in properties: queue.visibility_timeout = int(properties['VisibilityTimeout']) - if 'WaitTimeSeconds' in properties: - queue.wait_time_seconds = int(properties['WaitTimeSeconds']) + if 'ReceiveMessageWaitTimeSeconds' in properties: + queue.receive_message_wait_time_seconds = int(properties['ReceiveMessageWaitTimeSeconds']) return queue @classmethod @@ -282,13 +304,31 @@ class Queue(BaseModel): @property def attributes(self): result = {} - for attribute in self.camelcase_attributes: + + for attribute in self.base_attributes: attr = getattr(self, camelcase_to_underscores(attribute)) - if isinstance(attr, bool): - attr = str(attr).lower() - elif attribute == 'RedrivePolicy': - attr = json.dumps(attr) result[attribute] = attr + + if self.fifo_queue: + for attribute in self.fifo_attributes: + attr = getattr(self, camelcase_to_underscores(attribute)) + result[attribute] = attr + + if self.kms_master_key_id: + for attribute in self.kms_attributes: + attr = getattr(self, camelcase_to_underscores(attribute)) + result[attribute] = attr + + if self.policy: + result['Policy'] = self.policy + + if self.redrive_policy: + result['RedrivePolicy'] = json.dumps(self.redrive_policy) + + for key in result: + if isinstance(result[key], bool): + result[key] = str(result[key]).lower() + return result def url(self, request_url): @@ -355,9 +395,9 @@ class SQSBackend(BaseBackend): return self.queues.pop(queue_name) return False - def set_queue_attribute(self, queue_name, key, value): + def set_queue_attributes(self, queue_name, attributes): queue = self.get_queue(queue_name) - setattr(queue, key, value) + queue._set_attributes(attributes) return queue def send_message(self, queue_name, message_body, message_attributes=None, delay_seconds=None): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index b31681f16..71aab9a58 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -4,7 +4,7 @@ import re from six.moves.urllib.parse import urlparse from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores, amz_crc32, amzn_request_id +from moto.core.utils import amz_crc32, amzn_request_id from .utils import parse_message_attributes from .models import sqs_backends from .exceptions import ( @@ -87,7 +87,8 @@ class SQSResponse(BaseResponse): try: queue = self.sqs_backend.get_queue(queue_name) except QueueDoesNotExist as e: - return self._error('QueueDoesNotExist', e.description) + return self._error('AWS.SimpleQueueService.NonExistentQueue', + e.description) if queue: template = self.response_template(GET_QUEUE_URL_RESPONSE) @@ -171,7 +172,8 @@ class SQSResponse(BaseResponse): try: queue = self.sqs_backend.get_queue(queue_name) except QueueDoesNotExist as e: - return self._error('QueueDoesNotExist', e.description) + return self._error('AWS.SimpleQueueService.NonExistentQueue', + e.description) template = self.response_template(GET_QUEUE_ATTRIBUTES_RESPONSE) return template.render(queue=queue) @@ -179,9 +181,8 @@ class SQSResponse(BaseResponse): def set_queue_attributes(self): # TODO validate self.get_param('QueueUrl') queue_name = self._get_queue_name() - for key, value in self.attribute.items(): - key = camelcase_to_underscores(key) - self.sqs_backend.set_queue_attribute(queue_name, key, value) + self.sqs_backend.set_queue_attributes(queue_name, self.attribute) + return SET_QUEUE_ATTRIBUTE_RESPONSE def delete_queue(self): @@ -323,7 +324,7 @@ class SQSResponse(BaseResponse): try: wait_time = int(self.querystring.get("WaitTimeSeconds")[0]) except TypeError: - wait_time = queue.wait_time_seconds + wait_time = queue.receive_message_wait_time_seconds try: visibility_timeout = self._get_validated_visibility_timeout() diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index b91fd7bc7..05936ab39 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -72,6 +72,24 @@ def test_create_queue(): queue.attributes.get('VisibilityTimeout').should.equal('30') +@mock_sqs +def test_create_queue_kms(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + new_queue = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'KmsMasterKeyId': 'master-key-id', + 'KmsDataKeyReusePeriodSeconds': '600' + }) + new_queue.should_not.be.none + + queue = sqs.get_queue_by_name(QueueName='test-queue') + + queue.attributes.get('KmsMasterKeyId').should.equal('master-key-id') + queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600') + + @mock_sqs def test_get_nonexistent_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') @@ -79,13 +97,15 @@ def test_get_nonexistent_queue(): sqs.get_queue_by_name(QueueName='nonexisting-queue') ex = err.exception ex.operation_name.should.equal('GetQueueUrl') - ex.response['Error']['Code'].should.equal('QueueDoesNotExist') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') with assert_raises(ClientError) as err: sqs.Queue('http://whatever-incorrect-queue-address').load() ex = err.exception ex.operation_name.should.equal('GetQueueAttributes') - ex.response['Error']['Code'].should.equal('QueueDoesNotExist') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') @mock_sqs @@ -890,7 +910,7 @@ def test_create_fifo_queue_with_dlq(): def test_queue_with_dlq(): if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': raise SkipTest('Cant manipulate time in server mode') - + sqs = boto3.client('sqs', region_name='us-east-1') with freeze_time("2015-01-01 12:00:00"): @@ -933,6 +953,7 @@ def test_queue_with_dlq(): resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1) resp['queueUrls'][0].should.equal(queue_url2) + @mock_sqs def test_redrive_policy_available(): sqs = boto3.client('sqs', region_name='us-east-1') @@ -956,3 +977,51 @@ def test_redrive_policy_available(): attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes'] assert 'RedrivePolicy' in attributes assert json.loads(attributes['RedrivePolicy']) == redrive_policy + + # Cant have redrive policy without maxReceiveCount + with assert_raises(ClientError): + sqs.create_queue( + QueueName='test-queue2', + Attributes={ + 'FifoQueue': 'true', + 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1}) + } + ) + + +@mock_sqs +def test_redrive_policy_non_existent_queue(): + sqs = boto3.client('sqs', region_name='us-east-1') + redrive_policy = { + 'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:no-queue', + 'maxReceiveCount': 1, + } + + with assert_raises(ClientError): + sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'RedrivePolicy': json.dumps(redrive_policy) + } + ) + + +@mock_sqs +def test_redrive_policy_set_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + queue = sqs.create_queue(QueueName='test-queue') + deadletter_queue = sqs.create_queue(QueueName='test-deadletter') + + redrive_policy = { + 'deadLetterTargetArn': deadletter_queue.attributes['QueueArn'], + 'maxReceiveCount': 1, + } + + queue.set_attributes(Attributes={ + 'RedrivePolicy': json.dumps(redrive_policy)}) + + copy = sqs.get_queue_by_name(QueueName='test-queue') + assert 'RedrivePolicy' in copy.attributes + copy_policy = json.loads(copy.attributes['RedrivePolicy']) + assert copy_policy == redrive_policy From d3d9557d499f91c4858f4b132a127905e3e250da Mon Sep 17 00:00:00 2001 From: Iain Bullard Date: Wed, 21 Mar 2018 15:49:11 +0000 Subject: [PATCH 100/182] Implement basic SNS message filtering (#1521) * Add support for FilterPolicy to sns subscription set_filter_attributes * Add basic support for sns message filtering This adds support for exact string value matching along with AND/OR logic as described here: https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html It does not provide support for: - Anything-but string matching - Prefix string matching - Numeric Value Matching The above filter policies (if configured) will not match messages. --- moto/sns/models.py | 44 ++++++- moto/sns/responses.py | 8 +- moto/sqs/responses.py | 2 +- tests/test_sns/test_publishing_boto3.py | 133 +++++++++++++++++++++ tests/test_sns/test_subscriptions_boto3.py | 15 +++ 5 files changed, 194 insertions(+), 8 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 70587d980..9afc28f46 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -42,11 +42,12 @@ class Topic(BaseModel): self.subscriptions_confimed = 0 self.subscriptions_deleted = 0 - def publish(self, message, subject=None): + def publish(self, message, subject=None, message_attributes=None): message_id = six.text_type(uuid.uuid4()) subscriptions, _ = self.sns_backend.list_subscriptions(self.arn) for subscription in subscriptions: - subscription.publish(message, message_id, subject=subject) + subscription.publish(message, message_id, subject=subject, + message_attributes=message_attributes) return message_id def get_cfn_attribute(self, attribute_name): @@ -81,9 +82,14 @@ class Subscription(BaseModel): self.protocol = protocol self.arn = make_arn_for_subscription(self.topic.arn) self.attributes = {} + self._filter_policy = None # filter policy as a dict, not json. self.confirmed = False - def publish(self, message, message_id, subject=None): + def publish(self, message, message_id, subject=None, + message_attributes=None): + if not self._matches_filter_policy(message_attributes): + return + if self.protocol == 'sqs': queue_name = self.endpoint.split(":")[-1] region = self.endpoint.split(":")[3] @@ -98,6 +104,28 @@ class Subscription(BaseModel): region = self.arn.split(':')[3] lambda_backends[region].send_message(function_name, message, subject=subject) + def _matches_filter_policy(self, message_attributes): + # TODO: support Anything-but matching, prefix matching and + # numeric value matching. + if not self._filter_policy: + return True + + if message_attributes is None: + message_attributes = {} + + def _field_match(field, rules, message_attributes): + if field not in message_attributes: + return False + for rule in rules: + if isinstance(rule, six.string_types): + # only string value matching is supported + if message_attributes[field] == rule: + return True + return False + + return all(_field_match(field, rules, message_attributes) + for field, rules in six.iteritems(self._filter_policy)) + def get_post_data(self, message, message_id, subject): return { "Type": "Notification", @@ -274,13 +302,14 @@ class SNSBackend(BaseBackend): else: return self._get_values_nexttoken(self.subscriptions, next_token) - def publish(self, arn, message, subject=None): + def publish(self, arn, message, subject=None, message_attributes=None): if subject is not None and len(subject) >= 100: raise ValueError('Subject must be less than 100 characters') try: topic = self.get_topic(arn) - message_id = topic.publish(message, subject=subject) + message_id = topic.publish(message, subject=subject, + message_attributes=message_attributes) except SNSNotFoundError: endpoint = self.get_endpoint(arn) message_id = endpoint.publish(message) @@ -352,7 +381,7 @@ class SNSBackend(BaseBackend): return subscription.attributes def set_subscription_attributes(self, arn, name, value): - if name not in ['RawMessageDelivery', 'DeliveryPolicy']: + if name not in ['RawMessageDelivery', 'DeliveryPolicy', 'FilterPolicy']: raise SNSInvalidParameter('AttributeName') # TODO: should do validation @@ -363,6 +392,9 @@ class SNSBackend(BaseBackend): subscription.attributes[name] = value + if name == 'FilterPolicy': + subscription._filter_policy = json.loads(value) + sns_backends = {} for region in boto.sns.regions(): diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 3b4aade80..7f23214cf 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -241,6 +241,10 @@ class SNSResponse(BaseResponse): phone_number = self._get_param('PhoneNumber') subject = self._get_param('Subject') + message_attributes = self._get_map_prefix('MessageAttributes.entry', + key_end='Name', + value_end='Value') + if phone_number is not None: # Check phone is correct syntax (e164) if not is_e164(phone_number): @@ -265,7 +269,9 @@ class SNSResponse(BaseResponse): message = self._get_param('Message') try: - message_id = self.backend.publish(arn, message, subject=subject) + message_id = self.backend.publish( + arn, message, subject=subject, + message_attributes=message_attributes) except ValueError as err: error_response = self._error('InvalidParameter', str(err)) return error_response, dict(status=400) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 71aab9a58..c475f0ce0 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -30,7 +30,7 @@ class SQSResponse(BaseResponse): @property def attribute(self): if not hasattr(self, '_attribute'): - self._attribute = self._get_map_prefix('Attribute', key_end='Name', value_end='Value') + self._attribute = self._get_map_prefix('Attribute', key_end='.Name', value_end='.Value') return self._attribute def _get_queue_name(self): diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 1540ceb84..3ccc3ef44 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -207,3 +207,136 @@ def test_publish_subject(): err.response['Error']['Code'].should.equal('InvalidParameter') else: raise RuntimeError('Should have raised an InvalidParameter exception') + + +def _setup_filter_policy_test(filter_policy): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + subscription = topic.subscribe( + Protocol='sqs', Endpoint=queue.attributes['QueueArn']) + + subscription.set_attributes( + AttributeName='FilterPolicy', AttributeValue=json.dumps(filter_policy)) + + return topic, subscription, queue + + +@mock_sqs +@mock_sns +def test_filtering_exact_string(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + +@mock_sqs +@mock_sns +def test_filtering_exact_string_multiple_message_attributes(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + +@mock_sqs +@mock_sns +def test_filtering_exact_string_OR_matching(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp', 'different_corp']}) + + topic.publish( + Message='match example_corp', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + topic.publish( + Message='match different_corp', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'different_corp'}}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp', 'match different_corp']) + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_positive(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_cancelled', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp order_cancelled']) + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_accepted', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_accepted'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='no match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'different_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_attributes_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish(Message='no match') + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index 59cef221f..98075e617 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -223,11 +223,26 @@ def test_set_subscription_attributes(): AttributeName='DeliveryPolicy', AttributeValue=delivery_policy ) + + filter_policy = json.dumps({ + "store": ["example_corp"], + "event": ["order_cancelled"], + "encrypted": [False], + "customer_interests": ["basketball", "baseball"] + }) + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='FilterPolicy', + AttributeValue=filter_policy + ) + attrs = conn.get_subscription_attributes( SubscriptionArn=subscription_arn ) + attrs['Attributes']['RawMessageDelivery'].should.equal('true') attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) + attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) # not existing subscription with assert_raises(ClientError): From 02ffce1a1571113a7c3344f490b68e43005616ea Mon Sep 17 00:00:00 2001 From: Iain Bullard Date: Wed, 21 Mar 2018 15:50:14 +0000 Subject: [PATCH 101/182] Tighten upper bound on python-dateutil to match botocore (#1519) https://github.com/boto/botocore/commit/90d7692702be1a423af15e0f49b58365f2a400f2 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 57140401b..4a2489676 100755 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ install_requires = [ "werkzeug", "pyaml", "pytz", - "python-dateutil<3.0.0,>=2.1", + "python-dateutil<2.7.0,>=2.1", "mock", "docker>=2.5.1", "jsondiff==1.1.1", From 39e9379195816cf993621306ff37aeca7dc46b8a Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 22 Mar 2018 00:55:03 +0900 Subject: [PATCH 102/182] Fix cloudwatch logs' response error (#1426) --- moto/logs/responses.py | 3 +-- tests/test_logs/test_logs.py | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/moto/logs/responses.py b/moto/logs/responses.py index e0a17f5f8..7bf481908 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -87,9 +87,8 @@ class LogsResponse(BaseResponse): events, next_backward_token, next_foward_token = \ self.logs_backend.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) - return json.dumps({ - "events": [ob.__dict__ for ob in events], + "events": events, "nextBackwardToken": next_backward_token, "nextForwardToken": next_foward_token }) diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 1b2f5f75e..0139723c9 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -59,3 +59,30 @@ def test_exceptions(): }, ], ) + + +@mock_logs +def test_put_logs(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.get_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + events = res['events'] + events.should.have.length_of(2) \ No newline at end of file From 5d51329c34abf4312de89f74faa6a4e9dc224e7e Mon Sep 17 00:00:00 2001 From: Graham Lyons Date: Wed, 21 Mar 2018 15:55:58 +0000 Subject: [PATCH 103/182] Don't create volumes for AMIs (#1456) * Delete the volume used during AMI creation Creating an AMI doesn't actually result in the creation of an EBS volume, although the associated snapshot does reference one. To that end, delete the volume once we've used it. * Add `owner_id` to `Snapshot`, verify AMI snapshots The default AMIs which are created by moto have EBS volume mappings but the snapshots associated with those don't have the correct owners set. This adds the owner to the snapshot model and passes it through from the JSON data. --- moto/ec2/models.py | 14 ++++-- moto/ec2/responses/elastic_block_store.py | 4 +- tests/test_ec2/test_amis.py | 56 +++++++++++++++++------ 3 files changed, 54 insertions(+), 20 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index b1989d672..51ef2a124 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1088,7 +1088,8 @@ class Ami(TaggedEC2Resource): # AWS auto-creates these, we should reflect the same. volume = self.ec2_backend.create_volume(15, region_name) self.ebs_snapshot = self.ec2_backend.create_snapshot( - volume.id, "Auto-created snapshot for AMI %s" % self.id) + volume.id, "Auto-created snapshot for AMI %s" % self.id, owner_id) + self.ec2_backend.delete_volume(volume.id) @property def is_public(self): @@ -1840,7 +1841,7 @@ class Volume(TaggedEC2Resource): class Snapshot(TaggedEC2Resource): - def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False): + def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False, owner_id='123456789012'): self.id = snapshot_id self.volume = volume self.description = description @@ -1849,6 +1850,7 @@ class Snapshot(TaggedEC2Resource): self.ec2_backend = ec2_backend self.status = 'completed' self.encrypted = encrypted + self.owner_id = owner_id def get_filter_value(self, filter_name): if filter_name == 'description': @@ -1940,11 +1942,13 @@ class EBSBackend(object): volume.attachment = None return old_attachment - def create_snapshot(self, volume_id, description): + def create_snapshot(self, volume_id, description, owner_id=None): snapshot_id = random_snapshot_id() volume = self.get_volume(volume_id) - snapshot = Snapshot(self, snapshot_id, volume, - description, volume.encrypted) + params = [self, snapshot_id, volume, description, volume.encrypted] + if owner_id: + params.append(owner_id) + snapshot = Snapshot(*params) self.snapshots[snapshot_id] = snapshot return snapshot diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index 31831c18b..2d43f8ffb 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -229,7 +229,7 @@ CREATE_SNAPSHOT_RESPONSE = """ Date: Wed, 21 Mar 2018 16:56:57 +0100 Subject: [PATCH 104/182] Make putparameter behave more like the real endpoint does, respond with Version or ParameterAlreadyExists (#1464) --- moto/ssm/models.py | 1 + moto/ssm/responses.py | 13 +++++++++++-- tests/test_ssm/test_ssm_boto3.py | 23 ++++++++++++++++------- 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 0f75599c3..af450e39e 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -126,6 +126,7 @@ class SimpleSystemManagerBackend(BaseBackend): last_modified_date = time.time() self._parameters[name] = Parameter( name, value, type, description, keyid, last_modified_date, version) + return version def add_tags_to_resource(self, resource_type, resource_id, tags): for key, value in tags.items(): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 757bf0317..d9906a82e 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -162,9 +162,18 @@ class SimpleSystemManagerResponse(BaseResponse): keyid = self._get_param('KeyId') overwrite = self._get_param('Overwrite', False) - self.ssm_backend.put_parameter( + result = self.ssm_backend.put_parameter( name, description, value, type_, keyid, overwrite) - return json.dumps({}) + + if result is None: + error = { + '__type': 'ParameterAlreadyExists', + 'message': 'Parameter {0} already exists.'.format(name) + } + return json.dumps(error), dict(status=400) + + response = {'Version': result} + return json.dumps(response) def add_tags_to_resource(self): resource_id = self._get_param('ResourceId') diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 0e8a770b3..97801e0b9 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -97,12 +97,14 @@ def test_get_parameters_by_path(): def test_put_parameter(): client = boto3.client('ssm', region_name='us-east-1') - client.put_parameter( + response = client.put_parameter( Name='test', Description='A test parameter', Value='value', Type='String') + response['Version'].should.equal(1) + response = client.get_parameters( Names=[ 'test' @@ -115,11 +117,16 @@ def test_put_parameter(): response['Parameters'][0]['Type'].should.equal('String') response['Parameters'][0]['Version'].should.equal(1) - client.put_parameter( - Name='test', - Description='desc 2', - Value='value 2', - Type='String') + try: + client.put_parameter( + Name='test', + Description='desc 2', + Value='value 2', + Type='String') + raise RuntimeError('Should fail') + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal('PutParameter') + err.response['Error']['Message'].should.equal('Parameter test already exists.') response = client.get_parameters( Names=[ @@ -134,13 +141,15 @@ def test_put_parameter(): response['Parameters'][0]['Type'].should.equal('String') response['Parameters'][0]['Version'].should.equal(1) - client.put_parameter( + response = client.put_parameter( Name='test', Description='desc 3', Value='value 3', Type='String', Overwrite=True) + response['Version'].should.equal(2) + response = client.get_parameters( Names=[ 'test' From 1b20f21a75430c765348b150c0f84c08052c2894 Mon Sep 17 00:00:00 2001 From: Ash Berlin-Taylor Date: Wed, 21 Mar 2018 15:57:50 +0000 Subject: [PATCH 105/182] Escape EMR template fields to avoid invalid XML responses (#1467) I had an EMR step that contained a `&` and this caused the ListStep call to fail. I've added the `| escape` filter to handle it in this case and a few other cases that look like they could suffer the same fate. --- moto/emr/responses.py | 24 ++++++++++++------------ tests/test_emr/test_emr.py | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 8442e4010..49e37ab9a 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -462,10 +462,10 @@ DESCRIBE_JOB_FLOWS_TEMPLATE = """=2.8", + "Jinja2>=2.7.3", "boto>=2.36.0", "boto3>=1.2.1", "botocore>=1.7.12", From b7ae704ad20597601e6b6280e1f6d2cf925d2e53 Mon Sep 17 00:00:00 2001 From: Loukas Leontopoulos Date: Wed, 21 Mar 2018 18:05:57 +0200 Subject: [PATCH 110/182] Add opsworks app mocks (#1481) * Add implementation for OpsWorks create_app and describe_apps * Fix the name of the test * Add some more exception tests --- moto/opsworks/models.py | 101 ++++++++++++++++++++++++++++++ moto/opsworks/responses.py | 24 ++++++++ tests/test_opsworks/test_apps.py | 102 +++++++++++++++++++++++++++++++ 3 files changed, 227 insertions(+) create mode 100644 tests/test_opsworks/test_apps.py diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index fe8c882a7..4fe428c65 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -398,11 +398,82 @@ class Stack(BaseModel): return response +class App(BaseModel): + + def __init__(self, stack_id, name, type, + shortname=None, + description=None, + datasources=None, + app_source=None, + domains=None, + enable_ssl=False, + ssl_configuration=None, + attributes=None, + environment=None): + self.stack_id = stack_id + self.name = name + self.type = type + self.shortname = shortname + self.description = description + + self.datasources = datasources + if datasources is None: + self.datasources = [] + + self.app_source = app_source + if app_source is None: + self.app_source = {} + + self.domains = domains + if domains is None: + self.domains = [] + + self.enable_ssl = enable_ssl + + self.ssl_configuration = ssl_configuration + if ssl_configuration is None: + self.ssl_configuration = {} + + self.attributes = attributes + if attributes is None: + self.attributes = {} + + self.environment = environment + if environment is None: + self.environment = {} + + self.id = "{0}".format(uuid.uuid4()) + self.created_at = datetime.datetime.utcnow() + + def __eq__(self, other): + return self.id == other.id + + def to_dict(self): + d = { + "AppId": self.id, + "AppSource": self.app_source, + "Attributes": self.attributes, + "CreatedAt": self.created_at.isoformat(), + "Datasources": self.datasources, + "Description": self.description, + "Domains": self.domains, + "EnableSsl": self.enable_ssl, + "Environment": self.environment, + "Name": self.name, + "Shortname": self.shortname, + "SslConfiguration": self.ssl_configuration, + "StackId": self.stack_id, + "Type": self.type + } + return d + + class OpsWorksBackend(BaseBackend): def __init__(self, ec2_backend): self.stacks = {} self.layers = {} + self.apps = {} self.instances = {} self.ec2_backend = ec2_backend @@ -435,6 +506,20 @@ class OpsWorksBackend(BaseBackend): self.stacks[stackid].layers.append(layer) return layer + def create_app(self, **kwargs): + name = kwargs['name'] + stackid = kwargs['stack_id'] + if stackid not in self.stacks: + raise ResourceNotFoundException(stackid) + if name in [a.name for a in self.stacks[stackid].apps]: + raise ValidationException( + 'There is already an app named "{0}" ' + 'for this stack'.format(name)) + app = App(**kwargs) + self.apps[app.id] = app + self.stacks[stackid].apps.append(app) + return app + def create_instance(self, **kwargs): stack_id = kwargs['stack_id'] layer_ids = kwargs['layer_ids'] @@ -502,6 +587,22 @@ class OpsWorksBackend(BaseBackend): raise ResourceNotFoundException(", ".join(unknown_layers)) return [self.layers[id].to_dict() for id in layer_ids] + def describe_apps(self, stack_id, app_ids): + if stack_id is not None and app_ids is not None: + raise ValidationException( + "Please provide one or more app IDs or a stack ID" + ) + if stack_id is not None: + if stack_id not in self.stacks: + raise ResourceNotFoundException( + "Unable to find stack with ID {0}".format(stack_id)) + return [app.to_dict() for app in self.stacks[stack_id].apps] + + unknown_apps = set(app_ids) - set(self.apps.keys()) + if unknown_apps: + raise ResourceNotFoundException(", ".join(unknown_apps)) + return [self.apps[id].to_dict() for id in app_ids] + def describe_instances(self, instance_ids, layer_id, stack_id): if len(list(filter(None, (instance_ids, layer_id, stack_id)))) != 1: raise ValidationException("Please provide either one or more " diff --git a/moto/opsworks/responses.py b/moto/opsworks/responses.py index 42e0f2c5c..c9f8fe125 100644 --- a/moto/opsworks/responses.py +++ b/moto/opsworks/responses.py @@ -75,6 +75,24 @@ class OpsWorksResponse(BaseResponse): layer = self.opsworks_backend.create_layer(**kwargs) return json.dumps({"LayerId": layer.id}, indent=1) + def create_app(self): + kwargs = dict( + stack_id=self.parameters.get('StackId'), + name=self.parameters.get('Name'), + type=self.parameters.get('Type'), + shortname=self.parameters.get('Shortname'), + description=self.parameters.get('Description'), + datasources=self.parameters.get('DataSources'), + app_source=self.parameters.get('AppSource'), + domains=self.parameters.get('Domains'), + enable_ssl=self.parameters.get('EnableSsl'), + ssl_configuration=self.parameters.get('SslConfiguration'), + attributes=self.parameters.get('Attributes'), + environment=self.parameters.get('Environment') + ) + app = self.opsworks_backend.create_app(**kwargs) + return json.dumps({"AppId": app.id}, indent=1) + def create_instance(self): kwargs = dict( stack_id=self.parameters.get("StackId"), @@ -110,6 +128,12 @@ class OpsWorksResponse(BaseResponse): layers = self.opsworks_backend.describe_layers(stack_id, layer_ids) return json.dumps({"Layers": layers}, indent=1) + def describe_apps(self): + stack_id = self.parameters.get("StackId") + app_ids = self.parameters.get("AppIds") + apps = self.opsworks_backend.describe_apps(stack_id, app_ids) + return json.dumps({"Apps": apps}, indent=1) + def describe_instances(self): instance_ids = self.parameters.get("InstanceIds") layer_id = self.parameters.get("LayerId") diff --git a/tests/test_opsworks/test_apps.py b/tests/test_opsworks/test_apps.py new file mode 100644 index 000000000..37d0f2fe4 --- /dev/null +++ b/tests/test_opsworks/test_apps.py @@ -0,0 +1,102 @@ +from __future__ import unicode_literals +import boto3 +from freezegun import freeze_time +import sure # noqa +import re + +from moto import mock_opsworks + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_create_app_response(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + StackId=second_stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + # ClientError + client.create_app.when.called_with( + StackId=stack_id, + Type="other", + Name="TestApp" + ).should.throw( + Exception, re.compile(r'already an app named "TestApp"') + ) + + # ClientError + client.create_app.when.called_with( + StackId="nothere", + Type="other", + Name="TestApp" + ).should.throw( + Exception, "nothere" + ) + +@freeze_time("2015-01-01") +@mock_opsworks +def test_describe_apps(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + app_id = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + )['AppId'] + + rv1 = client.describe_apps(StackId=stack_id) + rv2 = client.describe_apps(AppIds=[app_id]) + rv1['Apps'].should.equal(rv2['Apps']) + + rv1['Apps'][0]['Name'].should.equal("TestApp") + + # ClientError + client.describe_apps.when.called_with( + StackId=stack_id, + AppIds=[app_id] + ).should.throw( + Exception, "Please provide one or more app IDs or a stack ID" + ) + # ClientError + client.describe_apps.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_apps.when.called_with( + AppIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) From 4b3469292a27ff3e6d6ff99fc5ef92d17b928250 Mon Sep 17 00:00:00 2001 From: Rob Walker Date: Thu, 22 Mar 2018 02:10:38 +1000 Subject: [PATCH 111/182] Enable Extended CIDR Associations on VPC (#1511) * Enable Extended CIDR Associations on VPC * Ooops missed the utils, try to be more flakey?, remove unnecessary part in tests * try to be even more flakey --- moto/ec2/exceptions.py | 28 ++++++ moto/ec2/models.py | 130 +++++++++++++++++++------- moto/ec2/responses/vpcs.py | 147 +++++++++++++++++++++++++++-- moto/ec2/utils.py | 24 +++-- tests/test_ec2/test_vpcs.py | 181 +++++++++++++++++++++++++++++++++++- 5 files changed, 455 insertions(+), 55 deletions(-) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index 5cff527be..f747c9cd5 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -280,6 +280,15 @@ class InvalidAssociationIdError(EC2ClientError): .format(association_id)) +class InvalidVpcCidrBlockAssociationIdError(EC2ClientError): + + def __init__(self, association_id): + super(InvalidVpcCidrBlockAssociationIdError, self).__init__( + "InvalidVpcCidrBlockAssociationIdError.NotFound", + "The vpc CIDR block association ID '{0}' does not exist" + .format(association_id)) + + class InvalidVPCPeeringConnectionIdError(EC2ClientError): def __init__(self, vpc_peering_connection_id): @@ -392,3 +401,22 @@ class FilterNotImplementedError(MotoNotImplementedError): super(FilterNotImplementedError, self).__init__( "The filter '{0}' for {1}".format( filter_name, method_name)) + + +class CidrLimitExceeded(EC2ClientError): + + def __init__(self, vpc_id, max_cidr_limit): + super(CidrLimitExceeded, self).__init__( + "CidrLimitExceeded", + "This network '{0}' has met its maximum number of allowed CIDRs: {1}".format(vpc_id, max_cidr_limit) + ) + + +class OperationNotPermitted(EC2ClientError): + + def __init__(self, association_id): + super(OperationNotPermitted, self).__init__( + "OperationNotPermitted", + "The vpc CIDR block with association ID {} may not be disassociated. " + "It is the primary IPv4 CIDR block of the VPC".format(association_id) + ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index c885dac9e..c94752ef6 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -24,51 +24,54 @@ from moto.core import BaseBackend from moto.core.models import Model, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, camelcase_to_underscores from .exceptions import ( - EC2ClientError, + CidrLimitExceeded, DependencyViolationError, - MissingParameterError, + EC2ClientError, + FilterNotImplementedError, + GatewayNotAttachedError, + InvalidAddressError, + InvalidAllocationIdError, + InvalidAMIIdError, + InvalidAMIAttributeItemValueError, + InvalidAssociationIdError, + InvalidCIDRSubnetError, + InvalidCustomerGatewayIdError, + InvalidDHCPOptionsIdError, + InvalidDomainError, + InvalidID, + InvalidInstanceIdError, + InvalidInternetGatewayIdError, + InvalidKeyPairDuplicateError, + InvalidKeyPairNameError, + InvalidNetworkAclIdError, + InvalidNetworkAttachmentIdError, + InvalidNetworkInterfaceIdError, InvalidParameterValueError, InvalidParameterValueErrorTagNull, - InvalidDHCPOptionsIdError, - MalformedDHCPOptionsIdError, - InvalidKeyPairNameError, - InvalidKeyPairDuplicateError, - InvalidInternetGatewayIdError, - GatewayNotAttachedError, - ResourceAlreadyAssociatedError, - InvalidVPCIdError, - InvalidSubnetIdError, - InvalidNetworkInterfaceIdError, - InvalidNetworkAttachmentIdError, - InvalidSecurityGroupDuplicateError, - InvalidSecurityGroupNotFoundError, InvalidPermissionNotFoundError, InvalidPermissionDuplicateError, InvalidRouteTableIdError, InvalidRouteError, - InvalidInstanceIdError, - InvalidAMIIdError, - InvalidAMIAttributeItemValueError, + InvalidSecurityGroupDuplicateError, + InvalidSecurityGroupNotFoundError, InvalidSnapshotIdError, + InvalidSubnetIdError, InvalidVolumeIdError, InvalidVolumeAttachmentError, - InvalidDomainError, - InvalidAddressError, - InvalidAllocationIdError, - InvalidAssociationIdError, + InvalidVpcCidrBlockAssociationIdError, InvalidVPCPeeringConnectionIdError, InvalidVPCPeeringConnectionStateTransitionError, - TagLimitExceeded, - InvalidID, - InvalidCIDRSubnetError, - InvalidNetworkAclIdError, + InvalidVPCIdError, InvalidVpnGatewayIdError, InvalidVpnConnectionIdError, - InvalidCustomerGatewayIdError, - RulesPerSecurityGroupLimitExceededError, + MalformedAMIIdError, + MalformedDHCPOptionsIdError, + MissingParameterError, MotoNotImplementedError, - FilterNotImplementedError, - MalformedAMIIdError) + OperationNotPermitted, + ResourceAlreadyAssociatedError, + RulesPerSecurityGroupLimitExceededError, + TagLimitExceeded) from .utils import ( EC2_RESOURCE_TO_PREFIX, EC2_PREFIX_TO_RESOURCE, @@ -81,6 +84,7 @@ from .utils import ( random_instance_id, random_internet_gateway_id, random_ip, + random_ipv6_cidr, random_nat_gateway_id, random_key_pair, random_private_ip, @@ -97,6 +101,7 @@ from .utils import ( random_subnet_association_id, random_volume_id, random_vpc_id, + random_vpc_cidr_association_id, random_vpc_peering_connection_id, generic_filter, is_valid_resource_id, @@ -2005,10 +2010,13 @@ class EBSBackend(object): class VPC(TaggedEC2Resource): - def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default'): + def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default', + amazon_provided_ipv6_cidr_block=False): + self.ec2_backend = ec2_backend self.id = vpc_id self.cidr_block = cidr_block + self.cidr_block_association_set = {} self.dhcp_options = None self.state = 'available' self.instance_tenancy = instance_tenancy @@ -2018,6 +2026,10 @@ class VPC(TaggedEC2Resource): # or VPCs created using the wizard of the VPC console self.enable_dns_hostnames = 'true' if is_default else 'false' + self.associate_vpc_cidr_block(cidr_block) + if amazon_provided_ipv6_cidr_block: + self.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_block) + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -2043,6 +2055,12 @@ class VPC(TaggedEC2Resource): return self.id elif filter_name in ('cidr', 'cidr-block', 'cidrBlock'): return self.cidr_block + elif filter_name in ('cidr-block-association.cidr-block', 'ipv6-cidr-block-association.ipv6-cidr-block'): + return [c['cidr_block'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)] + elif filter_name in ('cidr-block-association.association-id', 'ipv6-cidr-block-association.association-id'): + return self.cidr_block_association_set.keys() + elif filter_name in ('cidr-block-association.state', 'ipv6-cidr-block-association.state'): + return [c['cidr_block_state']['state'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)] elif filter_name in ('instance_tenancy', 'InstanceTenancy'): return self.instance_tenancy elif filter_name in ('is-default', 'isDefault'): @@ -2054,8 +2072,37 @@ class VPC(TaggedEC2Resource): return None return self.dhcp_options.id else: - return super(VPC, self).get_filter_value( - filter_name, 'DescribeVpcs') + return super(VPC, self).get_filter_value(filter_name, 'DescribeVpcs') + + def associate_vpc_cidr_block(self, cidr_block, amazon_provided_ipv6_cidr_block=False): + max_associations = 5 if not amazon_provided_ipv6_cidr_block else 1 + + if len(self.get_cidr_block_association_set(amazon_provided_ipv6_cidr_block)) >= max_associations: + raise CidrLimitExceeded(self.id, max_associations) + + association_id = random_vpc_cidr_association_id() + + association_set = { + 'association_id': association_id, + 'cidr_block_state': {'state': 'associated', 'StatusMessage': ''} + } + + association_set['cidr_block'] = random_ipv6_cidr() if amazon_provided_ipv6_cidr_block else cidr_block + self.cidr_block_association_set[association_id] = association_set + return association_set + + def disassociate_vpc_cidr_block(self, association_id): + if self.cidr_block == self.cidr_block_association_set.get(association_id, {}).get('cidr_block'): + raise OperationNotPermitted(association_id) + + response = self.cidr_block_association_set.pop(association_id, {}) + if response: + response['vpc_id'] = self.id + response['cidr_block_state']['state'] = 'disassociating' + return response + + def get_cidr_block_association_set(self, ipv6=False): + return [c for c in self.cidr_block_association_set.values() if ('::/' if ipv6 else '.') in c.get('cidr_block')] class VPCBackend(object): @@ -2063,10 +2110,9 @@ class VPCBackend(object): self.vpcs = {} super(VPCBackend, self).__init__() - def create_vpc(self, cidr_block, instance_tenancy='default'): + def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False): vpc_id = random_vpc_id() - vpc = VPC(self, vpc_id, cidr_block, len( - self.vpcs) == 0, instance_tenancy) + vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block) self.vpcs[vpc_id] = vpc # AWS creates a default main route table and security group. @@ -2139,6 +2185,18 @@ class VPCBackend(object): else: raise InvalidParameterValueError(attr_name) + def disassociate_vpc_cidr_block(self, association_id): + for vpc in self.vpcs.values(): + response = vpc.disassociate_vpc_cidr_block(association_id) + if response: + return response + else: + raise InvalidVpcCidrBlockAssociationIdError(association_id) + + def associate_vpc_cidr_block(self, vpc_id, cidr_block, amazon_provided_ipv6_cidr_block): + vpc = self.get_vpc(vpc_id) + return vpc.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block) + class VPCPeeringConnectionStatus(object): def __init__(self, code='initiating-request', message=''): diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 8a53151e0..88673d863 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -9,9 +9,12 @@ class VPCs(BaseResponse): def create_vpc(self): cidr_block = self._get_param('CidrBlock') instance_tenancy = self._get_param('InstanceTenancy', if_none='default') - vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy) + amazon_provided_ipv6_cidr_blocks = self._get_param('AmazonProvidedIpv6CidrBlock') + vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy, + amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_blocks) + doc_date = '2013-10-15' if 'Boto/' in self.headers.get('user-agent', '') else '2016-11-15' template = self.response_template(CREATE_VPC_RESPONSE) - return template.render(vpc=vpc) + return template.render(vpc=vpc, doc_date=doc_date) def delete_vpc(self): vpc_id = self._get_param('VpcId') @@ -23,8 +26,9 @@ class VPCs(BaseResponse): vpc_ids = self._get_multi_param('VpcId') filters = filters_from_querystring(self.querystring) vpcs = self.ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters) + doc_date = '2013-10-15' if 'Boto/' in self.headers.get('user-agent', '') else '2016-11-15' template = self.response_template(DESCRIBE_VPCS_RESPONSE) - return template.render(vpcs=vpcs) + return template.render(vpcs=vpcs, doc_date=doc_date) def describe_vpc_attribute(self): vpc_id = self._get_param('VpcId') @@ -45,14 +49,63 @@ class VPCs(BaseResponse): vpc_id, attr_name, attr_value) return MODIFY_VPC_ATTRIBUTE_RESPONSE + def associate_vpc_cidr_block(self): + vpc_id = self._get_param('VpcId') + amazon_provided_ipv6_cidr_blocks = self._get_param('AmazonProvidedIpv6CidrBlock') + # todo test on AWS if can create an association for IPV4 and IPV6 in the same call? + cidr_block = self._get_param('CidrBlock') if not amazon_provided_ipv6_cidr_blocks else None + value = self.ec2_backend.associate_vpc_cidr_block(vpc_id, cidr_block, amazon_provided_ipv6_cidr_blocks) + if not amazon_provided_ipv6_cidr_blocks: + render_template = ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE + else: + render_template = IPV6_ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE + template = self.response_template(render_template) + return template.render(vpc_id=vpc_id, value=value, cidr_block=value['cidr_block'], + association_id=value['association_id'], cidr_block_state='associating') + + def disassociate_vpc_cidr_block(self): + association_id = self._get_param('AssociationId') + value = self.ec2_backend.disassociate_vpc_cidr_block(association_id) + if "::" in value.get('cidr_block', ''): + render_template = IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE + else: + render_template = DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE + template = self.response_template(render_template) + return template.render(vpc_id=value['vpc_id'], cidr_block=value['cidr_block'], + association_id=value['association_id'], cidr_block_state='disassociating') + CREATE_VPC_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc.id }} pending {{ vpc.cidr_block }} + {% if doc_date == "2016-11-15" %} + + {% for assoc in vpc.get_cidr_block_association_set() %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + + {% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + {% endif %} {% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-1a2b3c4d2{% endif %} {{ vpc.instance_tenancy }} @@ -69,14 +122,38 @@ CREATE_VPC_RESPONSE = """ """ DESCRIBE_VPCS_RESPONSE = """ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + 7a62c442-3484-4f42-9342-6942EXAMPLE {% for vpc in vpcs %} {{ vpc.id }} {{ vpc.state }} {{ vpc.cidr_block }} + {% if doc_date == "2016-11-15" %} + + {% for assoc in vpc.get_cidr_block_association_set() %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + + {% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + {% endif %} {% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-7a8b9c2d{% endif %} {{ vpc.instance_tenancy }} {{ vpc.is_default }} @@ -96,14 +173,14 @@ DESCRIBE_VPCS_RESPONSE = """ """ DELETE_VPC_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE true """ DESCRIBE_VPC_ATTRIBUTE_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc_id }} <{{ attribute }}> @@ -112,7 +189,59 @@ DESCRIBE_VPC_ATTRIBUTE_RESPONSE = """ """ MODIFY_VPC_ATTRIBUTE_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE true """ + +ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" + +DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" + +IPV6_ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 33af6c54-1139-4d50-b4f7-15a8example + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" + +IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 33af6c54-1139-4d50-b4f7-15a8example + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 32122c763..f5c9b8512 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -27,6 +27,7 @@ EC2_RESOURCE_TO_PREFIX = { 'reservation': 'r', 'volume': 'vol', 'vpc': 'vpc', + 'vpc-cidr-association-id': 'vpc-cidr-assoc', 'vpc-elastic-ip': 'eipalloc', 'vpc-elastic-ip-association': 'eipassoc', 'vpc-peering-connection': 'pcx', @@ -34,16 +35,17 @@ EC2_RESOURCE_TO_PREFIX = { 'vpn-gateway': 'vgw'} -EC2_PREFIX_TO_RESOURCE = dict((v, k) - for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) +EC2_PREFIX_TO_RESOURCE = dict((v, k) for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) + + +def random_resource_id(size=8): + chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f'] + resource_id = ''.join(six.text_type(random.choice(chars)) for x in range(size)) + return resource_id def random_id(prefix='', size=8): - chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f'] - - resource_id = ''.join(six.text_type(random.choice(chars)) - for x in range(size)) - return '{0}-{1}'.format(prefix, resource_id) + return '{0}-{1}'.format(prefix, random_resource_id(size)) def random_ami_id(): @@ -110,6 +112,10 @@ def random_vpc_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc']) +def random_vpc_cidr_association_id(): + return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-cidr-association-id']) + + def random_vpc_peering_connection_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-peering-connection']) @@ -165,6 +171,10 @@ def random_ip(): ) +def random_ipv6_cidr(): + return "2400:6500:{}:{}::/56".format(random_resource_id(4), random_resource_id(4)) + + def generate_route_id(route_table_id, cidr_block): return "%s~%s" % (route_table_id, cidr_block) diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index fc0a93cbb..318491b44 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 import tests.backport_assert_raises # flake8: noqa from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError import boto3 import boto @@ -275,8 +277,8 @@ def test_default_vpc(): def test_non_default_vpc(): ec2 = boto3.resource('ec2', region_name='us-west-1') - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') + # Create the default VPC - this already exists when backend instantiated! + #ec2.create_vpc(CidrBlock='172.31.0.0/16') # Create the non default VPC vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') @@ -295,6 +297,12 @@ def test_non_default_vpc(): attr = response.get('EnableDnsHostnames') attr.get('Value').shouldnt.be.ok + # Check Primary CIDR Block Associations + cidr_block_association_set = next(iter(vpc.cidr_block_association_set), None) + cidr_block_association_set['CidrBlockState']['State'].should.equal('associated') + cidr_block_association_set['CidrBlock'].should.equal(vpc.cidr_block) + cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') + @mock_ec2 def test_vpc_dedicated_tenancy(): @@ -340,7 +348,6 @@ def test_vpc_modify_enable_dns_hostnames(): ec2.create_vpc(CidrBlock='172.31.0.0/16') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - # Test default values for VPC attributes response = vpc.describe_attribute(Attribute='enableDnsHostnames') attr = response.get('EnableDnsHostnames') @@ -364,3 +371,171 @@ def test_vpc_associate_dhcp_options(): vpc.update() dhcp_options.id.should.equal(vpc.dhcp_options_id) + + +@mock_ec2 +def test_associate_vpc_ipv4_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') + + # Associate/Extend vpc CIDR range up to 5 ciders + for i in range(43, 47): + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.{}.0/24'.format(i)) + response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('associating') + response['CidrBlockAssociation']['CidrBlock'].should.equal('10.10.{}.0/24'.format(i)) + response['CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc') + + # Check all associations exist + vpc = ec2.Vpc(vpc.id) + vpc.cidr_block_association_set.should.have.length_of(5) + vpc.cidr_block_association_set[2]['CidrBlockState']['State'].should.equal('associated') + vpc.cidr_block_association_set[4]['CidrBlockState']['State'].should.equal('associated') + + # Check error on adding 6th association. + with assert_raises(ClientError) as ex: + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.50.0/22') + str(ex.exception).should.equal( + "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " + "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(vpc.id)) + +@mock_ec2 +def test_disassociate_vpc_ipv4_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.43.0/24') + + # Remove an extended cidr block + vpc = ec2.Vpc(vpc.id) + non_default_assoc_cidr_block = next(iter([x for x in vpc.cidr_block_association_set if vpc.cidr_block != x['CidrBlock']]), None) + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=non_default_assoc_cidr_block['AssociationId']) + response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('disassociating') + response['CidrBlockAssociation']['CidrBlock'].should.equal(non_default_assoc_cidr_block['CidrBlock']) + response['CidrBlockAssociation']['AssociationId'].should.equal(non_default_assoc_cidr_block['AssociationId']) + + # Error attempting to delete a non-existent CIDR_BLOCK association + with assert_raises(ClientError) as ex: + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId='vpc-cidr-assoc-BORING123') + str(ex.exception).should.equal( + "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the " + "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID " + "'vpc-cidr-assoc-BORING123' does not exist") + + # Error attempting to delete Primary CIDR BLOCK association + vpc_base_cidr_assoc_id = next(iter([x for x in vpc.cidr_block_association_set + if vpc.cidr_block == x['CidrBlock']]), {})['AssociationId'] + + with assert_raises(ClientError) as ex: + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=vpc_base_cidr_assoc_id) + str(ex.exception).should.equal( + "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: " + "The vpc CIDR block with association ID {} may not be disassociated. It is the primary " + "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id)) + +@mock_ec2 +def test_cidr_block_association_filters(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') + vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') + vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') + vpc3_assoc_response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.3.0/24') + + # Test filters for a cidr-block in all VPCs cidr-block-associations + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.cidr-block', + 'Values': ['10.10.0.0/19']}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc2.id) + + # Test filter for association id in VPCs + association_id = vpc3_assoc_response['CidrBlockAssociation']['AssociationId'] + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', + 'Values': [association_id]}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc3.id) + + # Test filter for association state in VPC - this will never show anything in this test + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', + 'Values': ['failing']}])) + filtered_vpcs.should.be.length_of(0) + +@mock_ec2 +def test_vpc_associate_ipv6_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Test create VPC with IPV6 cidr range + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) + ipv6_cidr_block_association_set = next(iter(vpc.ipv6_cidr_block_association_set), None) + ipv6_cidr_block_association_set['Ipv6CidrBlockState']['State'].should.equal('associated') + ipv6_cidr_block_association_set['Ipv6CidrBlock'].should.contain('::/56') + ipv6_cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') + + # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time! + with assert_raises(ClientError) as ex: + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) + str(ex.exception).should.equal( + "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " + "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(vpc.id)) + + # Test associate ipv6 cidr block after vpc created + vpc = ec2.create_vpc(CidrBlock='10.10.50.0/24') + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) + response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('associating') + response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') + response['Ipv6CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc-') + + # Check on describe vpc that has ipv6 cidr block association + vpc = ec2.Vpc(vpc.id) + vpc.ipv6_cidr_block_association_set.should.be.length_of(1) + + +@mock_ec2 +def test_vpc_disassociate_ipv6_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Test create VPC with IPV6 cidr range + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) + # Test disassociating the only IPV6 + assoc_id = vpc.ipv6_cidr_block_association_set[0]['AssociationId'] + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=assoc_id) + response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('disassociating') + response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') + response['Ipv6CidrBlockAssociation']['AssociationId'].should.equal(assoc_id) + + +@mock_ec2 +def test_ipv6_cidr_block_association_filters(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') + + vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16', AmazonProvidedIpv6CidrBlock=True) + vpc2_assoc_ipv6_assoc_id = vpc2.ipv6_cidr_block_association_set[0]['AssociationId'] + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') + + vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, AmazonProvidedIpv6CidrBlock=True) + vpc3_ipv6_cidr_block = response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] + + vpc4 = ec2.create_vpc(CidrBlock='10.95.0.0/16') # Here for its looks + + # Test filters for an ipv6 cidr-block in all VPCs cidr-block-associations + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.ipv6-cidr-block', + 'Values': [vpc3_ipv6_cidr_block]}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc3.id) + + # Test filter for association id in VPCs + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.association-id', + 'Values': [vpc2_assoc_ipv6_assoc_id]}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc2.id) + + # Test filter for association state in VPC - this will never show anything in this test + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state', + 'Values': ['associated']}])) + filtered_vpcs.should.be.length_of(2) # 2 of 4 VPCs From cc14114afe069986d158099640bc26f679773e6f Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Wed, 21 Mar 2018 09:11:24 -0700 Subject: [PATCH 112/182] Implemented S3 get/put_notification_configuration (#1516) closes #973 --- moto/s3/exceptions.py | 30 +++++ moto/s3/models.py | 102 ++++++++++++----- moto/s3/responses.py | 154 +++++++++++++++++++++++++- tests/test_s3/test_s3.py | 229 ++++++++++++++++++++++++++++++++++++++- 4 files changed, 486 insertions(+), 29 deletions(-) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 08dd02313..8c6e291ef 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -138,3 +138,33 @@ class CrossLocationLoggingProhibitted(S3ClientError): "CrossLocationLoggingProhibitted", "Cross S3 location logging not allowed." ) + + +class InvalidNotificationARN(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidNotificationARN, self).__init__( + "InvalidArgument", + "The ARN is not well formed", + *args, **kwargs) + + +class InvalidNotificationDestination(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidNotificationDestination, self).__init__( + "InvalidArgument", + "The notification destination service region is not valid for the bucket location constraint", + *args, **kwargs) + + +class InvalidNotificationEvent(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidNotificationEvent, self).__init__( + "InvalidArgument", + "The event is not supported for notifications", + *args, **kwargs) diff --git a/moto/s3/models.py b/moto/s3/models.py index b8a6a99cc..c414225de 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -6,12 +6,16 @@ import hashlib import copy import itertools import codecs +import random +import string + import six from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey +from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey, \ + InvalidNotificationDestination from .utils import clean_key_name, _VersionedKeyStore UPLOAD_ID_BYTES = 43 @@ -270,7 +274,7 @@ def get_canned_acl(acl): grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ])) elif acl == 'public-read-write': grants.append(FakeGrant([ALL_USERS_GRANTEE], [ - PERMISSION_READ, PERMISSION_WRITE])) + PERMISSION_READ, PERMISSION_WRITE])) elif acl == 'authenticated-read': grants.append( FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ])) @@ -282,7 +286,7 @@ def get_canned_acl(acl): pass # TODO: bucket owner, EC2 Read elif acl == 'log-delivery-write': grants.append(FakeGrant([LOG_DELIVERY_GRANTEE], [ - PERMISSION_READ_ACP, PERMISSION_WRITE])) + PERMISSION_READ_ACP, PERMISSION_WRITE])) else: assert False, 'Unknown canned acl: %s' % (acl,) return FakeAcl(grants=grants) @@ -333,6 +337,26 @@ class CorsRule(BaseModel): self.max_age_seconds = max_age_seconds +class Notification(BaseModel): + + def __init__(self, arn, events, filters=None, id=None): + self.id = id if id else ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(50)) + self.arn = arn + self.events = events + self.filters = filters if filters else {} + + +class NotificationConfiguration(BaseModel): + + def __init__(self, topic=None, queue=None, cloud_function=None): + self.topic = [Notification(t["Topic"], t["Event"], filters=t.get("Filter"), id=t.get("Id")) for t in topic] \ + if topic else [] + self.queue = [Notification(q["Queue"], q["Event"], filters=q.get("Filter"), id=q.get("Id")) for q in queue] \ + if queue else [] + self.cloud_function = [Notification(c["CloudFunction"], c["Event"], filters=c.get("Filter"), id=c.get("Id")) + for c in cloud_function] if cloud_function else [] + + class FakeBucket(BaseModel): def __init__(self, name, region_name): @@ -348,6 +372,7 @@ class FakeBucket(BaseModel): self.tags = FakeTagging() self.cors = [] self.logging = {} + self.notification_configuration = None @property def location(self): @@ -426,36 +451,55 @@ class FakeBucket(BaseModel): def set_logging(self, logging_config, bucket_backend): if not logging_config: self.logging = {} - else: - from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted - # Target bucket must exist in the same account (assuming all moto buckets are in the same account): - if not bucket_backend.buckets.get(logging_config["TargetBucket"]): - raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.") + return - # Does the target bucket have the log-delivery WRITE and READ_ACP permissions? - write = read_acp = False - for grant in bucket_backend.buckets[logging_config["TargetBucket"]].acl.grants: - # Must be granted to: http://acs.amazonaws.com/groups/s3/LogDelivery - for grantee in grant.grantees: - if grantee.uri == "http://acs.amazonaws.com/groups/s3/LogDelivery": - if "WRITE" in grant.permissions or "FULL_CONTROL" in grant.permissions: - write = True + from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted + # Target bucket must exist in the same account (assuming all moto buckets are in the same account): + if not bucket_backend.buckets.get(logging_config["TargetBucket"]): + raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.") - if "READ_ACP" in grant.permissions or "FULL_CONTROL" in grant.permissions: - read_acp = True + # Does the target bucket have the log-delivery WRITE and READ_ACP permissions? + write = read_acp = False + for grant in bucket_backend.buckets[logging_config["TargetBucket"]].acl.grants: + # Must be granted to: http://acs.amazonaws.com/groups/s3/LogDelivery + for grantee in grant.grantees: + if grantee.uri == "http://acs.amazonaws.com/groups/s3/LogDelivery": + if "WRITE" in grant.permissions or "FULL_CONTROL" in grant.permissions: + write = True - break + if "READ_ACP" in grant.permissions or "FULL_CONTROL" in grant.permissions: + read_acp = True - if not write or not read_acp: - raise InvalidTargetBucketForLogging("You must give the log-delivery group WRITE and READ_ACP" - " permissions to the target bucket") + break - # Buckets must also exist within the same region: - if bucket_backend.buckets[logging_config["TargetBucket"]].region_name != self.region_name: - raise CrossLocationLoggingProhibitted() + if not write or not read_acp: + raise InvalidTargetBucketForLogging("You must give the log-delivery group WRITE and READ_ACP" + " permissions to the target bucket") - # Checks pass -- set the logging config: - self.logging = logging_config + # Buckets must also exist within the same region: + if bucket_backend.buckets[logging_config["TargetBucket"]].region_name != self.region_name: + raise CrossLocationLoggingProhibitted() + + # Checks pass -- set the logging config: + self.logging = logging_config + + def set_notification_configuration(self, notification_config): + if not notification_config: + self.notification_configuration = None + return + + self.notification_configuration = NotificationConfiguration( + topic=notification_config.get("TopicConfiguration"), + queue=notification_config.get("QueueConfiguration"), + cloud_function=notification_config.get("CloudFunctionConfiguration") + ) + + # Validate that the region is correct: + for thing in ["topic", "queue", "cloud_function"]: + for t in getattr(self.notification_configuration, thing): + region = t.arn.split(":")[3] + if region != self.region_name: + raise InvalidNotificationDestination() def set_website_configuration(self, website_configuration): self.website_configuration = website_configuration @@ -651,6 +695,10 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.delete_cors() + def put_bucket_notification_configuration(self, bucket_name, notification_config): + bucket = self.get_bucket(bucket_name) + bucket.set_notification_configuration(notification_config) + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 7b07e4e07..fce45b5f9 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -15,7 +15,7 @@ from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_n parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder, MalformedXML, \ - MalformedACLError + MalformedACLError, InvalidNotificationARN, InvalidNotificationEvent from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \ FakeTag from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url @@ -243,6 +243,13 @@ class ResponseObject(_TemplateEnvironmentMixin): return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_CORS_RESPONSE) return template.render(bucket=bucket) + elif "notification" in querystring: + bucket = self.backend.get_bucket(bucket_name) + if not bucket.notification_configuration: + return 200, {}, "" + template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) + return template.render(bucket=bucket) + elif 'versions' in querystring: delimiter = querystring.get('delimiter', [None])[0] encoding_type = querystring.get('encoding-type', [None])[0] @@ -411,6 +418,15 @@ class ResponseObject(_TemplateEnvironmentMixin): return "" except KeyError: raise MalformedXML() + elif "notification" in querystring: + try: + self.backend.put_bucket_notification_configuration(bucket_name, + self._notification_config_from_xml(body)) + return "" + except KeyError: + raise MalformedXML() + except Exception as e: + raise e else: if body: @@ -918,6 +934,74 @@ class ResponseObject(_TemplateEnvironmentMixin): return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"] + def _notification_config_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + if not len(parsed_xml["NotificationConfiguration"]): + return {} + + # The types of notifications, and their required fields (apparently lambda is categorized by the API as + # "CloudFunction"): + notification_fields = [ + ("Topic", "sns"), + ("Queue", "sqs"), + ("CloudFunction", "lambda") + ] + + event_names = [ + 's3:ReducedRedundancyLostObject', + 's3:ObjectCreated:*', + 's3:ObjectCreated:Put', + 's3:ObjectCreated:Post', + 's3:ObjectCreated:Copy', + 's3:ObjectCreated:CompleteMultipartUpload', + 's3:ObjectRemoved:*', + 's3:ObjectRemoved:Delete', + 's3:ObjectRemoved:DeleteMarkerCreated' + ] + + found_notifications = 0 # Tripwire -- if this is not ever set, then there were no notifications + for name, arn_string in notification_fields: + # 1st verify that the proper notification configuration has been passed in (with an ARN that is close + # to being correct -- nothing too complex in the ARN logic): + the_notification = parsed_xml["NotificationConfiguration"].get("{}Configuration".format(name)) + if the_notification: + found_notifications += 1 + if not isinstance(the_notification, list): + the_notification = parsed_xml["NotificationConfiguration"]["{}Configuration".format(name)] \ + = [the_notification] + + for n in the_notification: + if not n[name].startswith("arn:aws:{}:".format(arn_string)): + raise InvalidNotificationARN() + + # 2nd, verify that the Events list is correct: + assert n["Event"] + if not isinstance(n["Event"], list): + n["Event"] = [n["Event"]] + + for event in n["Event"]: + if event not in event_names: + raise InvalidNotificationEvent() + + # Parse out the filters: + if n.get("Filter"): + # Error if S3Key is blank: + if not n["Filter"]["S3Key"]: + raise KeyError() + + if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list): + n["Filter"]["S3Key"]["FilterRule"] = [n["Filter"]["S3Key"]["FilterRule"]] + + for filter_rule in n["Filter"]["S3Key"]["FilterRule"]: + assert filter_rule["Name"] in ["suffix", "prefix"] + assert filter_rule["Value"] + + if not found_notifications: + return {} + + return parsed_xml["NotificationConfiguration"] + def _key_response_delete(self, bucket_name, query, key_name, headers): if query.get('uploadId'): upload_id = query['uploadId'][0] @@ -1460,3 +1544,71 @@ S3_LOGGING_CONFIG = """ S3_NO_LOGGING_CONFIG = """ """ + +S3_GET_BUCKET_NOTIFICATION_CONFIG = """ + + {% for topic in bucket.notification_configuration.topic %} + + {{ topic.id }} + {{ topic.arn }} + {% for event in topic.events %} + {{ event }} + {% endfor %} + {% if topic.filters %} + + + {% for rule in topic.filters["S3Key"]["FilterRule"] %} + + {{ rule["Name"] }} + {{ rule["Value"] }} + + {% endfor %} + + + {% endif %} + + {% endfor %} + {% for queue in bucket.notification_configuration.queue %} + + {{ queue.id }} + {{ queue.arn }} + {% for event in queue.events %} + {{ event }} + {% endfor %} + {% if queue.filters %} + + + {% for rule in queue.filters["S3Key"]["FilterRule"] %} + + {{ rule["Name"] }} + {{ rule["Value"] }} + + {% endfor %} + + + {% endif %} + + {% endfor %} + {% for cf in bucket.notification_configuration.cloud_function %} + + {{ cf.id }} + {{ cf.arn }} + {% for event in cf.events %} + {{ event }} + {% endfor %} + {% if cf.filters %} + + + {% for rule in cf.filters["S3Key"]["FilterRule"] %} + + {{ rule["Name"] }} + {{ rule["Value"] }} + + {% endfor %} + + + {% endif %} + + {% endfor %} + +""" diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 0d6b691a9..369426758 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1835,6 +1835,233 @@ def test_put_bucket_acl_body(): assert not result.get("Grants") +@mock_s3 +def test_put_bucket_notification(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With no configuration: + result = s3.get_bucket_notification(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + # Place proper topic configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + }, + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:myothertopic", + "Events": [ + "s3:ObjectCreated:*" + ], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + }, + { + "Name": "suffix", + "Value": "png" + } + ] + } + } + } + ] + }) + + # Verify to completion: + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["TopicConfigurations"]) == 2 + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["TopicConfigurations"][0]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:mytopic" + assert result["TopicConfigurations"][1]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:myothertopic" + assert len(result["TopicConfigurations"][0]["Events"]) == 2 + assert len(result["TopicConfigurations"][1]["Events"]) == 1 + assert result["TopicConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert result["TopicConfigurations"][0]["Events"][1] == "s3:ObjectRemoved:*" + assert result["TopicConfigurations"][1]["Events"][0] == "s3:ObjectCreated:*" + assert result["TopicConfigurations"][0]["Id"] + assert result["TopicConfigurations"][1]["Id"] + assert not result["TopicConfigurations"][0].get("Filter") + assert len(result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"]) == 2 + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Name"] == "suffix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Value"] == "png" + + # Place proper queue configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "Id": "SomeID", + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["QueueConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["QueueConfigurations"][0]["Id"] == "SomeID" + assert result["QueueConfigurations"][0]["QueueArn"] == "arn:aws:sqs:us-east-1:012345678910:myQueue" + assert result["QueueConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["QueueConfigurations"][0]["Events"]) == 1 + assert len(result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # Place proper Lambda configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + "Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert result["LambdaFunctionConfigurations"][0]["Id"] + assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \ + "arn:aws:lambda:us-east-1:012345678910:function:lambda" + assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1 + assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # And with all 3 set: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + } + ], + "LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + "Events": ["s3:ObjectCreated:*"] + } + ], + "QueueConfigurations": [ + { + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert len(result["TopicConfigurations"]) == 1 + assert len(result["QueueConfigurations"]) == 1 + + # And clear it out: + s3.put_bucket_notification_configuration(Bucket="bucket", NotificationConfiguration={}) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + +@mock_s3 +def test_put_bucket_notification_errors(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With incorrect ARNs: + for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "{}Configurations".format(tech): [ + { + "{}Arn".format(tech): + "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The ARN is not well formed" + + # Region not the same as the bucket: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-west-2:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == \ + "The notification destination service region is not valid for the bucket location constraint" + + # Invalid event name: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["notarealeventname"] + } + ] + }) + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The event is not supported for notifications" + + @mock_s3 def test_boto3_put_bucket_logging(): s3 = boto3.client("s3", region_name="us-east-1") @@ -1953,7 +2180,7 @@ def test_boto3_put_bucket_logging(): result = s3.get_bucket_logging(Bucket=bucket_name) assert len(result["LoggingEnabled"]["TargetGrants"]) == 2 assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \ - "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" + "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" # Test with just 1 grant: s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ From d55ff20685da41eadb75aa1d3b1cb5e64770d549 Mon Sep 17 00:00:00 2001 From: Javier Buzzi Date: Wed, 21 Mar 2018 17:11:49 +0100 Subject: [PATCH 113/182] Basic support for AWS Gateway apikeys (#1494) * Basic support for AWS Gateway apikeys * Adds missing -- much needed tests * Fixes issue with master merge * Fixes linter * Fixes tests --- moto/apigateway/models.py | 37 ++++++++++++++++++++++++ moto/apigateway/responses.py | 22 ++++++++++++++ moto/apigateway/urls.py | 2 ++ tests/test_apigateway/test_apigateway.py | 34 ++++++++++++++++++++-- 4 files changed, 93 insertions(+), 2 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index cc8696104..27a9b86c2 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -1,6 +1,8 @@ from __future__ import absolute_import from __future__ import unicode_literals +import random +import string import requests import time @@ -291,6 +293,25 @@ class Stage(BaseModel, dict): raise Exception('Patch operation "%s" not implemented' % op['op']) +class ApiKey(BaseModel, dict): + + def __init__(self, name=None, description=None, enabled=True, + generateDistinctId=False, value=None, stageKeys=None, customerId=None): + super(ApiKey, self).__init__() + self['id'] = create_id() + if generateDistinctId: + # Best guess of what AWS does internally + self['value'] = ''.join(random.sample(string.ascii_letters + string.digits, 40)) + else: + self['value'] = value + self['name'] = name + self['customerId'] = customerId + self['description'] = description + self['enabled'] = enabled + self['createdDate'] = self['lastUpdatedDate'] = int(time.time()) + self['stageKeys'] = stageKeys + + class RestAPI(BaseModel): def __init__(self, id, region_name, name, description): @@ -386,6 +407,7 @@ class APIGatewayBackend(BaseBackend): def __init__(self, region_name): super(APIGatewayBackend, self).__init__() self.apis = {} + self.keys = {} self.region_name = region_name def reset(self): @@ -539,6 +561,21 @@ class APIGatewayBackend(BaseBackend): api = self.get_rest_api(function_id) return api.delete_deployment(deployment_id) + def create_apikey(self, payload): + key = ApiKey(**payload) + self.keys[key['value']] = key + return key + + def get_apikeys(self): + return list(self.keys.values()) + + def get_apikey(self, value): + return self.keys[value] + + def delete_apikey(self, value): + self.keys.pop(value) + return {} + apigateway_backends = {} # Not available in boto yet diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 443fd4060..ff6ef1f33 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -226,3 +226,25 @@ class APIGatewayResponse(BaseResponse): deployment = self.backend.delete_deployment( function_id, deployment_id) return 200, {}, json.dumps(deployment) + + def apikeys(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + if self.method == 'POST': + apikey_response = self.backend.create_apikey(json.loads(self.body)) + elif self.method == 'GET': + apikeys_response = self.backend.get_apikeys() + return 200, {}, json.dumps({"item": apikeys_response}) + return 200, {}, json.dumps(apikey_response) + + def apikey_individual(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + url_path_parts = self.path.split("/") + apikey = url_path_parts[2] + + if self.method == 'GET': + apikey_response = self.backend.get_apikey(apikey) + elif self.method == 'DELETE': + apikey_response = self.backend.delete_apikey(apikey) + return 200, {}, json.dumps(apikey_response) diff --git a/moto/apigateway/urls.py b/moto/apigateway/urls.py index 5637699e0..ca1f445a7 100644 --- a/moto/apigateway/urls.py +++ b/moto/apigateway/urls.py @@ -18,4 +18,6 @@ url_paths = { '{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/responses/(?P\d+)$': APIGatewayResponse().resource_method_responses, '{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/integration/?$': APIGatewayResponse().integrations, '{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/integration/responses/(?P\d+)/?$': APIGatewayResponse().integration_responses, + '{0}/apikeys$': APIGatewayResponse().apikeys, + '{0}/apikeys/(?P[^/]+)': APIGatewayResponse().apikey_individual, } diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 11230658b..9e2307bdd 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1,8 +1,6 @@ from __future__ import unicode_literals -from datetime import datetime -from dateutil.tz import tzutc import boto3 from freezegun import freeze_time import requests @@ -965,3 +963,35 @@ def test_http_proxying_integration(): if not settings.TEST_SERVER_MODE: requests.get(deploy_url).content.should.equal(b"a fake response") + + +@mock_apigateway +def test_api_keys(): + region_name = 'us-west-2' + client = boto3.client('apigateway', region_name=region_name) + response = client.get_api_keys() + len(response['items']).should.equal(0) + + apikey_value = '12345' + apikey_name = 'TESTKEY1' + payload = {'value': apikey_value, 'name': apikey_name} + response = client.create_api_key(**payload) + apikey = client.get_api_key(apiKey=payload['value']) + apikey['name'].should.equal(apikey_name) + apikey['value'].should.equal(apikey_value) + + apikey_name = 'TESTKEY2' + payload = {'name': apikey_name, 'generateDistinctId': True} + response = client.create_api_key(**payload) + apikey = client.get_api_key(apiKey=response['value']) + apikey['name'].should.equal(apikey_name) + len(apikey['value']).should.equal(40) + apikey_value = apikey['value'] + + response = client.get_api_keys() + len(response['items']).should.equal(2) + + client.delete_api_key(apiKey=apikey_value) + + response = client.get_api_keys() + len(response['items']).should.equal(1) From c13f77173fe1d83069f585e046a2b358e65b4c76 Mon Sep 17 00:00:00 2001 From: Ben Jones Date: Wed, 21 Mar 2018 11:13:05 -0500 Subject: [PATCH 114/182] add UsePreviousValue support for parameters when updating a CloudFormation stack (#1504) --- moto/cloudformation/responses.py | 13 ++++++--- .../test_cloudformation_stack_crud_boto3.py | 27 +++++++++++++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 73d1d2c2b..a1295a20d 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -222,17 +222,24 @@ class CloudFormationResponse(BaseResponse): role_arn = self._get_param('RoleARN') template_url = self._get_param('TemplateURL') stack_body = self._get_param('TemplateBody') + stack = self.cloudformation_backend.get_stack(stack_name) if self._get_param('UsePreviousTemplate') == "true": - stack_body = self.cloudformation_backend.get_stack( - stack_name).template + stack_body = stack.template elif not stack_body and template_url: stack_body = self._get_stack_from_s3_url(template_url) + incoming_params = self._get_list_prefix("Parameters.member") parameters = dict([ (parameter['parameter_key'], parameter['parameter_value']) for parameter - in self._get_list_prefix("Parameters.member") + in incoming_params if 'parameter_value' in parameter ]) + previous = dict([ + (parameter['parameter_key'], stack.parameters[parameter['parameter_key']]) + for parameter + in incoming_params if 'use_previous_value' in parameter + ]) + parameters.update(previous) # boto3 is supposed to let you clear the tags by passing an empty value, but the request body doesn't # end up containing anything we can use to differentiate between passing an empty value versus not # passing anything. so until that changes, moto won't be able to clear tags, only update them. diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index fb9b549cd..1dbf80fb5 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -276,6 +276,33 @@ def test_create_stack_from_s3_url(): json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) +@mock_cloudformation +def test_update_stack_with_previous_value(): + name = 'update_stack_with_previous_value' + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName=name, TemplateBody=dummy_template_yaml_with_ref, + Parameters=[ + {'ParameterKey': 'TagName', 'ParameterValue': 'foo'}, + {'ParameterKey': 'TagDescription', 'ParameterValue': 'bar'}, + ] + ) + cf_conn.update_stack( + StackName=name, UsePreviousTemplate=True, + Parameters=[ + {'ParameterKey': 'TagName', 'UsePreviousValue': True}, + {'ParameterKey': 'TagDescription', 'ParameterValue': 'not bar'}, + ] + ) + stack = cf_conn.describe_stacks(StackName=name)['Stacks'][0] + tag_name = [x['ParameterValue'] for x in stack['Parameters'] + if x['ParameterKey'] == 'TagName'][0] + tag_desc = [x['ParameterValue'] for x in stack['Parameters'] + if x['ParameterKey'] == 'TagDescription'][0] + assert tag_name == 'foo' + assert tag_desc == 'not bar' + + @mock_cloudformation @mock_s3 @mock_ec2 From 1a8a4a084d843939ecd2a7ea23789e606d542850 Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Wed, 21 Mar 2018 16:33:09 +0000 Subject: [PATCH 115/182] S3 Ignore Subdomain for Bucketname Flag (#1419) * Some circumstances need subdomains to be ignored rather that interpreted as bucketname, this patch allows such behaviour to be configured * Adding helper case whereby localstack features as path based exception * Remove whitespace :( --- moto/s3/responses.py | 7 ++++--- moto/s3/utils.py | 3 +++ tests/test_s3/test_s3_utils.py | 7 +++++++ 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index fce45b5f9..5ae3b0ede 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -57,10 +57,11 @@ class ResponseObject(_TemplateEnvironmentMixin): if not host: host = urlparse(request.url).netloc - if (not host or host.startswith('localhost') or + if (not host or host.startswith('localhost') or host.startswith('localstack') or re.match(r'^[^.]+$', host) or re.match(r'^.*\.svc\.cluster\.local$', host)): - # Default to path-based buckets for (1) localhost, (2) local host names that do not - # contain a "." (e.g., Docker container host names), or (3) kubernetes host names + # Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev), + # (3) local host names that do not contain a "." (e.g., Docker container host names), or + # (4) kubernetes host names return False match = re.match(r'^([^\[\]:]+)(:\d+)?$', host) diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 8968d2ad2..85a812aad 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import logging +import os from boto.s3.key import Key import re @@ -15,6 +16,8 @@ bucket_name_regex = re.compile("(.+).s3(.*).amazonaws.com") def bucket_name_from_url(url): + if os.environ.get('S3_IGNORE_SUBDOMAIN_BUCKETNAME', '') in ['1', 'true']: + return None domain = urlparse(url).netloc if domain.startswith('www.'): diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index f1dfc04d1..9cda1f157 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +import os from sure import expect from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore, parse_region_from_url @@ -16,6 +17,12 @@ def test_localhost_without_bucket(): expect(bucket_name_from_url( 'https://www.localhost:5000/def')).should.equal(None) +def test_force_ignore_subdomain_for_bucketnames(): + os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME'] = '1' + expect(bucket_name_from_url('https://subdomain.localhost:5000/abc/resource')).should.equal(None) + del(os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME']) + + def test_versioned_key_store(): d = _VersionedKeyStore() From 2faffc96de1112e8a2b89c264c770f6fdca66a11 Mon Sep 17 00:00:00 2001 From: Kevin Ford Date: Thu, 22 Mar 2018 00:13:09 -0500 Subject: [PATCH 116/182] Use 'data' attribute of Flask Request object. (#1117) * Flask Request object does not have a 'body' attribute, changed to 'data' * Making moto 'glaciar' more aws 'glaciar' like. * Making moto 'glacier' more aws 'glacier' like. * Fixing Travis errors? * OK, check if object has proper attribute because HTTPrettyRequest has no data attribute and Python Requests has no body attribute. * Output to match test expectation; sleep for 60 seconds to mimic actual wait time. * Amending test_describe_job to reflect changes. * Shorten time from 1 minute to seconds. * Shorten sleep time in test. Forgot about the test. --- moto/glacier/models.py | 151 ++++++++++++++++++++---- moto/glacier/responses.py | 49 ++++++-- tests/test_glacier/test_glacier_jobs.py | 26 ++-- 3 files changed, 170 insertions(+), 56 deletions(-) diff --git a/moto/glacier/models.py b/moto/glacier/models.py index 1afb1241a..2c16bc97d 100644 --- a/moto/glacier/models.py +++ b/moto/glacier/models.py @@ -2,42 +2,101 @@ from __future__ import unicode_literals import hashlib +import datetime + + import boto.glacier from moto.core import BaseBackend, BaseModel from .utils import get_job_id -class ArchiveJob(BaseModel): +class Job(BaseModel): + def __init__(self, tier): + self.st = datetime.datetime.now() - def __init__(self, job_id, archive_id): + if tier.lower() == "expedited": + self.et = self.st + datetime.timedelta(seconds=2) + elif tier.lower() == "bulk": + self.et = self.st + datetime.timedelta(seconds=10) + else: + # Standard + self.et = self.st + datetime.timedelta(seconds=5) + + +class ArchiveJob(Job): + + def __init__(self, job_id, tier, arn, archive_id): self.job_id = job_id + self.tier = tier + self.arn = arn self.archive_id = archive_id + Job.__init__(self, tier) def to_dict(self): - return { - "Action": "InventoryRetrieval", + d = { + "Action": "ArchiveRetrieval", "ArchiveId": self.archive_id, "ArchiveSizeInBytes": 0, "ArchiveSHA256TreeHash": None, - "Completed": True, - "CompletionDate": "2013-03-20T17:03:43.221Z", - "CreationDate": "2013-03-20T17:03:43.221Z", - "InventorySizeInBytes": "0", + "Completed": False, + "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "InventorySizeInBytes": 0, "JobDescription": None, "JobId": self.job_id, "RetrievalByteRange": None, "SHA256TreeHash": None, "SNSTopic": None, - "StatusCode": "Succeeded", + "StatusCode": "InProgress", "StatusMessage": None, - "VaultARN": None, + "VaultARN": self.arn, + "Tier": self.tier } + if datetime.datetime.now() > self.et: + d["Completed"] = True + d["CompletionDate"] = self.et.strftime("%Y-%m-%dT%H:%M:%S.000Z") + d["InventorySizeInBytes"] = 10000 + d["StatusCode"] = "Succeeded" + return d + + +class InventoryJob(Job): + + def __init__(self, job_id, tier, arn): + self.job_id = job_id + self.tier = tier + self.arn = arn + Job.__init__(self, tier) + + def to_dict(self): + d = { + "Action": "InventoryRetrieval", + "ArchiveSHA256TreeHash": None, + "Completed": False, + "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "InventorySizeInBytes": 0, + "JobDescription": None, + "JobId": self.job_id, + "RetrievalByteRange": None, + "SHA256TreeHash": None, + "SNSTopic": None, + "StatusCode": "InProgress", + "StatusMessage": None, + "VaultARN": self.arn, + "Tier": self.tier + } + if datetime.datetime.now() > self.et: + d["Completed"] = True + d["CompletionDate"] = self.et.strftime("%Y-%m-%dT%H:%M:%S.000Z") + d["InventorySizeInBytes"] = 10000 + d["StatusCode"] = "Succeeded" + return d class Vault(BaseModel): def __init__(self, vault_name, region): + self.st = datetime.datetime.now() self.vault_name = vault_name self.region = region self.archives = {} @@ -48,29 +107,57 @@ class Vault(BaseModel): return "arn:aws:glacier:{0}:012345678901:vaults/{1}".format(self.region, self.vault_name) def to_dict(self): - return { - "CreationDate": "2013-03-20T17:03:43.221Z", - "LastInventoryDate": "2013-03-20T17:03:43.221Z", - "NumberOfArchives": None, - "SizeInBytes": None, + archives_size = 0 + for k in self.archives: + archives_size += self.archives[k]["size"] + d = { + "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "LastInventoryDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "NumberOfArchives": len(self.archives), + "SizeInBytes": archives_size, "VaultARN": self.arn, "VaultName": self.vault_name, } + return d - def create_archive(self, body): - archive_id = hashlib.sha256(body).hexdigest() - self.archives[archive_id] = body + def create_archive(self, body, description): + archive_id = hashlib.md5(body).hexdigest() + self.archives[archive_id] = {} + self.archives[archive_id]["body"] = body + self.archives[archive_id]["size"] = len(body) + self.archives[archive_id]["sha256"] = hashlib.sha256(body).hexdigest() + self.archives[archive_id]["creation_date"] = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z") + self.archives[archive_id]["description"] = description return archive_id def get_archive_body(self, archive_id): - return self.archives[archive_id] + return self.archives[archive_id]["body"] + + def get_archive_list(self): + archive_list = [] + for a in self.archives: + archive = self.archives[a] + aobj = { + "ArchiveId": a, + "ArchiveDescription": archive["description"], + "CreationDate": archive["creation_date"], + "Size": archive["size"], + "SHA256TreeHash": archive["sha256"] + } + archive_list.append(aobj) + return archive_list def delete_archive(self, archive_id): return self.archives.pop(archive_id) - def initiate_job(self, archive_id): + def initiate_job(self, job_type, tier, archive_id): job_id = get_job_id() - job = ArchiveJob(job_id, archive_id) + + if job_type == "inventory-retrieval": + job = InventoryJob(job_id, tier, self.arn) + elif job_type == "archive-retrieval": + job = ArchiveJob(job_id, tier, self.arn, archive_id) + self.jobs[job_id] = job return job_id @@ -80,10 +167,24 @@ class Vault(BaseModel): def describe_job(self, job_id): return self.jobs.get(job_id) + def job_ready(self, job_id): + job = self.describe_job(job_id) + jobj = job.to_dict() + return jobj["Completed"] + def get_job_output(self, job_id): job = self.describe_job(job_id) - archive_body = self.get_archive_body(job.archive_id) - return archive_body + jobj = job.to_dict() + if jobj["Action"] == "InventoryRetrieval": + archives = self.get_archive_list() + return { + "VaultARN": self.arn, + "InventoryDate": jobj["CompletionDate"], + "ArchiveList": archives + } + else: + archive_body = self.get_archive_body(job.archive_id) + return archive_body class GlacierBackend(BaseBackend): @@ -109,9 +210,9 @@ class GlacierBackend(BaseBackend): def delete_vault(self, vault_name): self.vaults.pop(vault_name) - def initiate_job(self, vault_name, archive_id): + def initiate_job(self, vault_name, job_type, tier, archive_id): vault = self.get_vault(vault_name) - job_id = vault.initiate_job(archive_id) + job_id = vault.initiate_job(job_type, tier, archive_id) return job_id def list_jobs(self, vault_name): diff --git a/moto/glacier/responses.py b/moto/glacier/responses.py index cda859b29..abdf83e4f 100644 --- a/moto/glacier/responses.py +++ b/moto/glacier/responses.py @@ -72,17 +72,25 @@ class GlacierResponse(_TemplateEnvironmentMixin): def _vault_archive_response(self, request, full_url, headers): method = request.method - body = request.body + if hasattr(request, 'body'): + body = request.body + else: + body = request.data + description = "" + if 'x-amz-archive-description' in request.headers: + description = request.headers['x-amz-archive-description'] parsed_url = urlparse(full_url) querystring = parse_qs(parsed_url.query, keep_blank_values=True) vault_name = full_url.split("/")[-2] if method == 'POST': - return self._vault_archive_response_post(vault_name, body, querystring, headers) + return self._vault_archive_response_post(vault_name, body, description, querystring, headers) + else: + return 400, headers, "400 Bad Request" - def _vault_archive_response_post(self, vault_name, body, querystring, headers): + def _vault_archive_response_post(self, vault_name, body, description, querystring, headers): vault = self.backend.get_vault(vault_name) - vault_id = vault.create_archive(body) + vault_id = vault.create_archive(body, description) headers['x-amz-archive-id'] = vault_id return 201, headers, "" @@ -110,7 +118,10 @@ class GlacierResponse(_TemplateEnvironmentMixin): def _vault_jobs_response(self, request, full_url, headers): method = request.method - body = request.body + if hasattr(request, 'body'): + body = request.body + else: + body = request.data account_id = full_url.split("/")[1] vault_name = full_url.split("/")[-2] @@ -125,11 +136,17 @@ class GlacierResponse(_TemplateEnvironmentMixin): }) elif method == 'POST': json_body = json.loads(body.decode("utf-8")) - archive_id = json_body['ArchiveId'] - job_id = self.backend.initiate_job(vault_name, archive_id) + job_type = json_body['Type'] + archive_id = None + if 'ArchiveId' in json_body: + archive_id = json_body['ArchiveId'] + if 'Tier' in json_body: + tier = json_body["Tier"] + else: + tier = "Standard" + job_id = self.backend.initiate_job(vault_name, job_type, tier, archive_id) headers['x-amz-job-id'] = job_id - headers[ - 'Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id) + headers['Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id) return 202, headers, "" @classmethod @@ -155,8 +172,14 @@ class GlacierResponse(_TemplateEnvironmentMixin): def _vault_jobs_output_response(self, request, full_url, headers): vault_name = full_url.split("/")[-4] job_id = full_url.split("/")[-2] - vault = self.backend.get_vault(vault_name) - output = vault.get_job_output(job_id) - headers['content-type'] = 'application/octet-stream' - return 200, headers, output + if vault.job_ready(job_id): + output = vault.get_job_output(job_id) + if isinstance(output, dict): + headers['content-type'] = 'application/json' + return 200, headers, json.dumps(output) + else: + headers['content-type'] = 'application/octet-stream' + return 200, headers, output + else: + return 404, headers, "404 Not Found" diff --git a/tests/test_glacier/test_glacier_jobs.py b/tests/test_glacier/test_glacier_jobs.py index 66780f681..152aa14c8 100644 --- a/tests/test_glacier/test_glacier_jobs.py +++ b/tests/test_glacier/test_glacier_jobs.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import json +import time from boto.glacier.layer1 import Layer1 import sure # noqa @@ -39,24 +40,11 @@ def test_describe_job(): job_id = job_response['JobId'] job = conn.describe_job(vault_name, job_id) - json.loads(job.read().decode("utf-8")).should.equal({ - 'CompletionDate': '2013-03-20T17:03:43.221Z', - 'VaultARN': None, - 'RetrievalByteRange': None, - 'SHA256TreeHash': None, - 'Completed': True, - 'InventorySizeInBytes': '0', - 'JobId': job_id, - 'Action': 'InventoryRetrieval', - 'JobDescription': None, - 'SNSTopic': None, - 'ArchiveSizeInBytes': 0, - 'ArchiveId': archive_id, - 'ArchiveSHA256TreeHash': None, - 'CreationDate': '2013-03-20T17:03:43.221Z', - 'StatusMessage': None, - 'StatusCode': 'Succeeded', - }) + joboutput = json.loads(job.read().decode("utf-8")) + + joboutput.should.have.key('Tier').which.should.equal('Standard') + joboutput.should.have.key('StatusCode').which.should.equal('InProgress') + joboutput.should.have.key('VaultARN').which.should.equal('arn:aws:glacier:RegionInfo:us-west-2:012345678901:vaults/my_vault') @mock_glacier_deprecated @@ -96,5 +84,7 @@ def test_get_job_output(): }) job_id = job_response['JobId'] + time.sleep(6) + output = conn.get_job_output(vault_name, job_id) output.read().decode("utf-8").should.equal("some stuff") From 941d817da4e62c72f638e4972e3ae39dfbcf4d7b Mon Sep 17 00:00:00 2001 From: Alexander Mohr Date: Wed, 21 Mar 2018 22:14:10 -0700 Subject: [PATCH 117/182] fix lambda endpoint parsing (#1395) * fix endpoint parsing * add new unittest * finish test --- moto/awslambda/models.py | 6 +-- moto/sns/models.py | 18 ++++++-- tests/test_awslambda/test_lambda.py | 65 +++++++++++++++++++++++++++-- 3 files changed, 80 insertions(+), 9 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 3c3d3ea66..80b4ffba3 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -603,7 +603,7 @@ class LambdaBackend(BaseBackend): def list_functions(self): return self._lambdas.all() - def send_message(self, function_name, message, subject=None): + def send_message(self, function_name, message, subject=None, qualifier=None): event = { "Records": [ { @@ -636,8 +636,8 @@ class LambdaBackend(BaseBackend): ] } - self._functions[function_name][-1].invoke(json.dumps(event), {}, {}) - pass + func = self._lambdas.get_function(function_name, qualifier) + func.invoke(json.dumps(event), {}, {}) def list_tags(self, resource): return self.get_function_by_arn(resource).tags diff --git a/moto/sns/models.py b/moto/sns/models.py index 9afc28f46..a66523614 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -100,9 +100,21 @@ class Subscription(BaseModel): requests.post(self.endpoint, json=post_data) elif self.protocol == 'lambda': # TODO: support bad function name - function_name = self.endpoint.split(":")[-1] - region = self.arn.split(':')[3] - lambda_backends[region].send_message(function_name, message, subject=subject) + # http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + arr = self.endpoint.split(":") + region = arr[3] + qualifier = None + if len(arr) == 7: + assert arr[5] == 'function' + function_name = arr[-1] + elif len(arr) == 8: + assert arr[5] == 'function' + qualifier = arr[-1] + function_name = arr[-2] + else: + assert False + + lambda_backends[region].send_message(function_name, message, subject=subject, qualifier=qualifier) def _matches_filter_policy(self, message_attributes): # TODO: support Anything-but matching, prefix matching and diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index e7a9f9174..8ea9cc6fd 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -6,11 +6,12 @@ import boto3 import hashlib import io import json +import time import zipfile import sure # noqa from freezegun import freeze_time -from moto import mock_lambda, mock_s3, mock_ec2, settings +from moto import mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings _lambda_region = 'us-west-2' @@ -48,6 +49,15 @@ def lambda_handler(event, context): return _process_lambda(func_str) +def get_test_zip_file3(): + pfunc = """ +def lambda_handler(event, context): + print("get_test_zip_file3 success") + return event +""" + return _process_lambda(pfunc) + + @mock_lambda def test_list_functions(): conn = boto3.client('lambda', 'us-west-2') @@ -160,6 +170,56 @@ if settings.TEST_SERVER_MODE: payload.should.equal(msg) +@mock_logs +@mock_sns +@mock_ec2 +@mock_lambda +def test_invoke_function_from_sns(): + logs_conn = boto3.client("logs", region_name="us-west-2") + sns_conn = boto3.client("sns", region_name="us-west-2") + sns_conn.create_topic(Name="some-topic") + topics_json = sns_conn.list_topics() + topics = topics_json["Topics"] + topic_arn = topics[0]['TopicArn'] + + conn = boto3.client('lambda', 'us-west-2') + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': get_test_zip_file3(), + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + sns_conn.subscribe(TopicArn=topic_arn, Protocol="lambda", Endpoint=result['FunctionArn']) + + result = sns_conn.publish(TopicArn=topic_arn, Message=json.dumps({})) + + start = time.time() + while (time.time() - start) < 30: + result = logs_conn.describe_log_streams(logGroupName='/aws/lambda/testFunction') + log_streams = result.get('logStreams') + if not log_streams: + time.sleep(1) + continue + + assert len(log_streams) == 1 + result = logs_conn.get_log_events(logGroupName='/aws/lambda/testFunction', logStreamName=log_streams[0]['logStreamName']) + for event in result.get('events'): + if event['message'] == 'get_test_zip_file3 success': + return + + time.sleep(1) + + assert False, "Test Failed" + + @mock_lambda def test_create_based_on_s3_with_missing_bucket(): conn = boto3.client('lambda', 'us-west-2') @@ -420,7 +480,6 @@ def test_publish(): function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST') - @mock_lambda @mock_s3 @freeze_time('2015-01-01 00:00:00') @@ -674,7 +733,7 @@ def test_get_function_created_with_zipfile(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn":'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, From 5fe575b6ed977d05fed361729071d4b475ebf2ea Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 22 Mar 2018 11:28:32 -0700 Subject: [PATCH 118/182] bumping to version 1.3.1 (#1530) * bumping to version 1.3.1 * Updating implementation coverage --- .bumpversion.cfg | 2 +- IMPLEMENTATION_COVERAGE.md | 83 +++++++++++++++++++++++++++++++++----- moto/__init__.py | 2 +- setup.py | 2 +- 4 files changed, 76 insertions(+), 13 deletions(-) diff --git a/.bumpversion.cfg b/.bumpversion.cfg index ef5aec38e..91e571d38 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.3.0 +current_version = 1.3.1 [bumpversion:file:setup.py] diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 582448219..c98093147 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -185,6 +185,7 @@ ## appstream - 0% implemented - [ ] associate_fleet +- [ ] copy_image - [ ] create_directory_config - [ ] create_fleet - [ ] create_image_builder @@ -240,6 +241,7 @@ - [ ] list_resolvers - [ ] list_types - [ ] start_schema_creation +- [ ] update_api_key - [ ] update_data_source - [ ] update_graphql_api - [ ] update_resolver @@ -312,6 +314,12 @@ - [ ] terminate_instance_in_auto_scaling_group - [X] update_auto_scaling_group +## autoscaling-plans - 0% implemented +- [ ] create_scaling_plan +- [ ] delete_scaling_plan +- [ ] describe_scaling_plan_resources +- [ ] describe_scaling_plans + ## batch - 93% implemented - [ ] cancel_job - [X] create_compute_environment @@ -348,6 +356,8 @@ ## ce - 0% implemented - [ ] get_cost_and_usage - [ ] get_dimension_values +- [ ] get_reservation_coverage +- [ ] get_reservation_purchase_recommendation - [ ] get_reservation_utilization - [ ] get_tags @@ -642,6 +652,7 @@ - [ ] post_comment_for_compared_commit - [ ] post_comment_for_pull_request - [ ] post_comment_reply +- [ ] put_file - [ ] put_repository_triggers - [ ] test_repository_triggers - [ ] update_comment @@ -668,6 +679,7 @@ - [ ] delete_application - [ ] delete_deployment_config - [ ] delete_deployment_group +- [ ] delete_git_hub_account_token - [ ] deregister_on_premises_instance - [ ] get_application - [ ] get_application_revision @@ -822,6 +834,7 @@ - [ ] get_device - [ ] get_group - [ ] get_identity_provider_by_identifier +- [ ] get_signing_certificate - [ ] get_ui_customization - [ ] get_user - [ ] get_user_attribute_verification_code @@ -891,6 +904,7 @@ - [ ] start_topics_detection_job ## config - 0% implemented +- [ ] batch_get_resource_config - [ ] delete_config_rule - [ ] delete_configuration_recorder - [ ] delete_delivery_channel @@ -1109,6 +1123,7 @@ - [ ] describe_events - [ ] describe_orderable_replication_instances - [ ] describe_refresh_schemas_status +- [ ] describe_replication_instance_task_logs - [ ] describe_replication_instances - [ ] describe_replication_subnet_groups - [ ] describe_replication_task_assessment_results @@ -1122,6 +1137,7 @@ - [ ] modify_replication_instance - [ ] modify_replication_subnet_group - [ ] modify_replication_task +- [ ] reboot_replication_instance - [ ] refresh_schemas - [ ] reload_tables - [ ] remove_tags_from_resource @@ -1222,7 +1238,7 @@ - [ ] associate_iam_instance_profile - [X] associate_route_table - [ ] associate_subnet_cidr_block -- [ ] associate_vpc_cidr_block +- [X] associate_vpc_cidr_block - [ ] attach_classic_link_vpc - [X] attach_internet_gateway - [X] attach_network_interface @@ -1312,6 +1328,7 @@ - [X] deregister_image - [ ] describe_account_attributes - [X] describe_addresses +- [ ] describe_aggregate_id_format - [X] describe_availability_zones - [ ] describe_bundle_tasks - [ ] describe_classic_link_instances @@ -1350,6 +1367,7 @@ - [X] describe_network_interfaces - [ ] describe_placement_groups - [ ] describe_prefix_lists +- [ ] describe_principal_id_format - [X] describe_regions - [ ] describe_reserved_instances - [ ] describe_reserved_instances_listings @@ -1400,7 +1418,7 @@ - [ ] disassociate_iam_instance_profile - [X] disassociate_route_table - [ ] disassociate_subnet_cidr_block -- [ ] disassociate_vpc_cidr_block +- [X] disassociate_vpc_cidr_block - [ ] enable_vgw_route_propagation - [ ] enable_volume_io - [ ] enable_vpc_classic_link @@ -1605,6 +1623,7 @@ - [ ] delete_configuration_template - [ ] delete_environment_configuration - [ ] delete_platform_version +- [ ] describe_account_attributes - [ ] describe_application_versions - [ ] describe_applications - [ ] describe_configuration_options @@ -1843,6 +1862,7 @@ - [ ] resolve_alias - [ ] search_game_sessions - [ ] start_game_session_placement +- [ ] start_match_backfill - [ ] start_matchmaking - [ ] stop_game_session_placement - [ ] stop_matchmaking @@ -1897,6 +1917,7 @@ - [ ] batch_delete_connection - [ ] batch_delete_partition - [ ] batch_delete_table +- [ ] batch_delete_table_version - [ ] batch_get_partition - [ ] batch_stop_job_run - [ ] create_classifier @@ -1918,6 +1939,7 @@ - [ ] delete_job - [ ] delete_partition - [ ] delete_table +- [ ] delete_table_version - [ ] delete_trigger - [ ] delete_user_defined_function - [ ] get_catalog_import_status @@ -1942,6 +1964,7 @@ - [ ] get_partitions - [ ] get_plan - [ ] get_table +- [ ] get_table_version - [ ] get_table_versions - [ ] get_tables - [ ] get_trigger @@ -2396,7 +2419,7 @@ - [ ] start_next_pending_job_execution - [ ] update_job_execution -## kinesis - 59% implemented +## kinesis - 56% implemented - [X] add_tags_to_stream - [X] create_stream - [ ] decrease_stream_retention_period @@ -2409,6 +2432,7 @@ - [X] get_records - [X] get_shard_iterator - [ ] increase_stream_retention_period +- [ ] list_shards - [X] list_streams - [X] list_tags_for_stream - [X] merge_shards @@ -2551,6 +2575,7 @@ - [ ] get_builtin_intents - [ ] get_builtin_slot_types - [ ] get_export +- [ ] get_import - [ ] get_intent - [ ] get_intent_versions - [ ] get_intents @@ -2562,6 +2587,7 @@ - [ ] put_bot_alias - [ ] put_intent - [ ] put_slot_type +- [ ] start_import ## lex-runtime - 0% implemented - [ ] post_content @@ -2749,6 +2775,9 @@ - [ ] list_inputs - [ ] start_channel - [ ] stop_channel +- [ ] update_channel +- [ ] update_input +- [ ] update_input_security_group ## mediapackage - 0% implemented - [ ] create_channel @@ -2767,10 +2796,13 @@ - [ ] create_container - [ ] delete_container - [ ] delete_container_policy +- [ ] delete_cors_policy - [ ] describe_container - [ ] get_container_policy +- [ ] get_cors_policy - [ ] list_containers - [ ] put_container_policy +- [ ] put_cors_policy ## mediastore-data - 0% implemented - [ ] delete_object @@ -2873,13 +2905,13 @@ - [ ] update_notification_settings - [ ] update_qualification_type -## opsworks - 9% implemented +## opsworks - 12% implemented - [ ] assign_instance - [ ] assign_volume - [ ] associate_elastic_ip - [ ] attach_elastic_load_balancer - [ ] clone_stack -- [ ] create_app +- [X] create_app - [ ] create_deployment - [X] create_instance - [X] create_layer @@ -2896,7 +2928,7 @@ - [ ] deregister_rds_db_instance - [ ] deregister_volume - [ ] describe_agent_versions -- [ ] describe_apps +- [X] describe_apps - [ ] describe_commands - [ ] describe_deployments - [ ] describe_ecs_clusters @@ -2906,6 +2938,7 @@ - [X] describe_layers - [ ] describe_load_based_auto_scaling - [ ] describe_my_user_profile +- [ ] describe_operating_systems - [ ] describe_permissions - [ ] describe_raid_arrays - [ ] describe_rds_db_instances @@ -3012,6 +3045,7 @@ ## pinpoint - 0% implemented - [ ] create_app - [ ] create_campaign +- [ ] create_export_job - [ ] create_import_job - [ ] create_segment - [ ] delete_adm_channel @@ -3023,6 +3057,7 @@ - [ ] delete_baidu_channel - [ ] delete_campaign - [ ] delete_email_channel +- [ ] delete_endpoint - [ ] delete_event_stream - [ ] delete_gcm_channel - [ ] delete_segment @@ -3044,10 +3079,13 @@ - [ ] get_email_channel - [ ] get_endpoint - [ ] get_event_stream +- [ ] get_export_job +- [ ] get_export_jobs - [ ] get_gcm_channel - [ ] get_import_job - [ ] get_import_jobs - [ ] get_segment +- [ ] get_segment_export_jobs - [ ] get_segment_import_jobs - [ ] get_segment_version - [ ] get_segment_versions @@ -3382,7 +3420,7 @@ - [ ] update_tags_for_domain - [ ] view_billing -## s3 - 14% implemented +## s3 - 15% implemented - [ ] abort_multipart_upload - [ ] complete_multipart_upload - [ ] copy_object @@ -3447,7 +3485,7 @@ - [X] put_bucket_logging - [ ] put_bucket_metrics_configuration - [ ] put_bucket_notification -- [ ] put_bucket_notification_configuration +- [X] put_bucket_notification_configuration - [ ] put_bucket_policy - [ ] put_bucket_replication - [ ] put_bucket_request_payment @@ -3467,21 +3505,25 @@ - [ ] create_endpoint_config - [ ] create_model - [ ] create_notebook_instance +- [ ] create_notebook_instance_lifecycle_config - [ ] create_presigned_notebook_instance_url - [ ] create_training_job - [ ] delete_endpoint - [ ] delete_endpoint_config - [ ] delete_model - [ ] delete_notebook_instance +- [ ] delete_notebook_instance_lifecycle_config - [ ] delete_tags - [ ] describe_endpoint - [ ] describe_endpoint_config - [ ] describe_model - [ ] describe_notebook_instance +- [ ] describe_notebook_instance_lifecycle_config - [ ] describe_training_job - [ ] list_endpoint_configs - [ ] list_endpoints - [ ] list_models +- [ ] list_notebook_instance_lifecycle_configs - [ ] list_notebook_instances - [ ] list_tags - [ ] list_training_jobs @@ -3491,6 +3533,7 @@ - [ ] update_endpoint - [ ] update_endpoint_weights_and_capacities - [ ] update_notebook_instance +- [ ] update_notebook_instance_lifecycle_config ## sagemaker-runtime - 0% implemented - [ ] invoke_endpoint @@ -3511,6 +3554,7 @@ - [ ] create_application - [ ] create_application_version - [ ] create_cloud_formation_change_set +- [ ] delete_application - [ ] get_application - [ ] get_application_policy - [ ] list_application_versions @@ -3528,13 +3572,16 @@ - [ ] create_portfolio - [ ] create_portfolio_share - [ ] create_product +- [ ] create_provisioned_product_plan - [ ] create_provisioning_artifact - [ ] create_tag_option - [ ] delete_constraint - [ ] delete_portfolio - [ ] delete_portfolio_share - [ ] delete_product +- [ ] delete_provisioned_product_plan - [ ] delete_provisioning_artifact +- [ ] delete_tag_option - [ ] describe_constraint - [ ] describe_copy_product_status - [ ] describe_portfolio @@ -3542,6 +3589,7 @@ - [ ] describe_product_as_admin - [ ] describe_product_view - [ ] describe_provisioned_product +- [ ] describe_provisioned_product_plan - [ ] describe_provisioning_artifact - [ ] describe_provisioning_parameters - [ ] describe_record @@ -3549,6 +3597,7 @@ - [ ] disassociate_principal_from_portfolio - [ ] disassociate_product_from_portfolio - [ ] disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan - [ ] list_accepted_portfolio_shares - [ ] list_constraints_for_portfolio - [ ] list_launch_paths @@ -3556,6 +3605,7 @@ - [ ] list_portfolios - [ ] list_portfolios_for_product - [ ] list_principals_for_portfolio +- [ ] list_provisioned_product_plans - [ ] list_provisioning_artifacts - [ ] list_record_history - [ ] list_resources_for_tag_option @@ -3565,6 +3615,7 @@ - [ ] scan_provisioned_products - [ ] search_products - [ ] search_products_as_admin +- [ ] search_provisioned_products - [ ] terminate_provisioned_product - [ ] update_constraint - [ ] update_portfolio @@ -3590,6 +3641,7 @@ - [ ] list_operations - [ ] list_services - [ ] register_instance +- [ ] update_instance_custom_health_status - [ ] update_service ## ses - 11% implemented @@ -3740,7 +3792,7 @@ - [X] subscribe - [X] unsubscribe -## sqs - 60% implemented +## sqs - 65% implemented - [X] add_permission - [X] change_message_visibility - [ ] change_message_visibility_batch @@ -3758,7 +3810,7 @@ - [X] remove_permission - [X] send_message - [ ] send_message_batch -- [ ] set_queue_attributes +- [X] set_queue_attributes - [X] tag_queue - [X] untag_queue @@ -4005,6 +4057,11 @@ - [X] start_workflow_execution - [X] terminate_workflow_execution +## transcribe - 0% implemented +- [ ] get_transcription_job +- [ ] list_transcription_jobs +- [ ] start_transcription_job + ## translate - 0% implemented - [ ] translate_text @@ -4024,6 +4081,7 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set - [ ] delete_regex_pattern_set @@ -4038,6 +4096,7 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys - [ ] get_regex_match_set @@ -4063,6 +4122,7 @@ - [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set - [ ] update_ip_set @@ -4093,6 +4153,7 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set - [ ] delete_regex_pattern_set @@ -4108,6 +4169,7 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys - [ ] get_regex_match_set @@ -4135,6 +4197,7 @@ - [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set - [ ] update_ip_set diff --git a/moto/__init__.py b/moto/__init__.py index 9703f9f68..da9f8240a 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.3.0' +__version__ = '1.3.1' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa diff --git a/setup.py b/setup.py index c3c562b85..f1570c496 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ else: setup( name='moto', - version='1.3.0', + version='1.3.1', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 0ed388b4b866472d86984ea56c5a2297071b34c3 Mon Sep 17 00:00:00 2001 From: William Richard Date: Mon, 26 Mar 2018 14:11:12 -0400 Subject: [PATCH 119/182] If Properies isn't set, cloudformation will just use default values --- moto/ecs/models.py | 6 ++- tests/test_ecs/test_ecs_boto3.py | 87 ++++++++++++++++++++++++-------- 2 files changed, 71 insertions(+), 22 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index e0b29cb01..859dfc1e9 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -61,7 +61,11 @@ class Cluster(BaseObject): @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): - properties = cloudformation_json['Properties'] + # if properties is not provided, cloudformation will use the default values for all properties + if 'Properties' in cloudformation_json: + properties = cloudformation_json['Properties'] + else: + properties = {} ecs_backend = ecs_backends[region_name] return ecs_backend.create_cluster( diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 5fcc297aa..e24471abf 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -664,7 +664,7 @@ def test_list_container_instances(): instanceIdentityDocument=instance_id_document) test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) + 'containerInstanceArn']) response = ecs_client.list_container_instances(cluster=test_cluster_name) @@ -702,7 +702,7 @@ def test_describe_container_instances(): instanceIdentityDocument=instance_id_document) test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) + 'containerInstanceArn']) test_instance_ids = list( map((lambda x: x.split('/')[1]), test_instance_arns)) @@ -1052,7 +1052,7 @@ def test_describe_tasks(): len(response['tasks']).should.equal(2) set([response['tasks'][0]['taskArn'], response['tasks'] - [1]['taskArn']]).should.equal(set(tasks_arns)) + [1]['taskArn']]).should.equal(set(tasks_arns)) @mock_ecs @@ -1208,7 +1208,8 @@ def test_resource_reservation_and_release(): cluster='test_ecs_cluster', containerInstances=[container_instance_arn] )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources, registered_resources = _fetch_container_instance_resources( + container_instance_description) remaining_resources['CPU'].should.equal(registered_resources['CPU'] - 1024) remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) registered_resources['PORTS'].append('80') @@ -1223,7 +1224,8 @@ def test_resource_reservation_and_release(): cluster='test_ecs_cluster', containerInstances=[container_instance_arn] )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources, registered_resources = _fetch_container_instance_resources( + container_instance_description) remaining_resources['CPU'].should.equal(registered_resources['CPU']) remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) @@ -1246,6 +1248,36 @@ def test_create_cluster_through_cloudformation(): } } template_json = json.dumps(template) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(0) + + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation_no_name(): + # cloudformation should create a cluster name for you if you do not provide it + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + } + } + } + template_json = json.dumps(template) cfn_conn = boto3.client('cloudformation', region_name='us-west-1') cfn_conn.create_stack( StackName="test_stack", @@ -1674,7 +1706,8 @@ def test_attributes(): attributes=[ {'name': 'env', 'value': 'prod'}, {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1}, - {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, + 'targetType': 'container-instance'} ] ) @@ -1690,12 +1723,14 @@ def test_attributes(): # Tests that the attrs have been set properly len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2) - len(list(filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) + len(list( + filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) ecs_client.delete_attributes( cluster=test_cluster_name, attributes=[ - {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, + 'targetType': 'container-instance'} ] ) NUM_CUSTOM_ATTRIBUTES -= 1 @@ -1806,7 +1841,8 @@ def test_default_container_instance_attributes(): {'name': 'ecs.instance-type', 'value': test_instance.instance_type}, {'name': 'ecs.os-type', 'value': test_instance.platform or 'linux'} ] - assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, key=lambda item: item['name']) + assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, + key=lambda item: item['name']) @mock_ec2 @@ -1846,17 +1882,19 @@ def test_describe_container_instances_with_attributes(): # Set attributes on container instance, one without a value attributes = [ - {'name': 'env', 'value': 'prod'}, - {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, 'targetType': 'container-instance'}, - {'name': 'attr_without_value'} - ] + {'name': 'env', 'value': 'prod'}, + {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, + 'targetType': 'container-instance'}, + {'name': 'attr_without_value'} + ] ecs_client.put_attributes( cluster=test_cluster_name, attributes=attributes ) # Describe container instance, should have attributes previously set - described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, containerInstances=[container_instance_id]) + described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, + containerInstances=[container_instance_id]) assert len(described_instance['containerInstances']) == 1 assert isinstance(described_instance['containerInstances'][0]['attributes'], list) @@ -1867,7 +1905,8 @@ def test_describe_container_instances_with_attributes(): attribute.pop('targetId', None) attribute.pop('targetType', None) cleaned_attributes.append(attribute) - described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], key=lambda item: item['name']) + described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], + key=lambda item: item['name']) expected_attributes = sorted(default_attributes + cleaned_attributes, key=lambda item: item['name']) assert described_attributes == expected_attributes @@ -1877,10 +1916,16 @@ def _fetch_container_instance_resources(container_instance_description): registered_resources = {} remaining_resources_list = container_instance_description['remainingResources'] registered_resources_list = container_instance_description['registeredResources'] - remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][0] - remaining_resources['MEMORY'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] - remaining_resources['PORTS'] = [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 'PORTS'][0] - registered_resources['CPU'] = [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] - registered_resources['MEMORY'] = [x['integerValue'] for x in registered_resources_list if x['name'] == 'MEMORY'][0] - registered_resources['PORTS'] = [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] + remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][ + 0] + remaining_resources['MEMORY'] = \ + [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] + remaining_resources['PORTS'] = \ + [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 'PORTS'][0] + registered_resources['CPU'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] + registered_resources['MEMORY'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'MEMORY'][0] + registered_resources['PORTS'] = \ + [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] return remaining_resources, registered_resources From f304c4e14132ee807e505b2dc33550db6a697b1e Mon Sep 17 00:00:00 2001 From: William Richard Date: Mon, 26 Mar 2018 14:13:00 -0400 Subject: [PATCH 120/182] Adding myself as a contributor --- AUTHORS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS.md b/AUTHORS.md index 5152e5471..ded1935e9 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -51,3 +51,4 @@ Moto is written by Steve Pulec with contributions from: * [Alex Morken](https://github.com/alexmorken) * [Clive Li](https://github.com/cliveli) * [Jim Shields](https://github.com/jimjshields) +* [William Richard](https://github.com/william-richard) From f18259d49e5e66929a793ec8105d32ccfcc70d21 Mon Sep 17 00:00:00 2001 From: Giorgos-Christos Dimitriou Date: Mon, 26 Mar 2018 22:26:24 +0300 Subject: [PATCH 121/182] Change REPONSE to RESPONSE for consistency (#1538) --- moto/route53/responses.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 00e5c60a5..6679e7945 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -150,7 +150,7 @@ class Route53(BaseResponse): elif method == "GET": querystring = parse_qs(parsed_url.query) - template = Template(LIST_RRSET_REPONSE) + template = Template(LIST_RRSET_RESPONSE) start_type = querystring.get("type", [None])[0] start_name = querystring.get("name", [None])[0] record_sets = the_zone.get_record_sets(start_type, start_name) @@ -182,9 +182,9 @@ class Route53(BaseResponse): elif method == "DELETE": health_check_id = parsed_url.path.split("/")[-1] route53_backend.delete_health_check(health_check_id) - return 200, headers, DELETE_HEALTH_CHECK_REPONSE + return 200, headers, DELETE_HEALTH_CHECK_RESPONSE elif method == "GET": - template = Template(LIST_HEALTH_CHECKS_REPONSE) + template = Template(LIST_HEALTH_CHECKS_RESPONSE) health_checks = route53_backend.get_health_checks() return 200, headers, template.render(health_checks=health_checks) @@ -248,7 +248,7 @@ CHANGE_TAGS_FOR_RESOURCE_RESPONSE = """ +LIST_RRSET_RESPONSE = """ {% for record_set in record_sets %} {{ record_set.to_xml() }} @@ -350,7 +350,7 @@ CREATE_HEALTH_CHECK_RESPONSE = """ {{ health_check.to_xml() }} """ -LIST_HEALTH_CHECKS_REPONSE = """ +LIST_HEALTH_CHECKS_RESPONSE = """ {% for health_check in health_checks %} @@ -361,6 +361,6 @@ LIST_HEALTH_CHECKS_REPONSE = """ {{ health_checks|length }} """ -DELETE_HEALTH_CHECK_REPONSE = """ +DELETE_HEALTH_CHECK_RESPONSE = """ """ From f4f79b2a8eb85f653c95a120c84a34723104af9c Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Wed, 28 Mar 2018 12:40:42 -0700 Subject: [PATCH 122/182] Added basic cognitoidentity (not working) --- moto/cognitoidentity/__init__.py | 7 + moto/cognitoidentity/models.py | 163 ++++ moto/cognitoidentity/responses.py | 104 +++ moto/cognitoidentity/urls.py | 10 + moto/cognitoidentity/utils.py | 23 + .../test_cognitoidentity.py | 762 ++++++++++++++++++ 6 files changed, 1069 insertions(+) create mode 100644 moto/cognitoidentity/__init__.py create mode 100644 moto/cognitoidentity/models.py create mode 100644 moto/cognitoidentity/responses.py create mode 100644 moto/cognitoidentity/urls.py create mode 100644 moto/cognitoidentity/utils.py create mode 100644 tests/test_cognitoidentity/test_cognitoidentity.py diff --git a/moto/cognitoidentity/__init__.py b/moto/cognitoidentity/__init__.py new file mode 100644 index 000000000..f9a6da7ed --- /dev/null +++ b/moto/cognitoidentity/__init__.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals +from .models import cognitoidentity_backends +from ..core.models import base_decorator, deprecated_base_decorator + +cognitoidentity_backend = cognitoidentity_backends['us-east-1'] +mock_datapipeline = base_decorator(cognitoidentity_backends) +mock_datapipeline_deprecated = deprecated_base_decorator(cognitoidentity_backends) diff --git a/moto/cognitoidentity/models.py b/moto/cognitoidentity/models.py new file mode 100644 index 000000000..f136d0799 --- /dev/null +++ b/moto/cognitoidentity/models.py @@ -0,0 +1,163 @@ +from __future__ import unicode_literals + +import datetime +import boto.cognito.identity +from moto.compat import OrderedDict +from moto.core import BaseBackend, BaseModel +from .utils import get_random_pipeline_id, remove_capitalization_of_dict_keys + + +class CognitoIdentityObject(BaseModel): + + def __init__(self, object_id, name, fields): + self.object_id = object_id + self.name = name + self.fields = fields + + def to_json(self): + return { + "fields": self.fields, + "id": self.object_id, + "name": self.name, + } + + +class CognitoIdentity(BaseModel): + + def __init__(self, name, unique_id, **kwargs): + self.name = name + # self.unique_id = unique_id + # self.description = kwargs.get('description', '') + self.identity_pool_id = get_random_identity_id() + # self.creation_time = datetime.datetime.utcnow() + # self.objects = [] + # self.status = "PENDING" + # self.tags = kwargs.get('tags', []) + + @property + def physical_resource_id(self): + return self.pipeline_id + + def to_meta_json(self): + return { + "id": self.pipeline_id, + "name": self.name, + } + + def to_json(self): + return { + "description": self.description, + "fields": [{ + "key": "@pipelineState", + "stringValue": self.status, + }, { + "key": "description", + "stringValue": self.description + }, { + "key": "name", + "stringValue": self.name + }, { + "key": "@creationTime", + "stringValue": datetime.datetime.strftime(self.creation_time, '%Y-%m-%dT%H-%M-%S'), + }, { + "key": "@id", + "stringValue": self.pipeline_id, + }, { + "key": "@sphere", + "stringValue": "PIPELINE" + }, { + "key": "@version", + "stringValue": "1" + }, { + "key": "@userId", + "stringValue": "924374875933" + }, { + "key": "@accountId", + "stringValue": "924374875933" + }, { + "key": "uniqueId", + "stringValue": self.unique_id + }], + "name": self.name, + "pipelineId": self.pipeline_id, + "tags": self.tags + } + + def set_pipeline_objects(self, pipeline_objects): + self.objects = [ + PipelineObject(pipeline_object['id'], pipeline_object[ + 'name'], pipeline_object['fields']) + for pipeline_object in remove_capitalization_of_dict_keys(pipeline_objects) + ] + + def activate(self): + self.status = "SCHEDULED" + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + datapipeline_backend = cognitoidentity_backends[region_name] + properties = cloudformation_json["Properties"] + + cloudformation_unique_id = "cf-" + properties["Name"] + pipeline = datapipeline_backend.create_pipeline( + properties["Name"], cloudformation_unique_id) + datapipeline_backend.put_pipeline_definition( + pipeline.pipeline_id, properties["PipelineObjects"]) + + if properties["Activate"]: + pipeline.activate() + return pipeline + + +class CognitoIdentityBackend(BaseBackend): + + def __init__(self, region): + self.region = region + self.identity_pools = OrderedDict() + + def create_identity_pool(self, identity_pool_name, allow_unauthenticated_identities, , region='us-east-1',**kwargs): + identity_pool = CognitoIdentity(identity_pool_name, allow_unauthenticated_identities, **kwargs) + self.identity_pools[identity_pool.identity_pool_name] = identity_pool + return identity_pool + + def delete_identity_pool(self, identity_pool_id): + pass + + def list_pipelines(self): + return self.pipelines.values() + + def describe_pipelines(self, pipeline_ids): + pipelines = [pipeline for pipeline in self.pipelines.values( + ) if pipeline.pipeline_id in pipeline_ids] + return pipelines + + def get_pipeline(self, pipeline_id): + return self.pipelines[pipeline_id] + + def delete_pipeline(self, pipeline_id): + self.pipelines.pop(pipeline_id, None) + + def put_pipeline_definition(self, pipeline_id, pipeline_objects): + pipeline = self.get_pipeline(pipeline_id) + pipeline.set_pipeline_objects(pipeline_objects) + + def get_pipeline_definition(self, pipeline_id): + pipeline = self.get_pipeline(pipeline_id) + return pipeline.objects + + def describe_objects(self, object_ids, pipeline_id): + pipeline = self.get_pipeline(pipeline_id) + pipeline_objects = [ + pipeline_object for pipeline_object in pipeline.objects + if pipeline_object.object_id in object_ids + ] + return pipeline_objects + + def activate_pipeline(self, pipeline_id): + pipeline = self.get_pipeline(pipeline_id) + pipeline.activate() + + +cognitoidentity_backends = {} +for region in boto.cognito.identity.regions(): + cognitoidentity_backends[region.name] = CognitoIdentityBackend(region.name) diff --git a/moto/cognitoidentity/responses.py b/moto/cognitoidentity/responses.py new file mode 100644 index 000000000..e462e3981 --- /dev/null +++ b/moto/cognitoidentity/responses.py @@ -0,0 +1,104 @@ +from __future__ import unicode_literals + +import json + +from moto.core.responses import BaseResponse +from .models import datapipeline_backends + + +class DataPipelineResponse(BaseResponse): + + @property + def parameters(self): + # TODO this should really be moved to core/responses.py + if self.body: + return json.loads(self.body) + else: + return self.querystring + + @property + def datapipeline_backend(self): + return datapipeline_backends[self.region] + + def create_pipeline(self): + name = self.parameters.get('name') + unique_id = self.parameters.get('uniqueId') + description = self.parameters.get('description', '') + tags = self.parameters.get('tags', []) + pipeline = self.datapipeline_backend.create_pipeline(name, unique_id, description=description, tags=tags) + return json.dumps({ + "pipelineId": pipeline.pipeline_id, + }) + + def list_pipelines(self): + pipelines = list(self.datapipeline_backend.list_pipelines()) + pipeline_ids = [pipeline.pipeline_id for pipeline in pipelines] + max_pipelines = 50 + marker = self.parameters.get('marker') + if marker: + start = pipeline_ids.index(marker) + 1 + else: + start = 0 + pipelines_resp = pipelines[start:start + max_pipelines] + has_more_results = False + marker = None + if start + max_pipelines < len(pipeline_ids) - 1: + has_more_results = True + marker = pipelines_resp[-1].pipeline_id + return json.dumps({ + "hasMoreResults": has_more_results, + "marker": marker, + "pipelineIdList": [ + pipeline.to_meta_json() for pipeline in pipelines_resp + ] + }) + + def describe_pipelines(self): + pipeline_ids = self.parameters["pipelineIds"] + pipelines = self.datapipeline_backend.describe_pipelines(pipeline_ids) + + return json.dumps({ + "pipelineDescriptionList": [ + pipeline.to_json() for pipeline in pipelines + ] + }) + + def delete_pipeline(self): + pipeline_id = self.parameters["pipelineId"] + self.datapipeline_backend.delete_pipeline(pipeline_id) + return json.dumps({}) + + def put_pipeline_definition(self): + pipeline_id = self.parameters["pipelineId"] + pipeline_objects = self.parameters["pipelineObjects"] + + self.datapipeline_backend.put_pipeline_definition( + pipeline_id, pipeline_objects) + return json.dumps({"errored": False}) + + def get_pipeline_definition(self): + pipeline_id = self.parameters["pipelineId"] + pipeline_definition = self.datapipeline_backend.get_pipeline_definition( + pipeline_id) + return json.dumps({ + "pipelineObjects": [pipeline_object.to_json() for pipeline_object in pipeline_definition] + }) + + def describe_objects(self): + pipeline_id = self.parameters["pipelineId"] + object_ids = self.parameters["objectIds"] + + pipeline_objects = self.datapipeline_backend.describe_objects( + object_ids, pipeline_id) + return json.dumps({ + "hasMoreResults": False, + "marker": None, + "pipelineObjects": [ + pipeline_object.to_json() for pipeline_object in pipeline_objects + ] + }) + + def activate_pipeline(self): + pipeline_id = self.parameters["pipelineId"] + self.datapipeline_backend.activate_pipeline(pipeline_id) + return json.dumps({}) diff --git a/moto/cognitoidentity/urls.py b/moto/cognitoidentity/urls.py new file mode 100644 index 000000000..40805874b --- /dev/null +++ b/moto/cognitoidentity/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import DataPipelineResponse + +url_bases = [ + "https?://datapipeline.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': DataPipelineResponse.dispatch, +} diff --git a/moto/cognitoidentity/utils.py b/moto/cognitoidentity/utils.py new file mode 100644 index 000000000..4832c2042 --- /dev/null +++ b/moto/cognitoidentity/utils.py @@ -0,0 +1,23 @@ +import collections +import six +from moto.core.utils import get_random_hex + + +def get_random_identity_id(region): + return "{0}:{0}".format(region, get_random_hex(length=19)) + + +def remove_capitalization_of_dict_keys(obj): + if isinstance(obj, collections.Mapping): + result = obj.__class__() + for key, value in obj.items(): + normalized_key = key[:1].lower() + key[1:] + result[normalized_key] = remove_capitalization_of_dict_keys(value) + return result + elif isinstance(obj, collections.Iterable) and not isinstance(obj, six.string_types): + result = obj.__class__() + for item in obj: + result += (remove_capitalization_of_dict_keys(item),) + return result + else: + return obj diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py new file mode 100644 index 000000000..e7a9f9174 --- /dev/null +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -0,0 +1,762 @@ +from __future__ import unicode_literals + +import base64 +import botocore.client +import boto3 +import hashlib +import io +import json +import zipfile +import sure # noqa + +from freezegun import freeze_time +from moto import mock_lambda, mock_s3, mock_ec2, settings + +_lambda_region = 'us-west-2' + + +def _process_lambda(func_str): + zip_output = io.BytesIO() + zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED) + zip_file.writestr('lambda_function.py', func_str) + zip_file.close() + zip_output.seek(0) + return zip_output.read() + + +def get_test_zip_file1(): + pfunc = """ +def lambda_handler(event, context): + return event +""" + return _process_lambda(pfunc) + + +def get_test_zip_file2(): + func_str = """ +import boto3 + +def lambda_handler(event, context): + ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url='http://{base_url}') + + volume_id = event.get('volume_id') + vol = ec2.Volume(volume_id) + + print('get volume details for %s\\nVolume - %s state=%s, size=%s' % (volume_id, volume_id, vol.state, vol.size)) + return event +""".format(base_url="motoserver:5000" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com") + return _process_lambda(func_str) + + +@mock_lambda +def test_list_functions(): + conn = boto3.client('lambda', 'us-west-2') + result = conn.list_functions() + result['Functions'].should.have.length_of(0) + + +@mock_lambda +def test_invoke_requestresponse_function(): + conn = boto3.client('lambda', 'us-west-2') + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': get_test_zip_file1(), + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + in_data = {'msg': 'So long and thanks for all the fish'} + success_result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse', + Payload=json.dumps(in_data)) + + success_result["StatusCode"].should.equal(202) + result_obj = json.loads( + base64.b64decode(success_result["LogResult"]).decode('utf-8')) + + result_obj.should.equal(in_data) + + payload = success_result["Payload"].read().decode('utf-8') + json.loads(payload).should.equal(in_data) + + +@mock_lambda +def test_invoke_event_function(): + conn = boto3.client('lambda', 'us-west-2') + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': get_test_zip_file1(), + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + conn.invoke.when.called_with( + FunctionName='notAFunction', + InvocationType='Event', + Payload='{}' + ).should.throw(botocore.client.ClientError) + + in_data = {'msg': 'So long and thanks for all the fish'} + success_result = conn.invoke( + FunctionName='testFunction', InvocationType='Event', Payload=json.dumps(in_data)) + success_result["StatusCode"].should.equal(202) + json.loads(success_result['Payload'].read().decode( + 'utf-8')).should.equal({}) + + +if settings.TEST_SERVER_MODE: + @mock_ec2 + @mock_lambda + def test_invoke_function_get_ec2_volume(): + conn = boto3.resource("ec2", "us-west-2") + vol = conn.create_volume(Size=99, AvailabilityZone='us-west-2') + vol = conn.Volume(vol.id) + + conn = boto3.client('lambda', 'us-west-2') + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': get_test_zip_file2(), + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + in_data = {'volume_id': vol.id} + result = conn.invoke(FunctionName='testFunction', + InvocationType='RequestResponse', Payload=json.dumps(in_data)) + result["StatusCode"].should.equal(202) + msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % ( + vol.id, vol.id, vol.state, vol.size, json.dumps(in_data)) + + log_result = base64.b64decode(result["LogResult"]).decode('utf-8') + + # fix for running under travis (TODO: investigate why it has an extra newline) + log_result = log_result.replace('\n\n', '\n') + log_result.should.equal(msg) + + payload = result['Payload'].read().decode('utf-8') + + # fix for running under travis (TODO: investigate why it has an extra newline) + payload = payload.replace('\n\n', '\n') + payload.should.equal(msg) + + +@mock_lambda +def test_create_based_on_s3_with_missing_bucket(): + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function.when.called_with( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'this-bucket-does-not-exist', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + VpcConfig={ + "SecurityGroupIds": ["sg-123abc"], + "SubnetIds": ["subnet-123abc"], + }, + ).should.throw(botocore.client.ClientError) + + +@mock_lambda +@mock_s3 +@freeze_time('2015-01-01 00:00:00') +def test_create_function_from_aws_bucket(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + zip_content = get_test_zip_file2() + + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + VpcConfig={ + "SecurityGroupIds": ["sg-123abc"], + "SubnetIds": ["subnet-123abc"], + }, + ) + # this is hard to match against, so remove it + result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + result['ResponseMetadata'].pop('RetryAttempts', None) + result.pop('LastModified') + result.should.equal({ + 'FunctionName': 'testFunction', + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + 'Runtime': 'python2.7', + 'Role': 'test-iam-role', + 'Handler': 'lambda_function.lambda_handler', + "CodeSha256": hashlib.sha256(zip_content).hexdigest(), + "CodeSize": len(zip_content), + 'Description': 'test lambda function', + 'Timeout': 3, + 'MemorySize': 128, + 'Version': '$LATEST', + 'VpcConfig': { + "SecurityGroupIds": ["sg-123abc"], + "SubnetIds": ["subnet-123abc"], + "VpcId": "vpc-123abc" + }, + 'ResponseMetadata': {'HTTPStatusCode': 201}, + }) + + +@mock_lambda +@freeze_time('2015-01-01 00:00:00') +def test_create_function_from_zipfile(): + conn = boto3.client('lambda', 'us-west-2') + zip_content = get_test_zip_file1() + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': zip_content, + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + # this is hard to match against, so remove it + result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + result['ResponseMetadata'].pop('RetryAttempts', None) + result.pop('LastModified') + + result.should.equal({ + 'FunctionName': 'testFunction', + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + 'Runtime': 'python2.7', + 'Role': 'test-iam-role', + 'Handler': 'lambda_function.lambda_handler', + 'CodeSize': len(zip_content), + 'Description': 'test lambda function', + 'Timeout': 3, + 'MemorySize': 128, + 'CodeSha256': hashlib.sha256(zip_content).hexdigest(), + 'Version': '$LATEST', + 'VpcConfig': { + "SecurityGroupIds": [], + "SubnetIds": [], + }, + + 'ResponseMetadata': {'HTTPStatusCode': 201}, + }) + + +@mock_lambda +@mock_s3 +@freeze_time('2015-01-01 00:00:00') +def test_get_function(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file1() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + result = conn.get_function(FunctionName='testFunction') + # this is hard to match against, so remove it + result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + result['ResponseMetadata'].pop('RetryAttempts', None) + result['Configuration'].pop('LastModified') + + result['Code']['Location'].should.equal('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip'.format(_lambda_region)) + result['Code']['RepositoryType'].should.equal('S3') + + result['Configuration']['CodeSha256'].should.equal(hashlib.sha256(zip_content).hexdigest()) + result['Configuration']['CodeSize'].should.equal(len(zip_content)) + result['Configuration']['Description'].should.equal('test lambda function') + result['Configuration'].should.contain('FunctionArn') + result['Configuration']['FunctionName'].should.equal('testFunction') + result['Configuration']['Handler'].should.equal('lambda_function.lambda_handler') + result['Configuration']['MemorySize'].should.equal(128) + result['Configuration']['Role'].should.equal('test-iam-role') + result['Configuration']['Runtime'].should.equal('python2.7') + result['Configuration']['Timeout'].should.equal(3) + result['Configuration']['Version'].should.equal('$LATEST') + result['Configuration'].should.contain('VpcConfig') + + # Test get function with + result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST') + result['Configuration']['Version'].should.equal('$LATEST') + + +@mock_lambda +@mock_s3 +def test_delete_function(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + success_result = conn.delete_function(FunctionName='testFunction') + # this is hard to match against, so remove it + success_result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + success_result['ResponseMetadata'].pop('RetryAttempts', None) + + success_result.should.equal({'ResponseMetadata': {'HTTPStatusCode': 204}}) + + conn.delete_function.when.called_with( + FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError) + + +@mock_lambda +@mock_s3 +def test_publish(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + function_list = conn.list_functions() + function_list['Functions'].should.have.length_of(1) + latest_arn = function_list['Functions'][0]['FunctionArn'] + + conn.publish_version(FunctionName='testFunction') + + function_list = conn.list_functions() + function_list['Functions'].should.have.length_of(2) + + # #SetComprehension ;-) + published_arn = list({f['FunctionArn'] for f in function_list['Functions']} - {latest_arn})[0] + published_arn.should.contain('testFunction:1') + + conn.delete_function(FunctionName='testFunction', Qualifier='1') + + function_list = conn.list_functions() + function_list['Functions'].should.have.length_of(1) + function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST') + + + +@mock_lambda +@mock_s3 +@freeze_time('2015-01-01 00:00:00') +def test_list_create_list_get_delete_list(): + """ + test `list -> create -> list -> get -> delete -> list` integration + + """ + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.list_functions()['Functions'].should.have.length_of(0) + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + expected_function_result = { + "Code": { + "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region), + "RepositoryType": "S3" + }, + "Configuration": { + "CodeSha256": hashlib.sha256(zip_content).hexdigest(), + "CodeSize": len(zip_content), + "Description": "test lambda function", + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionName": "testFunction", + "Handler": "lambda_function.lambda_handler", + "MemorySize": 128, + "Role": "test-iam-role", + "Runtime": "python2.7", + "Timeout": 3, + "Version": '$LATEST', + "VpcConfig": { + "SecurityGroupIds": [], + "SubnetIds": [], + } + }, + 'ResponseMetadata': {'HTTPStatusCode': 200}, + } + func = conn.list_functions()['Functions'][0] + func.pop('LastModified') + func.should.equal(expected_function_result['Configuration']) + + func = conn.get_function(FunctionName='testFunction') + # this is hard to match against, so remove it + func['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + func['ResponseMetadata'].pop('RetryAttempts', None) + func['Configuration'].pop('LastModified') + + func.should.equal(expected_function_result) + conn.delete_function(FunctionName='testFunction') + + conn.list_functions()['Functions'].should.have.length_of(0) + + +@mock_lambda +def test_invoke_lambda_error(): + lambda_fx = """ +def lambda_handler(event, context): + raise Exception('failsauce') + """ + zip_output = io.BytesIO() + zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED) + zip_file.writestr('lambda_function.py', lambda_fx) + zip_file.close() + zip_output.seek(0) + + client = boto3.client('lambda', region_name='us-east-1') + client.create_function( + FunctionName='test-lambda-fx', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + Code={ + 'ZipFile': zip_output.read() + }, + ) + + result = client.invoke( + FunctionName='test-lambda-fx', + InvocationType='RequestResponse', + LogType='Tail' + ) + + assert 'FunctionError' in result + assert result['FunctionError'] == 'Handled' + + +@mock_lambda +@mock_s3 +def test_tags(): + """ + test list_tags -> tag_resource -> list_tags -> tag_resource -> list_tags -> untag_resource -> list_tags integration + """ + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + function = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + # List tags when there are none + conn.list_tags( + Resource=function['FunctionArn'] + )['Tags'].should.equal(dict()) + + # List tags when there is one + conn.tag_resource( + Resource=function['FunctionArn'], + Tags=dict(spam='eggs') + )['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + conn.list_tags( + Resource=function['FunctionArn'] + )['Tags'].should.equal(dict(spam='eggs')) + + # List tags when another has been added + conn.tag_resource( + Resource=function['FunctionArn'], + Tags=dict(foo='bar') + )['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + conn.list_tags( + Resource=function['FunctionArn'] + )['Tags'].should.equal(dict(spam='eggs', foo='bar')) + + # Untag resource + conn.untag_resource( + Resource=function['FunctionArn'], + TagKeys=['spam', 'trolls'] + )['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + conn.list_tags( + Resource=function['FunctionArn'] + )['Tags'].should.equal(dict(foo='bar')) + + # Untag a tag that does not exist (no error and no change) + conn.untag_resource( + Resource=function['FunctionArn'], + TagKeys=['spam'] + )['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + + +@mock_lambda +def test_tags_not_found(): + """ + Test list_tags and tag_resource when the lambda with the given arn does not exist + """ + conn = boto3.client('lambda', 'us-west-2') + conn.list_tags.when.called_with( + Resource='arn:aws:lambda:123456789012:function:not-found' + ).should.throw(botocore.client.ClientError) + + conn.tag_resource.when.called_with( + Resource='arn:aws:lambda:123456789012:function:not-found', + Tags=dict(spam='eggs') + ).should.throw(botocore.client.ClientError) + + conn.untag_resource.when.called_with( + Resource='arn:aws:lambda:123456789012:function:not-found', + TagKeys=['spam'] + ).should.throw(botocore.client.ClientError) + + +@mock_lambda +def test_invoke_async_function(): + conn = boto3.client('lambda', 'us-west-2') + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={'ZipFile': get_test_zip_file1()}, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + success_result = conn.invoke_async( + FunctionName='testFunction', + InvokeArgs=json.dumps({'test': 'event'}) + ) + + success_result['Status'].should.equal(202) + + +@mock_lambda +@freeze_time('2015-01-01 00:00:00') +def test_get_function_created_with_zipfile(): + conn = boto3.client('lambda', 'us-west-2') + zip_content = get_test_zip_file1() + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'ZipFile': zip_content, + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.get_function( + FunctionName='testFunction' + ) + response['Configuration'].pop('LastModified') + + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + assert len(response['Code']) == 2 + assert response['Code']['RepositoryType'] == 'S3' + assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region)) + response['Configuration'].should.equal( + { + "CodeSha256": hashlib.sha256(zip_content).hexdigest(), + "CodeSize": len(zip_content), + "Description": "test lambda function", + "FunctionArn":'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionName": "testFunction", + "Handler": "lambda_function.handler", + "MemorySize": 128, + "Role": "test-iam-role", + "Runtime": "python2.7", + "Timeout": 3, + "Version": '$LATEST', + "VpcConfig": { + "SecurityGroupIds": [], + "SubnetIds": [], + } + }, + ) + + +@mock_lambda +def add_function_permission(): + conn = boto3.client('lambda', 'us-west-2') + zip_content = get_test_zip_file1() + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'ZipFile': zip_content, + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.add_permission( + FunctionName='testFunction', + StatementId='1', + Action="lambda:InvokeFunction", + Principal='432143214321', + SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld", + SourceAccount='123412341234', + EventSourceToken='blah', + Qualifier='2' + ) + assert 'Statement' in response + res = json.loads(response['Statement']) + assert res['Action'] == "lambda:InvokeFunction" + + +@mock_lambda +def get_function_policy(): + conn = boto3.client('lambda', 'us-west-2') + zip_content = get_test_zip_file1() + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'ZipFile': zip_content, + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.add_permission( + FunctionName='testFunction', + StatementId='1', + Action="lambda:InvokeFunction", + Principal='432143214321', + SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld", + SourceAccount='123412341234', + EventSourceToken='blah', + Qualifier='2' + ) + + response = conn.get_policy( + FunctionName='testFunction' + ) + + assert 'Policy' in response + assert isinstance(response['Policy'], str) + res = json.loads(response['Policy']) + assert res['Statement'][0]['Action'] == 'lambda:InvokeFunction' From 7737832bf3d657240ebe485335148ff448b84ca6 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 29 Mar 2018 10:08:39 -0400 Subject: [PATCH 123/182] Fix bug adding None TTL to route53 responses. --- moto/route53/models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/route53/models.py b/moto/route53/models.py index af8bb690a..d483d22e2 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -140,7 +140,9 @@ class RecordSet(BaseModel): {% if record_set.region %} {{ record_set.region }} {% endif %} - {{ record_set.ttl }} + {% if record_set.ttl %} + {{ record_set.ttl }} + {% endif %} {% for record in record_set.records %} From 0a4d2037df89136e6b76ad5bdbb1ffad50c5064c Mon Sep 17 00:00:00 2001 From: Devin Bjelland Date: Thu, 29 Mar 2018 18:42:53 -0500 Subject: [PATCH 124/182] fix bug with Kinesis ResourceInUse exception (#1544) --- moto/kinesis/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/kinesis/exceptions.py b/moto/kinesis/exceptions.py index e2fe02775..82f796ecc 100644 --- a/moto/kinesis/exceptions.py +++ b/moto/kinesis/exceptions.py @@ -17,7 +17,7 @@ class ResourceNotFoundError(BadRequest): class ResourceInUseError(BadRequest): def __init__(self, message): - super(ResourceNotFoundError, self).__init__() + super(ResourceInUseError, self).__init__() self.description = json.dumps({ "message": message, '__type': 'ResourceInUseException', From dcd290c3c37d5be842c5b075b2d5f8ce2f8b874b Mon Sep 17 00:00:00 2001 From: Mohamed El Mouctar HAIDARA Date: Fri, 30 Mar 2018 15:09:02 +0200 Subject: [PATCH 125/182] Fix ApiGateway key identification API Gateway keys are identified by their ids and not their values - https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-api-key.html#examples - http://boto3.readthedocs.io/en/latest/reference/services/apigateway.html#APIGateway.Client.get_api_key --- moto/apigateway/models.py | 10 +++++----- tests/test_apigateway/test_apigateway.py | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 27a9b86c2..e9a8bb429 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -563,17 +563,17 @@ class APIGatewayBackend(BaseBackend): def create_apikey(self, payload): key = ApiKey(**payload) - self.keys[key['value']] = key + self.keys[key['id']] = key return key def get_apikeys(self): return list(self.keys.values()) - def get_apikey(self, value): - return self.keys[value] + def get_apikey(self, api_key_id): + return self.keys[api_key_id] - def delete_apikey(self, value): - self.keys.pop(value) + def delete_apikey(self, api_key_id): + self.keys.pop(api_key_id) return {} diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 9e2307bdd..c6cb83a39 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -976,22 +976,22 @@ def test_api_keys(): apikey_name = 'TESTKEY1' payload = {'value': apikey_value, 'name': apikey_name} response = client.create_api_key(**payload) - apikey = client.get_api_key(apiKey=payload['value']) + apikey = client.get_api_key(apiKey=response['id']) apikey['name'].should.equal(apikey_name) apikey['value'].should.equal(apikey_value) apikey_name = 'TESTKEY2' payload = {'name': apikey_name, 'generateDistinctId': True} response = client.create_api_key(**payload) - apikey = client.get_api_key(apiKey=response['value']) + apikey_id = response['id'] + apikey = client.get_api_key(apiKey=apikey_id) apikey['name'].should.equal(apikey_name) len(apikey['value']).should.equal(40) - apikey_value = apikey['value'] response = client.get_api_keys() len(response['items']).should.equal(2) - client.delete_api_key(apiKey=apikey_value) + client.delete_api_key(apiKey=apikey_id) response = client.get_api_keys() len(response['items']).should.equal(1) From 4184acc0d27b584a921939fb85467cebf27bb025 Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Mon, 2 Apr 2018 14:19:14 -0700 Subject: [PATCH 126/182] Added Filtering support for S3 lifecycle (#1535) * Added Filtering support for S3 lifecycle Also added `ExpiredObjectDeleteMarker`. closes #1533 closes #1479 * Result set no longer contains "Prefix" if "Filter" is set. --- moto/s3/models.py | 61 ++++++++++- moto/s3/responses.py | 28 ++++- setup.py | 4 +- tests/test_s3/test_s3_lifecycle.py | 167 ++++++++++++++++++++++++++++- 4 files changed, 253 insertions(+), 7 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index c414225de..3b4623d61 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -15,7 +15,7 @@ from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey, \ - InvalidNotificationDestination + InvalidNotificationDestination, MalformedXML from .utils import clean_key_name, _VersionedKeyStore UPLOAD_ID_BYTES = 43 @@ -311,18 +311,35 @@ class FakeTag(BaseModel): self.value = value +class LifecycleFilter(BaseModel): + + def __init__(self, prefix=None, tag=None, and_filter=None): + self.prefix = prefix or '' + self.tag = tag + self.and_filter = and_filter + + +class LifecycleAndFilter(BaseModel): + + def __init__(self, prefix=None, tags=None): + self.prefix = prefix or '' + self.tags = tags + + class LifecycleRule(BaseModel): - def __init__(self, id=None, prefix=None, status=None, expiration_days=None, - expiration_date=None, transition_days=None, + def __init__(self, id=None, prefix=None, lc_filter=None, status=None, expiration_days=None, + expiration_date=None, transition_days=None, expired_object_delete_marker=None, transition_date=None, storage_class=None): self.id = id self.prefix = prefix + self.filter = lc_filter self.status = status self.expiration_days = expiration_days self.expiration_date = expiration_date self.transition_days = transition_days self.transition_date = transition_date + self.expired_object_delete_marker = expired_object_delete_marker self.storage_class = storage_class @@ -387,12 +404,50 @@ class FakeBucket(BaseModel): for rule in rules: expiration = rule.get('Expiration') transition = rule.get('Transition') + + eodm = None + if expiration and expiration.get("ExpiredObjectDeleteMarker") is not None: + # This cannot be set if Date or Days is set: + if expiration.get("Days") or expiration.get("Date"): + raise MalformedXML() + eodm = expiration["ExpiredObjectDeleteMarker"] + + # Pull out the filter: + lc_filter = None + if rule.get("Filter"): + # Can't have both `Filter` and `Prefix` (need to check for the presence of the key): + try: + if rule["Prefix"] or not rule["Prefix"]: + raise MalformedXML() + except KeyError: + pass + + and_filter = None + if rule["Filter"].get("And"): + and_tags = [] + if rule["Filter"]["And"].get("Tag"): + if not isinstance(rule["Filter"]["And"]["Tag"], list): + rule["Filter"]["And"]["Tag"] = [rule["Filter"]["And"]["Tag"]] + + for t in rule["Filter"]["And"]["Tag"]: + and_tags.append(FakeTag(t["Key"], t.get("Value", ''))) + + and_filter = LifecycleAndFilter(prefix=rule["Filter"]["And"]["Prefix"], tags=and_tags) + + filter_tag = None + if rule["Filter"].get("Tag"): + filter_tag = FakeTag(rule["Filter"]["Tag"]["Key"], rule["Filter"]["Tag"].get("Value", '')) + + lc_filter = LifecycleFilter(prefix=rule["Filter"]["Prefix"], tag=filter_tag, and_filter=and_filter) + self.rules.append(LifecycleRule( id=rule.get('ID'), prefix=rule.get('Prefix'), + lc_filter=lc_filter, status=rule['Status'], expiration_days=expiration.get('Days') if expiration else None, expiration_date=expiration.get('Date') if expiration else None, + expired_object_delete_marker=eodm, transition_days=transition.get('Days') if transition else None, transition_date=transition.get('Date') if transition else None, storage_class=transition[ diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 5ae3b0ede..02a9ac40e 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1176,7 +1176,30 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {% for rule in rules %} {{ rule.id }} + {% if rule.filter %} + + {{ rule.filter.prefix }} + {% if rule.filter.tag %} + + {{ rule.filter.tag.key }} + {{ rule.filter.tag.value }} + + {% endif %} + {% if rule.filter.and_filter %} + + {{ rule.filter.and_filter.prefix }} + {% for tag in rule.filter.and_filter.tags %} + + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + + {% endif %} + + {% else %} {{ rule.prefix if rule.prefix != None }} + {% endif %} {{ rule.status }} {% if rule.storage_class %} @@ -1189,7 +1212,7 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {{ rule.storage_class }} {% endif %} - {% if rule.expiration_days or rule.expiration_date %} + {% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %} {% if rule.expiration_days %} {{ rule.expiration_days }} @@ -1197,6 +1220,9 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {% if rule.expiration_date %} {{ rule.expiration_date }} {% endif %} + {% if rule.expired_object_delete_marker %} + {{ rule.expired_object_delete_marker }} + {% endif %} {% endif %} diff --git a/setup.py b/setup.py index f1570c496..1f135ae7b 100755 --- a/setup.py +++ b/setup.py @@ -8,8 +8,8 @@ import sys install_requires = [ "Jinja2>=2.7.3", "boto>=2.36.0", - "boto3>=1.2.1", - "botocore>=1.7.12", + "boto3>=1.6.16", + "botocore>=1.9.16", "cookies", "cryptography>=2.0.0", "requests>=2.5", diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index 5cae8f790..d176e95c6 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -1,12 +1,16 @@ from __future__ import unicode_literals import boto +import boto3 from boto.exception import S3ResponseError from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule import sure # noqa +from botocore.exceptions import ClientError +from datetime import datetime +from nose.tools import assert_raises -from moto import mock_s3_deprecated +from moto import mock_s3_deprecated, mock_s3 @mock_s3_deprecated @@ -26,6 +30,167 @@ def test_lifecycle_create(): list(lifecycle.transition).should.equal([]) +@mock_s3 +def test_lifecycle_with_filters(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + # Create a lifecycle rule with a Filter (no tags): + lfc = { + "Rules": [ + { + "Expiration": { + "Days": 7 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert not result["Rules"][0]["Filter"].get("Tag") + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With a tag: + lfc["Rules"][0]["Filter"]["Tag"] = { + "Key": "mytag", + "Value": "mytagvalue" + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With And (single tag): + lfc["Rules"][0]["Filter"]["And"] = { + "Prefix": "some/prefix", + "Tags": [ + { + "Key": "mytag", + "Value": "mytagvalue" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == "" + assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" + assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 1 + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With multiple And tags: + lfc["Rules"][0]["Filter"]["And"] = { + "Prefix": "some/prefix", + "Tags": [ + { + "Key": "mytag", + "Value": "mytagvalue" + }, + { + "Key": "mytag2", + "Value": "mytagvalue2" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == "" + assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" + assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 2 + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" + assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # Can't have both filter and prefix: + lfc["Rules"][0]["Prefix"] = '' + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + lfc["Rules"][0]["Prefix"] = 'some/path' + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + # No filters -- just a prefix: + del lfc["Rules"][0]["Filter"] + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert not result["Rules"][0].get("Filter") + assert result["Rules"][0]["Prefix"] == "some/path" + + +@mock_s3 +def test_lifecycle_with_eodm(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "Expiration": { + "ExpiredObjectDeleteMarker": True + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] + + # Set to False: + lfc["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] = False + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert not result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] + + # With failure: + lfc["Rules"][0]["Expiration"]["Days"] = 7 + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + del lfc["Rules"][0]["Expiration"]["Days"] + + lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1) + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + @mock_s3_deprecated def test_lifecycle_with_glacier_transition(): conn = boto.s3.connect_to_region("us-west-1") From 83f4419d03293261250be9fe0fca04153c58eb4c Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 11:38:59 -0700 Subject: [PATCH 127/182] Added create_identity_pool and cleaned up test data. --- moto/__init__.py | 1 + moto/cognitoidentity/__init__.py | 4 +- moto/cognitoidentity/models.py | 188 ++--- moto/cognitoidentity/responses.py | 108 +-- moto/cognitoidentity/urls.py | 6 +- .../test_cognitoidentity.py | 783 +----------------- 6 files changed, 144 insertions(+), 946 deletions(-) diff --git a/moto/__init__.py b/moto/__init__.py index 9703f9f68..8c0de8c91 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -11,6 +11,7 @@ from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8 from .awslambda import mock_lambda, mock_lambda_deprecated # flake8: noqa from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated # flake8: noqa from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa +from .cognitoidentity import mock_cognitoidentity, mock_cognitoidentity_deprecated # flake8: noqa from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa diff --git a/moto/cognitoidentity/__init__.py b/moto/cognitoidentity/__init__.py index f9a6da7ed..2f040fa19 100644 --- a/moto/cognitoidentity/__init__.py +++ b/moto/cognitoidentity/__init__.py @@ -3,5 +3,5 @@ from .models import cognitoidentity_backends from ..core.models import base_decorator, deprecated_base_decorator cognitoidentity_backend = cognitoidentity_backends['us-east-1'] -mock_datapipeline = base_decorator(cognitoidentity_backends) -mock_datapipeline_deprecated = deprecated_base_decorator(cognitoidentity_backends) +mock_cognitoidentity = base_decorator(cognitoidentity_backends) +mock_cognitoidentity_deprecated = deprecated_base_decorator(cognitoidentity_backends) diff --git a/moto/cognitoidentity/models.py b/moto/cognitoidentity/models.py index f136d0799..95663de53 100644 --- a/moto/cognitoidentity/models.py +++ b/moto/cognitoidentity/models.py @@ -1,10 +1,12 @@ from __future__ import unicode_literals +import json import datetime import boto.cognito.identity from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel -from .utils import get_random_pipeline_id, remove_capitalization_of_dict_keys +from moto.core.utils import iso_8601_datetime_with_milliseconds +from .utils import get_random_identity_id, remove_capitalization_of_dict_keys class CognitoIdentityObject(BaseModel): @@ -24,139 +26,91 @@ class CognitoIdentityObject(BaseModel): class CognitoIdentity(BaseModel): - def __init__(self, name, unique_id, **kwargs): - self.name = name - # self.unique_id = unique_id - # self.description = kwargs.get('description', '') - self.identity_pool_id = get_random_identity_id() - # self.creation_time = datetime.datetime.utcnow() - # self.objects = [] - # self.status = "PENDING" - # self.tags = kwargs.get('tags', []) + def __init__(self, region, identity_pool_name, **kwargs): + self.identity_pool_name = identity_pool_name + self.allow_unauthenticated_identities = kwargs.get('allow_unauthenticated_identities', '') + self.supported_login_providers = kwargs.get('supported_login_providers', {}) + self.developer_provider_name = kwargs.get('developer_provider_name', '') + self.open_id_connect_provider_arns = kwargs.get('open_id_connect_provider_arns', []) + self.cognito_identity_providers = kwargs.get('cognito_identity_providers', []) + self.saml_provider_arns = kwargs.get('saml_provider_arns', []) - @property - def physical_resource_id(self): - return self.pipeline_id - - def to_meta_json(self): - return { - "id": self.pipeline_id, - "name": self.name, - } - - def to_json(self): - return { - "description": self.description, - "fields": [{ - "key": "@pipelineState", - "stringValue": self.status, - }, { - "key": "description", - "stringValue": self.description - }, { - "key": "name", - "stringValue": self.name - }, { - "key": "@creationTime", - "stringValue": datetime.datetime.strftime(self.creation_time, '%Y-%m-%dT%H-%M-%S'), - }, { - "key": "@id", - "stringValue": self.pipeline_id, - }, { - "key": "@sphere", - "stringValue": "PIPELINE" - }, { - "key": "@version", - "stringValue": "1" - }, { - "key": "@userId", - "stringValue": "924374875933" - }, { - "key": "@accountId", - "stringValue": "924374875933" - }, { - "key": "uniqueId", - "stringValue": self.unique_id - }], - "name": self.name, - "pipelineId": self.pipeline_id, - "tags": self.tags - } - - def set_pipeline_objects(self, pipeline_objects): - self.objects = [ - PipelineObject(pipeline_object['id'], pipeline_object[ - 'name'], pipeline_object['fields']) - for pipeline_object in remove_capitalization_of_dict_keys(pipeline_objects) - ] - - def activate(self): - self.status = "SCHEDULED" - - @classmethod - def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): - datapipeline_backend = cognitoidentity_backends[region_name] - properties = cloudformation_json["Properties"] - - cloudformation_unique_id = "cf-" + properties["Name"] - pipeline = datapipeline_backend.create_pipeline( - properties["Name"], cloudformation_unique_id) - datapipeline_backend.put_pipeline_definition( - pipeline.pipeline_id, properties["PipelineObjects"]) - - if properties["Activate"]: - pipeline.activate() - return pipeline + self.identity_pool_id = get_random_identity_id(region) + self.creation_time = datetime.datetime.utcnow() class CognitoIdentityBackend(BaseBackend): def __init__(self, region): + super(CognitoIdentityBackend, self).__init__() self.region = region self.identity_pools = OrderedDict() - def create_identity_pool(self, identity_pool_name, allow_unauthenticated_identities, , region='us-east-1',**kwargs): - identity_pool = CognitoIdentity(identity_pool_name, allow_unauthenticated_identities, **kwargs) - self.identity_pools[identity_pool.identity_pool_name] = identity_pool - return identity_pool + def reset(self): + region = self.region + self.__dict__ = {} + self.__init__(region) - def delete_identity_pool(self, identity_pool_id): - pass + def create_identity_pool(self, identity_pool_name, allow_unauthenticated_identities, + supported_login_providers, developer_provider_name, open_id_connect_provider_arns, + cognito_identity_providers, saml_provider_arns): - def list_pipelines(self): - return self.pipelines.values() + new_identity = CognitoIdentity(self.region, identity_pool_name, + allow_unauthenticated_identities=allow_unauthenticated_identities, + supported_login_providers=supported_login_providers, + developer_provider_name=developer_provider_name, + open_id_connect_provider_arns=open_id_connect_provider_arns, + cognito_identity_providers=cognito_identity_providers, + saml_provider_arns=saml_provider_arns) + self.identity_pools[new_identity.identity_pool_id] = new_identity - def describe_pipelines(self, pipeline_ids): - pipelines = [pipeline for pipeline in self.pipelines.values( - ) if pipeline.pipeline_id in pipeline_ids] - return pipelines + response = json.dumps({ + 'IdentityPoolId': new_identity.identity_pool_id, + 'IdentityPoolName': new_identity.identity_pool_name, + 'AllowUnauthenticatedIdentities': new_identity.allow_unauthenticated_identities, + 'SupportedLoginProviders': new_identity.supported_login_providers, + 'DeveloperProviderName': new_identity.developer_provider_name, + 'OpenIdConnectProviderARNs': new_identity.open_id_connect_provider_arns, + 'CognitoIdentityProviders': new_identity.cognito_identity_providers, + 'SamlProviderARNs': new_identity.saml_provider_arns + }) - def get_pipeline(self, pipeline_id): - return self.pipelines[pipeline_id] + return response - def delete_pipeline(self, pipeline_id): - self.pipelines.pop(pipeline_id, None) - def put_pipeline_definition(self, pipeline_id, pipeline_objects): - pipeline = self.get_pipeline(pipeline_id) - pipeline.set_pipeline_objects(pipeline_objects) + def get_id(self): + identity_id = {'IdentityId': get_random_identity_id(self.region)} + return json.dumps(identity_id) - def get_pipeline_definition(self, pipeline_id): - pipeline = self.get_pipeline(pipeline_id) - return pipeline.objects - - def describe_objects(self, object_ids, pipeline_id): - pipeline = self.get_pipeline(pipeline_id) - pipeline_objects = [ - pipeline_object for pipeline_object in pipeline.objects - if pipeline_object.object_id in object_ids - ] - return pipeline_objects - - def activate_pipeline(self, pipeline_id): - pipeline = self.get_pipeline(pipeline_id) - pipeline.activate() + def get_credentials_for_identity(self, identity_id): + duration = 90 + now = datetime.datetime.utcnow() + expiration = now + datetime.timedelta(seconds=duration) + expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) + return json.dumps({ + "Credentials": { + "AccessKeyId": "TESTACCESSKEY12345", + "Expiration": expiration_str, + "SecretKey": "ABCSECRETKEY", + "SessionToken": "ABC12345" + }, + "IdentityId": identity_id + }) + def get_open_id_token_for_developer_identity(self, identity_id): + duration = 90 + now = datetime.datetime.utcnow() + expiration = now + datetime.timedelta(seconds=duration) + expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) + return json.dumps({ + "Credentials": { + "AccessKeyId": "TESTACCESSKEY12345", + "Expiration": expiration_str, + "SecretKey": "ABCSECRETKEY", + "SessionToken": "ABC12345" + }, + "IdentityId": identity_id + }) cognitoidentity_backends = {} for region in boto.cognito.identity.regions(): diff --git a/moto/cognitoidentity/responses.py b/moto/cognitoidentity/responses.py index e462e3981..2285607a9 100644 --- a/moto/cognitoidentity/responses.py +++ b/moto/cognitoidentity/responses.py @@ -3,10 +3,10 @@ from __future__ import unicode_literals import json from moto.core.responses import BaseResponse -from .models import datapipeline_backends +from .models import cognitoidentity_backends -class DataPipelineResponse(BaseResponse): +class CognitoIdentityResponse(BaseResponse): @property def parameters(self): @@ -17,88 +17,32 @@ class DataPipelineResponse(BaseResponse): return self.querystring @property - def datapipeline_backend(self): - return datapipeline_backends[self.region] + def cognitoidentity_backend(self): + return cognitoidentity_backends[self.region] - def create_pipeline(self): - name = self.parameters.get('name') - unique_id = self.parameters.get('uniqueId') - description = self.parameters.get('description', '') - tags = self.parameters.get('tags', []) - pipeline = self.datapipeline_backend.create_pipeline(name, unique_id, description=description, tags=tags) - return json.dumps({ - "pipelineId": pipeline.pipeline_id, - }) + def create_identity_pool(self): + identity_pool_name = self._get_param('IdentityPoolName') + allow_unauthenticated_identities = self._get_param('AllowUnauthenticatedIdentities') + supported_login_providers = self._get_param('SupportedLoginProviders') + developer_provider_name = self._get_param('DeveloperProviderName') + open_id_connect_provider_arns = self._get_param('OpenIdConnectProviderARNs') + cognito_identity_providers = self._get_param('CognitoIdentityProviders') + saml_provider_arns = self._get_param('SamlProviderARNs') + return cognitoidentity_backends[self.region].create_identity_pool( + identity_pool_name=identity_pool_name, + allow_unauthenticated_identities=allow_unauthenticated_identities, + supported_login_providers=supported_login_providers, + developer_provider_name=developer_provider_name, + open_id_connect_provider_arns=open_id_connect_provider_arns, + cognito_identity_providers=cognito_identity_providers, + saml_provider_arns=saml_provider_arns) - def list_pipelines(self): - pipelines = list(self.datapipeline_backend.list_pipelines()) - pipeline_ids = [pipeline.pipeline_id for pipeline in pipelines] - max_pipelines = 50 - marker = self.parameters.get('marker') - if marker: - start = pipeline_ids.index(marker) + 1 - else: - start = 0 - pipelines_resp = pipelines[start:start + max_pipelines] - has_more_results = False - marker = None - if start + max_pipelines < len(pipeline_ids) - 1: - has_more_results = True - marker = pipelines_resp[-1].pipeline_id - return json.dumps({ - "hasMoreResults": has_more_results, - "marker": marker, - "pipelineIdList": [ - pipeline.to_meta_json() for pipeline in pipelines_resp - ] - }) + def get_id(self): + return cognitoidentity_backends[self.region].get_id() - def describe_pipelines(self): - pipeline_ids = self.parameters["pipelineIds"] - pipelines = self.datapipeline_backend.describe_pipelines(pipeline_ids) + def get_credentials_for_identity(self): + return cognitoidentity_backends[self.region].get_credentials_for_identity(self._get_param('IdentityId')) - return json.dumps({ - "pipelineDescriptionList": [ - pipeline.to_json() for pipeline in pipelines - ] - }) + def get_open_id_token_for_developer_identity(self): + return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(self._get_param('IdentityId')) - def delete_pipeline(self): - pipeline_id = self.parameters["pipelineId"] - self.datapipeline_backend.delete_pipeline(pipeline_id) - return json.dumps({}) - - def put_pipeline_definition(self): - pipeline_id = self.parameters["pipelineId"] - pipeline_objects = self.parameters["pipelineObjects"] - - self.datapipeline_backend.put_pipeline_definition( - pipeline_id, pipeline_objects) - return json.dumps({"errored": False}) - - def get_pipeline_definition(self): - pipeline_id = self.parameters["pipelineId"] - pipeline_definition = self.datapipeline_backend.get_pipeline_definition( - pipeline_id) - return json.dumps({ - "pipelineObjects": [pipeline_object.to_json() for pipeline_object in pipeline_definition] - }) - - def describe_objects(self): - pipeline_id = self.parameters["pipelineId"] - object_ids = self.parameters["objectIds"] - - pipeline_objects = self.datapipeline_backend.describe_objects( - object_ids, pipeline_id) - return json.dumps({ - "hasMoreResults": False, - "marker": None, - "pipelineObjects": [ - pipeline_object.to_json() for pipeline_object in pipeline_objects - ] - }) - - def activate_pipeline(self): - pipeline_id = self.parameters["pipelineId"] - self.datapipeline_backend.activate_pipeline(pipeline_id) - return json.dumps({}) diff --git a/moto/cognitoidentity/urls.py b/moto/cognitoidentity/urls.py index 40805874b..3fe63ef07 100644 --- a/moto/cognitoidentity/urls.py +++ b/moto/cognitoidentity/urls.py @@ -1,10 +1,10 @@ from __future__ import unicode_literals -from .responses import DataPipelineResponse +from .responses import CognitoIdentityResponse url_bases = [ - "https?://datapipeline.(.+).amazonaws.com", + "https?://cognito-identity.(.+).amazonaws.com", ] url_paths = { - '{0}/$': DataPipelineResponse.dispatch, + '{0}/$': CognitoIdentityResponse.dispatch, } diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index e7a9f9174..730f3dccf 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -10,753 +10,52 @@ import zipfile import sure # noqa from freezegun import freeze_time -from moto import mock_lambda, mock_s3, mock_ec2, settings +from moto import mock_cognitoidentity, settings -_lambda_region = 'us-west-2' +@mock_cognitoidentity +def test_create_identity_pool(): + conn = boto3.client('cognito-identity', 'us-west-2') - -def _process_lambda(func_str): - zip_output = io.BytesIO() - zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED) - zip_file.writestr('lambda_function.py', func_str) - zip_file.close() - zip_output.seek(0) - return zip_output.read() - - -def get_test_zip_file1(): - pfunc = """ -def lambda_handler(event, context): - return event -""" - return _process_lambda(pfunc) - - -def get_test_zip_file2(): - func_str = """ -import boto3 - -def lambda_handler(event, context): - ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url='http://{base_url}') - - volume_id = event.get('volume_id') - vol = ec2.Volume(volume_id) - - print('get volume details for %s\\nVolume - %s state=%s, size=%s' % (volume_id, volume_id, vol.state, vol.size)) - return event -""".format(base_url="motoserver:5000" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com") - return _process_lambda(func_str) - - -@mock_lambda -def test_list_functions(): - conn = boto3.client('lambda', 'us-west-2') - result = conn.list_functions() - result['Functions'].should.have.length_of(0) - - -@mock_lambda -def test_invoke_requestresponse_function(): - conn = boto3.client('lambda', 'us-west-2') - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'ZipFile': get_test_zip_file1(), - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - in_data = {'msg': 'So long and thanks for all the fish'} - success_result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse', - Payload=json.dumps(in_data)) - - success_result["StatusCode"].should.equal(202) - result_obj = json.loads( - base64.b64decode(success_result["LogResult"]).decode('utf-8')) - - result_obj.should.equal(in_data) - - payload = success_result["Payload"].read().decode('utf-8') - json.loads(payload).should.equal(in_data) - - -@mock_lambda -def test_invoke_event_function(): - conn = boto3.client('lambda', 'us-west-2') - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'ZipFile': get_test_zip_file1(), - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - conn.invoke.when.called_with( - FunctionName='notAFunction', - InvocationType='Event', - Payload='{}' - ).should.throw(botocore.client.ClientError) - - in_data = {'msg': 'So long and thanks for all the fish'} - success_result = conn.invoke( - FunctionName='testFunction', InvocationType='Event', Payload=json.dumps(in_data)) - success_result["StatusCode"].should.equal(202) - json.loads(success_result['Payload'].read().decode( - 'utf-8')).should.equal({}) - - -if settings.TEST_SERVER_MODE: - @mock_ec2 - @mock_lambda - def test_invoke_function_get_ec2_volume(): - conn = boto3.resource("ec2", "us-west-2") - vol = conn.create_volume(Size=99, AvailabilityZone='us-west-2') - vol = conn.Volume(vol.id) - - conn = boto3.client('lambda', 'us-west-2') - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'ZipFile': get_test_zip_file2(), + result = conn.create_identity_pool(IdentityPoolName='TestPool', + AllowUnauthenticatedIdentities=False, + SupportedLoginProviders={'graph.facebook.com':'123456789012345'}, + DeveloperProviderName='devname', + OpenIdConnectProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db',], + CognitoIdentityProviders=[ + { + 'ProviderName': 'testprovider', + 'ClientId': 'CLIENT12345', + 'ServerSideTokenCheck': True }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) + ], + SamlProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db',]) + assert result['IdentityPoolId'] != '' - in_data = {'volume_id': vol.id} - result = conn.invoke(FunctionName='testFunction', - InvocationType='RequestResponse', Payload=json.dumps(in_data)) - result["StatusCode"].should.equal(202) - msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % ( - vol.id, vol.id, vol.state, vol.size, json.dumps(in_data)) +@mock_cognitoidentity +def test_get_id(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_id(AccountId='someaccount', + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }) + assert result['IdentityId'].startswith('us-west-2') - log_result = base64.b64decode(result["LogResult"]).decode('utf-8') +@mock_cognitoidentity +def test_get_credentials_for_identity(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_credentials_for_identity(IdentityId='12345') + assert result['IdentityId'] == '12345' - # fix for running under travis (TODO: investigate why it has an extra newline) - log_result = log_result.replace('\n\n', '\n') - log_result.should.equal(msg) - - payload = result['Payload'].read().decode('utf-8') - - # fix for running under travis (TODO: investigate why it has an extra newline) - payload = payload.replace('\n\n', '\n') - payload.should.equal(msg) - - -@mock_lambda -def test_create_based_on_s3_with_missing_bucket(): - conn = boto3.client('lambda', 'us-west-2') - - conn.create_function.when.called_with( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'S3Bucket': 'this-bucket-does-not-exist', - 'S3Key': 'test.zip', - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - VpcConfig={ - "SecurityGroupIds": ["sg-123abc"], - "SubnetIds": ["subnet-123abc"], - }, - ).should.throw(botocore.client.ClientError) - - -@mock_lambda -@mock_s3 -@freeze_time('2015-01-01 00:00:00') -def test_create_function_from_aws_bucket(): - s3_conn = boto3.client('s3', 'us-west-2') - s3_conn.create_bucket(Bucket='test-bucket') - zip_content = get_test_zip_file2() - - s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) - conn = boto3.client('lambda', 'us-west-2') - - result = conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'S3Bucket': 'test-bucket', - 'S3Key': 'test.zip', - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - VpcConfig={ - "SecurityGroupIds": ["sg-123abc"], - "SubnetIds": ["subnet-123abc"], +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + IdentityId='12345', + Logins={ + 'someurl': '12345' }, + TokenDuration=123 ) - # this is hard to match against, so remove it - result['ResponseMetadata'].pop('HTTPHeaders', None) - # Botocore inserts retry attempts not seen in Python27 - result['ResponseMetadata'].pop('RetryAttempts', None) - result.pop('LastModified') - result.should.equal({ - 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), - 'Runtime': 'python2.7', - 'Role': 'test-iam-role', - 'Handler': 'lambda_function.lambda_handler', - "CodeSha256": hashlib.sha256(zip_content).hexdigest(), - "CodeSize": len(zip_content), - 'Description': 'test lambda function', - 'Timeout': 3, - 'MemorySize': 128, - 'Version': '$LATEST', - 'VpcConfig': { - "SecurityGroupIds": ["sg-123abc"], - "SubnetIds": ["subnet-123abc"], - "VpcId": "vpc-123abc" - }, - 'ResponseMetadata': {'HTTPStatusCode': 201}, - }) - - -@mock_lambda -@freeze_time('2015-01-01 00:00:00') -def test_create_function_from_zipfile(): - conn = boto3.client('lambda', 'us-west-2') - zip_content = get_test_zip_file1() - result = conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'ZipFile': zip_content, - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - # this is hard to match against, so remove it - result['ResponseMetadata'].pop('HTTPHeaders', None) - # Botocore inserts retry attempts not seen in Python27 - result['ResponseMetadata'].pop('RetryAttempts', None) - result.pop('LastModified') - - result.should.equal({ - 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), - 'Runtime': 'python2.7', - 'Role': 'test-iam-role', - 'Handler': 'lambda_function.lambda_handler', - 'CodeSize': len(zip_content), - 'Description': 'test lambda function', - 'Timeout': 3, - 'MemorySize': 128, - 'CodeSha256': hashlib.sha256(zip_content).hexdigest(), - 'Version': '$LATEST', - 'VpcConfig': { - "SecurityGroupIds": [], - "SubnetIds": [], - }, - - 'ResponseMetadata': {'HTTPStatusCode': 201}, - }) - - -@mock_lambda -@mock_s3 -@freeze_time('2015-01-01 00:00:00') -def test_get_function(): - s3_conn = boto3.client('s3', 'us-west-2') - s3_conn.create_bucket(Bucket='test-bucket') - - zip_content = get_test_zip_file1() - s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) - conn = boto3.client('lambda', 'us-west-2') - - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'S3Bucket': 'test-bucket', - 'S3Key': 'test.zip', - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - result = conn.get_function(FunctionName='testFunction') - # this is hard to match against, so remove it - result['ResponseMetadata'].pop('HTTPHeaders', None) - # Botocore inserts retry attempts not seen in Python27 - result['ResponseMetadata'].pop('RetryAttempts', None) - result['Configuration'].pop('LastModified') - - result['Code']['Location'].should.equal('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip'.format(_lambda_region)) - result['Code']['RepositoryType'].should.equal('S3') - - result['Configuration']['CodeSha256'].should.equal(hashlib.sha256(zip_content).hexdigest()) - result['Configuration']['CodeSize'].should.equal(len(zip_content)) - result['Configuration']['Description'].should.equal('test lambda function') - result['Configuration'].should.contain('FunctionArn') - result['Configuration']['FunctionName'].should.equal('testFunction') - result['Configuration']['Handler'].should.equal('lambda_function.lambda_handler') - result['Configuration']['MemorySize'].should.equal(128) - result['Configuration']['Role'].should.equal('test-iam-role') - result['Configuration']['Runtime'].should.equal('python2.7') - result['Configuration']['Timeout'].should.equal(3) - result['Configuration']['Version'].should.equal('$LATEST') - result['Configuration'].should.contain('VpcConfig') - - # Test get function with - result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST') - result['Configuration']['Version'].should.equal('$LATEST') - - -@mock_lambda -@mock_s3 -def test_delete_function(): - s3_conn = boto3.client('s3', 'us-west-2') - s3_conn.create_bucket(Bucket='test-bucket') - - zip_content = get_test_zip_file2() - s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) - conn = boto3.client('lambda', 'us-west-2') - - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'S3Bucket': 'test-bucket', - 'S3Key': 'test.zip', - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - success_result = conn.delete_function(FunctionName='testFunction') - # this is hard to match against, so remove it - success_result['ResponseMetadata'].pop('HTTPHeaders', None) - # Botocore inserts retry attempts not seen in Python27 - success_result['ResponseMetadata'].pop('RetryAttempts', None) - - success_result.should.equal({'ResponseMetadata': {'HTTPStatusCode': 204}}) - - conn.delete_function.when.called_with( - FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError) - - -@mock_lambda -@mock_s3 -def test_publish(): - s3_conn = boto3.client('s3', 'us-west-2') - s3_conn.create_bucket(Bucket='test-bucket') - - zip_content = get_test_zip_file2() - s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) - conn = boto3.client('lambda', 'us-west-2') - - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'S3Bucket': 'test-bucket', - 'S3Key': 'test.zip', - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - function_list = conn.list_functions() - function_list['Functions'].should.have.length_of(1) - latest_arn = function_list['Functions'][0]['FunctionArn'] - - conn.publish_version(FunctionName='testFunction') - - function_list = conn.list_functions() - function_list['Functions'].should.have.length_of(2) - - # #SetComprehension ;-) - published_arn = list({f['FunctionArn'] for f in function_list['Functions']} - {latest_arn})[0] - published_arn.should.contain('testFunction:1') - - conn.delete_function(FunctionName='testFunction', Qualifier='1') - - function_list = conn.list_functions() - function_list['Functions'].should.have.length_of(1) - function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST') - - - -@mock_lambda -@mock_s3 -@freeze_time('2015-01-01 00:00:00') -def test_list_create_list_get_delete_list(): - """ - test `list -> create -> list -> get -> delete -> list` integration - - """ - s3_conn = boto3.client('s3', 'us-west-2') - s3_conn.create_bucket(Bucket='test-bucket') - - zip_content = get_test_zip_file2() - s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) - conn = boto3.client('lambda', 'us-west-2') - - conn.list_functions()['Functions'].should.have.length_of(0) - - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={ - 'S3Bucket': 'test-bucket', - 'S3Key': 'test.zip', - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - expected_function_result = { - "Code": { - "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region), - "RepositoryType": "S3" - }, - "Configuration": { - "CodeSha256": hashlib.sha256(zip_content).hexdigest(), - "CodeSize": len(zip_content), - "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), - "FunctionName": "testFunction", - "Handler": "lambda_function.lambda_handler", - "MemorySize": 128, - "Role": "test-iam-role", - "Runtime": "python2.7", - "Timeout": 3, - "Version": '$LATEST', - "VpcConfig": { - "SecurityGroupIds": [], - "SubnetIds": [], - } - }, - 'ResponseMetadata': {'HTTPStatusCode': 200}, - } - func = conn.list_functions()['Functions'][0] - func.pop('LastModified') - func.should.equal(expected_function_result['Configuration']) - - func = conn.get_function(FunctionName='testFunction') - # this is hard to match against, so remove it - func['ResponseMetadata'].pop('HTTPHeaders', None) - # Botocore inserts retry attempts not seen in Python27 - func['ResponseMetadata'].pop('RetryAttempts', None) - func['Configuration'].pop('LastModified') - - func.should.equal(expected_function_result) - conn.delete_function(FunctionName='testFunction') - - conn.list_functions()['Functions'].should.have.length_of(0) - - -@mock_lambda -def test_invoke_lambda_error(): - lambda_fx = """ -def lambda_handler(event, context): - raise Exception('failsauce') - """ - zip_output = io.BytesIO() - zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED) - zip_file.writestr('lambda_function.py', lambda_fx) - zip_file.close() - zip_output.seek(0) - - client = boto3.client('lambda', region_name='us-east-1') - client.create_function( - FunctionName='test-lambda-fx', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - Code={ - 'ZipFile': zip_output.read() - }, - ) - - result = client.invoke( - FunctionName='test-lambda-fx', - InvocationType='RequestResponse', - LogType='Tail' - ) - - assert 'FunctionError' in result - assert result['FunctionError'] == 'Handled' - - -@mock_lambda -@mock_s3 -def test_tags(): - """ - test list_tags -> tag_resource -> list_tags -> tag_resource -> list_tags -> untag_resource -> list_tags integration - """ - s3_conn = boto3.client('s3', 'us-west-2') - s3_conn.create_bucket(Bucket='test-bucket') - - zip_content = get_test_zip_file2() - s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) - conn = boto3.client('lambda', 'us-west-2') - - function = conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.handler', - Code={ - 'S3Bucket': 'test-bucket', - 'S3Key': 'test.zip', - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - # List tags when there are none - conn.list_tags( - Resource=function['FunctionArn'] - )['Tags'].should.equal(dict()) - - # List tags when there is one - conn.tag_resource( - Resource=function['FunctionArn'], - Tags=dict(spam='eggs') - )['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - conn.list_tags( - Resource=function['FunctionArn'] - )['Tags'].should.equal(dict(spam='eggs')) - - # List tags when another has been added - conn.tag_resource( - Resource=function['FunctionArn'], - Tags=dict(foo='bar') - )['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - conn.list_tags( - Resource=function['FunctionArn'] - )['Tags'].should.equal(dict(spam='eggs', foo='bar')) - - # Untag resource - conn.untag_resource( - Resource=function['FunctionArn'], - TagKeys=['spam', 'trolls'] - )['ResponseMetadata']['HTTPStatusCode'].should.equal(204) - conn.list_tags( - Resource=function['FunctionArn'] - )['Tags'].should.equal(dict(foo='bar')) - - # Untag a tag that does not exist (no error and no change) - conn.untag_resource( - Resource=function['FunctionArn'], - TagKeys=['spam'] - )['ResponseMetadata']['HTTPStatusCode'].should.equal(204) - - -@mock_lambda -def test_tags_not_found(): - """ - Test list_tags and tag_resource when the lambda with the given arn does not exist - """ - conn = boto3.client('lambda', 'us-west-2') - conn.list_tags.when.called_with( - Resource='arn:aws:lambda:123456789012:function:not-found' - ).should.throw(botocore.client.ClientError) - - conn.tag_resource.when.called_with( - Resource='arn:aws:lambda:123456789012:function:not-found', - Tags=dict(spam='eggs') - ).should.throw(botocore.client.ClientError) - - conn.untag_resource.when.called_with( - Resource='arn:aws:lambda:123456789012:function:not-found', - TagKeys=['spam'] - ).should.throw(botocore.client.ClientError) - - -@mock_lambda -def test_invoke_async_function(): - conn = boto3.client('lambda', 'us-west-2') - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.lambda_handler', - Code={'ZipFile': get_test_zip_file1()}, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - success_result = conn.invoke_async( - FunctionName='testFunction', - InvokeArgs=json.dumps({'test': 'event'}) - ) - - success_result['Status'].should.equal(202) - - -@mock_lambda -@freeze_time('2015-01-01 00:00:00') -def test_get_function_created_with_zipfile(): - conn = boto3.client('lambda', 'us-west-2') - zip_content = get_test_zip_file1() - result = conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.handler', - Code={ - 'ZipFile': zip_content, - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - response = conn.get_function( - FunctionName='testFunction' - ) - response['Configuration'].pop('LastModified') - - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - assert len(response['Code']) == 2 - assert response['Code']['RepositoryType'] == 'S3' - assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region)) - response['Configuration'].should.equal( - { - "CodeSha256": hashlib.sha256(zip_content).hexdigest(), - "CodeSize": len(zip_content), - "Description": "test lambda function", - "FunctionArn":'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), - "FunctionName": "testFunction", - "Handler": "lambda_function.handler", - "MemorySize": 128, - "Role": "test-iam-role", - "Runtime": "python2.7", - "Timeout": 3, - "Version": '$LATEST', - "VpcConfig": { - "SecurityGroupIds": [], - "SubnetIds": [], - } - }, - ) - - -@mock_lambda -def add_function_permission(): - conn = boto3.client('lambda', 'us-west-2') - zip_content = get_test_zip_file1() - result = conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.handler', - Code={ - 'ZipFile': zip_content, - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - response = conn.add_permission( - FunctionName='testFunction', - StatementId='1', - Action="lambda:InvokeFunction", - Principal='432143214321', - SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld", - SourceAccount='123412341234', - EventSourceToken='blah', - Qualifier='2' - ) - assert 'Statement' in response - res = json.loads(response['Statement']) - assert res['Action'] == "lambda:InvokeFunction" - - -@mock_lambda -def get_function_policy(): - conn = boto3.client('lambda', 'us-west-2') - zip_content = get_test_zip_file1() - result = conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.handler', - Code={ - 'ZipFile': zip_content, - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - response = conn.add_permission( - FunctionName='testFunction', - StatementId='1', - Action="lambda:InvokeFunction", - Principal='432143214321', - SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld", - SourceAccount='123412341234', - EventSourceToken='blah', - Qualifier='2' - ) - - response = conn.get_policy( - FunctionName='testFunction' - ) - - assert 'Policy' in response - assert isinstance(response['Policy'], str) - res = json.loads(response['Policy']) - assert res['Statement'][0]['Action'] == 'lambda:InvokeFunction' + assert result['IdentityId'] == '12345' From 433997629fd59cc0b3996eacecb538035a475e60 Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 11:58:11 -0700 Subject: [PATCH 128/182] Cleaned up unused method and import --- moto/cognitoidentity/models.py | 2 +- moto/cognitoidentity/utils.py | 16 ---------------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/moto/cognitoidentity/models.py b/moto/cognitoidentity/models.py index 95663de53..d0ec2d735 100644 --- a/moto/cognitoidentity/models.py +++ b/moto/cognitoidentity/models.py @@ -6,7 +6,7 @@ import boto.cognito.identity from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds -from .utils import get_random_identity_id, remove_capitalization_of_dict_keys +from .utils import get_random_identity_id class CognitoIdentityObject(BaseModel): diff --git a/moto/cognitoidentity/utils.py b/moto/cognitoidentity/utils.py index 4832c2042..d885e0e81 100644 --- a/moto/cognitoidentity/utils.py +++ b/moto/cognitoidentity/utils.py @@ -5,19 +5,3 @@ from moto.core.utils import get_random_hex def get_random_identity_id(region): return "{0}:{0}".format(region, get_random_hex(length=19)) - - -def remove_capitalization_of_dict_keys(obj): - if isinstance(obj, collections.Mapping): - result = obj.__class__() - for key, value in obj.items(): - normalized_key = key[:1].lower() + key[1:] - result[normalized_key] = remove_capitalization_of_dict_keys(value) - return result - elif isinstance(obj, collections.Iterable) and not isinstance(obj, six.string_types): - result = obj.__class__() - for item in obj: - result += (remove_capitalization_of_dict_keys(item),) - return result - else: - return obj From 5df0f1befc3a935b0ae974ed8f9b4a11161badf7 Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 12:08:53 -0700 Subject: [PATCH 129/182] Fixes for automated tests linting. --- moto/cognitoidentity/models.py | 48 +++++++++++++++---------------- moto/cognitoidentity/responses.py | 11 ++++--- moto/cognitoidentity/utils.py | 2 -- 3 files changed, 29 insertions(+), 32 deletions(-) diff --git a/moto/cognitoidentity/models.py b/moto/cognitoidentity/models.py index d0ec2d735..9b3ba0648 100644 --- a/moto/cognitoidentity/models.py +++ b/moto/cognitoidentity/models.py @@ -51,16 +51,16 @@ class CognitoIdentityBackend(BaseBackend): self.__dict__ = {} self.__init__(region) - def create_identity_pool(self, identity_pool_name, allow_unauthenticated_identities, + def create_identity_pool(self, identity_pool_name, allow_unauthenticated_identities, supported_login_providers, developer_provider_name, open_id_connect_provider_arns, - cognito_identity_providers, saml_provider_arns): + cognito_identity_providers, saml_provider_arns): - new_identity = CognitoIdentity(self.region, identity_pool_name, - allow_unauthenticated_identities=allow_unauthenticated_identities, - supported_login_providers=supported_login_providers, - developer_provider_name=developer_provider_name, + new_identity = CognitoIdentity(self.region, identity_pool_name, + allow_unauthenticated_identities=allow_unauthenticated_identities, + supported_login_providers=supported_login_providers, + developer_provider_name=developer_provider_name, open_id_connect_provider_arns=open_id_connect_provider_arns, - cognito_identity_providers=cognito_identity_providers, + cognito_identity_providers=cognito_identity_providers, saml_provider_arns=saml_provider_arns) self.identity_pools[new_identity.identity_pool_id] = new_identity @@ -77,7 +77,6 @@ class CognitoIdentityBackend(BaseBackend): return response - def get_id(self): identity_id = {'IdentityId': get_random_identity_id(self.region)} return json.dumps(identity_id) @@ -88,14 +87,14 @@ class CognitoIdentityBackend(BaseBackend): expiration = now + datetime.timedelta(seconds=duration) expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) return json.dumps({ - "Credentials": { - "AccessKeyId": "TESTACCESSKEY12345", - "Expiration": expiration_str, - "SecretKey": "ABCSECRETKEY", - "SessionToken": "ABC12345" - }, - "IdentityId": identity_id - }) + "Credentials": { + "AccessKeyId": "TESTACCESSKEY12345", + "Expiration": expiration_str, + "SecretKey": "ABCSECRETKEY", + "SessionToken": "ABC12345" + }, + "IdentityId": identity_id + }) def get_open_id_token_for_developer_identity(self, identity_id): duration = 90 @@ -103,14 +102,15 @@ class CognitoIdentityBackend(BaseBackend): expiration = now + datetime.timedelta(seconds=duration) expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) return json.dumps({ - "Credentials": { - "AccessKeyId": "TESTACCESSKEY12345", - "Expiration": expiration_str, - "SecretKey": "ABCSECRETKEY", - "SessionToken": "ABC12345" - }, - "IdentityId": identity_id - }) + "Credentials": { + "AccessKeyId": "TESTACCESSKEY12345", + "Expiration": expiration_str, + "SecretKey": "ABCSECRETKEY", + "SessionToken": "ABC12345" + }, + "IdentityId": identity_id + }) + cognitoidentity_backends = {} for region in boto.cognito.identity.regions(): diff --git a/moto/cognitoidentity/responses.py b/moto/cognitoidentity/responses.py index 2285607a9..33afd47c1 100644 --- a/moto/cognitoidentity/responses.py +++ b/moto/cognitoidentity/responses.py @@ -30,11 +30,11 @@ class CognitoIdentityResponse(BaseResponse): saml_provider_arns = self._get_param('SamlProviderARNs') return cognitoidentity_backends[self.region].create_identity_pool( identity_pool_name=identity_pool_name, - allow_unauthenticated_identities=allow_unauthenticated_identities, - supported_login_providers=supported_login_providers, + allow_unauthenticated_identities=allow_unauthenticated_identities, + supported_login_providers=supported_login_providers, developer_provider_name=developer_provider_name, - open_id_connect_provider_arns=open_id_connect_provider_arns, - cognito_identity_providers=cognito_identity_providers, + open_id_connect_provider_arns=open_id_connect_provider_arns, + cognito_identity_providers=cognito_identity_providers, saml_provider_arns=saml_provider_arns) def get_id(self): @@ -44,5 +44,4 @@ class CognitoIdentityResponse(BaseResponse): return cognitoidentity_backends[self.region].get_credentials_for_identity(self._get_param('IdentityId')) def get_open_id_token_for_developer_identity(self): - return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(self._get_param('IdentityId')) - + return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(self._get_param('IdentityId')) \ No newline at end of file diff --git a/moto/cognitoidentity/utils.py b/moto/cognitoidentity/utils.py index d885e0e81..359631763 100644 --- a/moto/cognitoidentity/utils.py +++ b/moto/cognitoidentity/utils.py @@ -1,5 +1,3 @@ -import collections -import six from moto.core.utils import get_random_hex From ed495cdd9ef9021e5a86091e8e4fdcbd72223cc6 Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 12:17:34 -0700 Subject: [PATCH 130/182] More cleanup of identation and reordering / newlines for flake8 happiness. --- moto/cognitoidentity/models.py | 41 ++++++++++++++++++------------- moto/cognitoidentity/responses.py | 3 ++- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/moto/cognitoidentity/models.py b/moto/cognitoidentity/models.py index 9b3ba0648..4474b90bb 100644 --- a/moto/cognitoidentity/models.py +++ b/moto/cognitoidentity/models.py @@ -1,11 +1,14 @@ from __future__ import unicode_literals -import json import datetime +import json + import boto.cognito.identity + from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds + from .utils import get_random_identity_id @@ -86,14 +89,16 @@ class CognitoIdentityBackend(BaseBackend): now = datetime.datetime.utcnow() expiration = now + datetime.timedelta(seconds=duration) expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) - return json.dumps({ - "Credentials": { - "AccessKeyId": "TESTACCESSKEY12345", - "Expiration": expiration_str, - "SecretKey": "ABCSECRETKEY", - "SessionToken": "ABC12345" - }, - "IdentityId": identity_id + return json.dumps( + { + "Credentials": + { + "AccessKeyId": "TESTACCESSKEY12345", + "Expiration": expiration_str, + "SecretKey": "ABCSECRETKEY", + "SessionToken": "ABC12345" + }, + "IdentityId": identity_id }) def get_open_id_token_for_developer_identity(self, identity_id): @@ -101,14 +106,16 @@ class CognitoIdentityBackend(BaseBackend): now = datetime.datetime.utcnow() expiration = now + datetime.timedelta(seconds=duration) expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) - return json.dumps({ - "Credentials": { - "AccessKeyId": "TESTACCESSKEY12345", - "Expiration": expiration_str, - "SecretKey": "ABCSECRETKEY", - "SessionToken": "ABC12345" - }, - "IdentityId": identity_id + return json.dumps( + { + "Credentials": + { + "AccessKeyId": "TESTACCESSKEY12345", + "Expiration": expiration_str, + "SecretKey": "ABCSECRETKEY", + "SessionToken": "ABC12345" + }, + "IdentityId": identity_id }) diff --git a/moto/cognitoidentity/responses.py b/moto/cognitoidentity/responses.py index 33afd47c1..cadf38133 100644 --- a/moto/cognitoidentity/responses.py +++ b/moto/cognitoidentity/responses.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals import json from moto.core.responses import BaseResponse + from .models import cognitoidentity_backends @@ -44,4 +45,4 @@ class CognitoIdentityResponse(BaseResponse): return cognitoidentity_backends[self.region].get_credentials_for_identity(self._get_param('IdentityId')) def get_open_id_token_for_developer_identity(self): - return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(self._get_param('IdentityId')) \ No newline at end of file + return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(self._get_param('IdentityId')) From 7f0723a0689f94f85f01a9558ff7964b2f865910 Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 12:57:21 -0700 Subject: [PATCH 131/182] Added missing backend for server mode. --- moto/backends.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/backends.py b/moto/backends.py index dc85aacdd..d8d317573 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -6,6 +6,7 @@ from moto.autoscaling import autoscaling_backends from moto.awslambda import lambda_backends from moto.cloudformation import cloudformation_backends from moto.cloudwatch import cloudwatch_backends +from moto.cognitoidentity import cognitoidentity_backends from moto.core import moto_api_backends from moto.datapipeline import datapipeline_backends from moto.dynamodb import dynamodb_backends @@ -49,6 +50,7 @@ BACKENDS = { 'batch': batch_backends, 'cloudformation': cloudformation_backends, 'cloudwatch': cloudwatch_backends, + 'cognito-identity': cognitoidentity_backends, 'datapipeline': datapipeline_backends, 'dynamodb': dynamodb_backends, 'dynamodb2': dynamodb_backends2, From 1046ee5041cc5743922bfa021c353ff0abefb929 Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 13:38:24 -0700 Subject: [PATCH 132/182] Added object to parsing and test server test for cognito. --- moto/cloudformation/parsing.py | 2 ++ tests/test_cognitoidentity/test_server.py | 27 +++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 tests/test_cognitoidentity/test_server.py diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 81f47f4a3..849d8c917 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -10,6 +10,7 @@ from moto.autoscaling import models as autoscaling_models from moto.awslambda import models as lambda_models from moto.batch import models as batch_models from moto.cloudwatch import models as cloudwatch_models +from moto.cognitoidentity import models as cognitoidentity_models from moto.datapipeline import models as datapipeline_models from moto.dynamodb import models as dynamodb_models from moto.ec2 import models as ec2_models @@ -65,6 +66,7 @@ MODEL_MAP = { "AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer, "AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup, "AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener, + "AWS::Cognito::IdentityPool": cognitoidentity_models.CognitoIdentity, "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline, "AWS::IAM::InstanceProfile": iam_models.InstanceProfile, "AWS::IAM::Role": iam_models.Role, diff --git a/tests/test_cognitoidentity/test_server.py b/tests/test_cognitoidentity/test_server.py new file mode 100644 index 000000000..0a6ae14d1 --- /dev/null +++ b/tests/test_cognitoidentity/test_server.py @@ -0,0 +1,27 @@ +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_cognitoidentity + +''' +Test the different server responses +''' + + +@mock_cognitoidentity +def test_create_identity_pool(): + + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data={"IdentityPoolName": "test", "AllowUnauthenticatedIdentities": True}, + headers={ + "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.CreateIdentityPool"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + assert json_data['IdentityPoolName'] == "test" From 2455de8282a87ea2b08cf2799e6db8968a39c24e Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 14:08:20 -0700 Subject: [PATCH 133/182] Added a print. --- .../test_cognitoidentity.py | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index 730f3dccf..4184441d1 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -1,26 +1,20 @@ from __future__ import unicode_literals -import base64 -import botocore.client import boto3 -import hashlib -import io -import json -import zipfile + +from moto import mock_cognitoidentity import sure # noqa -from freezegun import freeze_time -from moto import mock_cognitoidentity, settings @mock_cognitoidentity def test_create_identity_pool(): conn = boto3.client('cognito-identity', 'us-west-2') result = conn.create_identity_pool(IdentityPoolName='TestPool', - AllowUnauthenticatedIdentities=False, - SupportedLoginProviders={'graph.facebook.com':'123456789012345'}, + AllowUnauthenticatedIdentities=False, + SupportedLoginProviders={'graph.facebook.com': '123456789012345'}, DeveloperProviderName='devname', - OpenIdConnectProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db',], + OpenIdConnectProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db'], CognitoIdentityProviders=[ { 'ProviderName': 'testprovider', @@ -28,25 +22,29 @@ def test_create_identity_pool(): 'ServerSideTokenCheck': True }, ], - SamlProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db',]) + SamlProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db']) assert result['IdentityPoolId'] != '' + @mock_cognitoidentity def test_get_id(): conn = boto3.client('cognito-identity', 'us-west-2') result = conn.get_id(AccountId='someaccount', IdentityPoolId='us-west-2:12345', Logins={ - 'someurl': '12345' + 'someurl': '12345' }) + print(result) assert result['IdentityId'].startswith('us-west-2') + @mock_cognitoidentity def test_get_credentials_for_identity(): conn = boto3.client('cognito-identity', 'us-west-2') result = conn.get_credentials_for_identity(IdentityId='12345') assert result['IdentityId'] == '12345' + @mock_cognitoidentity def test_get_open_id_token_for_developer_identity(): conn = boto3.client('cognito-identity', 'us-west-2') From 229d453b99a061eb558378de73bc54471c4c0339 Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 16:27:30 -0700 Subject: [PATCH 134/182] Made some changes for server testing and added another get_id test. --- moto/cognitoidentity/models.py | 18 ++---------------- moto/cognitoidentity/responses.py | 14 -------------- .../test_cognitoidentity.py | 13 +++++++++++-- tests/test_cognitoidentity/test_server.py | 18 ++++++++++++++++++ 4 files changed, 31 insertions(+), 32 deletions(-) diff --git a/moto/cognitoidentity/models.py b/moto/cognitoidentity/models.py index 4474b90bb..21cd4f949 100644 --- a/moto/cognitoidentity/models.py +++ b/moto/cognitoidentity/models.py @@ -12,21 +12,6 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds from .utils import get_random_identity_id -class CognitoIdentityObject(BaseModel): - - def __init__(self, object_id, name, fields): - self.object_id = object_id - self.name = name - self.fields = fields - - def to_json(self): - return { - "fields": self.fields, - "id": self.object_id, - "name": self.name, - } - - class CognitoIdentity(BaseModel): def __init__(self, region, identity_pool_name, **kwargs): @@ -89,7 +74,7 @@ class CognitoIdentityBackend(BaseBackend): now = datetime.datetime.utcnow() expiration = now + datetime.timedelta(seconds=duration) expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) - return json.dumps( + response = json.dumps( { "Credentials": { @@ -100,6 +85,7 @@ class CognitoIdentityBackend(BaseBackend): }, "IdentityId": identity_id }) + return response def get_open_id_token_for_developer_identity(self, identity_id): duration = 90 diff --git a/moto/cognitoidentity/responses.py b/moto/cognitoidentity/responses.py index cadf38133..ea54b2cff 100644 --- a/moto/cognitoidentity/responses.py +++ b/moto/cognitoidentity/responses.py @@ -1,7 +1,5 @@ from __future__ import unicode_literals -import json - from moto.core.responses import BaseResponse from .models import cognitoidentity_backends @@ -9,18 +7,6 @@ from .models import cognitoidentity_backends class CognitoIdentityResponse(BaseResponse): - @property - def parameters(self): - # TODO this should really be moved to core/responses.py - if self.body: - return json.loads(self.body) - else: - return self.querystring - - @property - def cognitoidentity_backend(self): - return cognitoidentity_backends[self.region] - def create_identity_pool(self): identity_pool_name = self._get_param('IdentityPoolName') allow_unauthenticated_identities = self._get_param('AllowUnauthenticatedIdentities') diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index 4184441d1..2b54709a1 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -5,6 +5,8 @@ import boto3 from moto import mock_cognitoidentity import sure # noqa +from moto.cognitoidentity.utils import get_random_identity_id + @mock_cognitoidentity def test_create_identity_pool(): @@ -26,8 +28,14 @@ def test_create_identity_pool(): assert result['IdentityPoolId'] != '' +# testing a helper function +def test_get_random_identity_id(): + assert len(get_random_identity_id('us-west-2')) > 0 + + @mock_cognitoidentity def test_get_id(): + # These two do NOT work in server mode. They just don't return the data from the model. conn = boto3.client('cognito-identity', 'us-west-2') result = conn.get_id(AccountId='someaccount', IdentityPoolId='us-west-2:12345', @@ -35,14 +43,15 @@ def test_get_id(): 'someurl': '12345' }) print(result) - assert result['IdentityId'].startswith('us-west-2') + assert result.get('IdentityId', "").startswith('us-west-2') or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 @mock_cognitoidentity def test_get_credentials_for_identity(): conn = boto3.client('cognito-identity', 'us-west-2') result = conn.get_credentials_for_identity(IdentityId='12345') - assert result['IdentityId'] == '12345' + + assert result.get('IdentityId') == '12345' or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 @mock_cognitoidentity diff --git a/tests/test_cognitoidentity/test_server.py b/tests/test_cognitoidentity/test_server.py index 0a6ae14d1..b63d42bc0 100644 --- a/tests/test_cognitoidentity/test_server.py +++ b/tests/test_cognitoidentity/test_server.py @@ -25,3 +25,21 @@ def test_create_identity_pool(): json_data = json.loads(res.data.decode("utf-8")) assert json_data['IdentityPoolName'] == "test" + + +@mock_cognitoidentity +def test_get_id(): + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data=json.dumps({'AccountId': 'someaccount', + 'IdentityPoolId': 'us-west-2:12345', + 'Logins': {'someurl': '12345'}}), + headers={ + "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.GetId"}, + ) + + print(res.data) + json_data = json.loads(res.data.decode("utf-8")) + assert ':' in json_data['IdentityId'] From 49cce220acd80ff3702a268101de1fa94c36861c Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 16:40:45 -0700 Subject: [PATCH 135/182] Updated readme. --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 59dc67432..9642a8db6 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | CloudwatchEvents | @mock_events | all endpoints done | |------------------------------------------------------------------------------| +| Cognito Identity | @mock_cognitoidentity| basic endpoints done | +|------------------------------------------------------------------------------| | Data Pipeline | @mock_datapipeline| basic endpoints done | |------------------------------------------------------------------------------| | DynamoDB | @mock_dynamodb | core endpoints done | From 383b0c1c36c8f0e7416674d70e853b27c7d217c3 Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 17:05:36 -0700 Subject: [PATCH 136/182] Made some corrections to the developer identity response and added checks to add coverage. --- moto/cognitoidentity/models.py | 17 ++++------------- .../test_cognitoidentity.py | 2 ++ 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/moto/cognitoidentity/models.py b/moto/cognitoidentity/models.py index 21cd4f949..daa2a4641 100644 --- a/moto/cognitoidentity/models.py +++ b/moto/cognitoidentity/models.py @@ -88,21 +88,12 @@ class CognitoIdentityBackend(BaseBackend): return response def get_open_id_token_for_developer_identity(self, identity_id): - duration = 90 - now = datetime.datetime.utcnow() - expiration = now + datetime.timedelta(seconds=duration) - expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) - return json.dumps( + response = json.dumps( { - "Credentials": - { - "AccessKeyId": "TESTACCESSKEY12345", - "Expiration": expiration_str, - "SecretKey": "ABCSECRETKEY", - "SessionToken": "ABC12345" - }, - "IdentityId": identity_id + "IdentityId": identity_id, + "Token": get_random_identity_id(self.region) }) + return response cognitoidentity_backends = {} diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index 2b54709a1..c4f51ee4c 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -51,6 +51,7 @@ def test_get_credentials_for_identity(): conn = boto3.client('cognito-identity', 'us-west-2') result = conn.get_credentials_for_identity(IdentityId='12345') + assert result.get('Expiration') > 0 or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 assert result.get('IdentityId') == '12345' or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 @@ -65,4 +66,5 @@ def test_get_open_id_token_for_developer_identity(): }, TokenDuration=123 ) + assert len(result['Token']) assert result['IdentityId'] == '12345' From b86b46421067262aa603372f681a193c4fca397d Mon Sep 17 00:00:00 2001 From: Barry Ruffner Date: Tue, 3 Apr 2018 17:22:21 -0700 Subject: [PATCH 137/182] fix for an expiration test. --- tests/test_cognitoidentity/test_cognitoidentity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index c4f51ee4c..c752f3784 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -51,7 +51,7 @@ def test_get_credentials_for_identity(): conn = boto3.client('cognito-identity', 'us-west-2') result = conn.get_credentials_for_identity(IdentityId='12345') - assert result.get('Expiration') > 0 or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + assert result.get('Expiration', 0) > 0 or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 assert result.get('IdentityId') == '12345' or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 From f5f64be45b9e2ba1edca510e5aac151b9ea43047 Mon Sep 17 00:00:00 2001 From: Barry Date: Wed, 4 Apr 2018 00:28:39 -0700 Subject: [PATCH 138/182] Added comment --- tests/test_cognitoidentity/test_cognitoidentity.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index c752f3784..a38107b99 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -48,6 +48,7 @@ def test_get_id(): @mock_cognitoidentity def test_get_credentials_for_identity(): + # These two do NOT work in server mode. They just don't return the data from the model. conn = boto3.client('cognito-identity', 'us-west-2') result = conn.get_credentials_for_identity(IdentityId='12345') From ca72707409ad33f3101f936a2998660dfe6e0ea9 Mon Sep 17 00:00:00 2001 From: Josh Prendergast Date: Wed, 4 Apr 2018 17:24:41 +0100 Subject: [PATCH 139/182] Fix AttributeError in filter_log_events An AttributeError would be thrown if the `interleaved` parameter was passed. --- moto/logs/models.py | 2 +- tests/test_logs/test_logs.py | 30 +++++++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/moto/logs/models.py b/moto/logs/models.py index 6ff7f93bf..3ae697a27 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -184,7 +184,7 @@ class LogGroup: events += stream.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) if interleaved: - events = sorted(events, key=lambda event: event.timestamp) + events = sorted(events, key=lambda event: event['timestamp']) if next_token is None: next_token = 0 diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 0139723c9..a9a7f5260 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -85,4 +85,32 @@ def test_put_logs(): logStreamName=log_stream_name ) events = res['events'] - events.should.have.length_of(2) \ No newline at end of file + events.should.have.length_of(2) + + +@mock_logs +def test_filter_logs_interleaved(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.filter_log_events( + logGroupName=log_group_name, + logStreamNames=[log_stream_name], + interleaved=True, + ) + events = res['events'] + events.should.have.length_of(2) From e2af8bc83691b82da3e60ce35c2219f7a0ceff0a Mon Sep 17 00:00:00 2001 From: Tom Elliff Date: Wed, 4 Apr 2018 17:21:08 +0100 Subject: [PATCH 140/182] Allow tagging snapshots on creation Largely copying what was done for volume creation in https://github.com/spulec/moto/pull/1432 --- moto/ec2/responses/elastic_block_store.py | 13 +++++++ tests/test_ec2/test_tags.py | 42 +++++++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index 2d43f8ffb..cdc5b18e9 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -23,8 +23,11 @@ class ElasticBlockStore(BaseResponse): def create_snapshot(self): volume_id = self._get_param('VolumeId') description = self._get_param('Description') + tags = self._parse_tag_specification("TagSpecification") + snapshot_tags = tags.get('snapshot', {}) if self.is_not_dryrun('CreateSnapshot'): snapshot = self.ec2_backend.create_snapshot(volume_id, description) + snapshot.add_tags(snapshot_tags) template = self.response_template(CREATE_SNAPSHOT_RESPONSE) return template.render(snapshot=snapshot) @@ -233,6 +236,16 @@ CREATE_SNAPSHOT_RESPONSE = """ diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index d78fe24c3..c92a4f81f 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -409,3 +409,45 @@ def test_create_volume_with_tags(): ) assert response['Tags'][0]['Key'] == 'TEST_TAG' + + +@mock_ec2 +def test_create_snapshot_with_tags(): + client = boto3.client('ec2', 'us-west-2') + volume_id = client.create_volume( + AvailabilityZone='us-west-2', + Encrypted=False, + Size=40, + TagSpecifications=[ + { + 'ResourceType': 'volume', + 'Tags': [ + { + 'Key': 'TEST_TAG', + 'Value': 'TEST_VALUE' + } + ], + } + ] + )['VolumeId'] + snapshot = client.create_snapshot( + VolumeId=volume_id, + TagSpecifications=[ + { + 'ResourceType': 'snapshot', + 'Tags': [ + { + 'Key': 'TEST_SNAPSHOT_TAG', + 'Value': 'TEST_SNAPSHOT_VALUE' + } + ], + } + ] + ) + + expected_tags = [{ + 'Key': 'TEST_SNAPSHOT_TAG', + 'Value': 'TEST_SNAPSHOT_VALUE' + }] + + assert snapshot['Tags'] == expected_tags From daa6bfe84debe891a1e16851dc95d5cc454604f2 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 4 Apr 2018 14:56:51 -0400 Subject: [PATCH 141/182] Dont iterate over dictionary that we are modifying. --- moto/ecs/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index e0b29cb01..998975650 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -24,7 +24,7 @@ class BaseObject(BaseModel): def gen_response_object(self): response_object = copy(self.__dict__) - for key, value in response_object.items(): + for key, value in self.__dict__.items(): if '_' in key: response_object[self.camelCase(key)] = value del response_object[key] From 4636a2afc3dbdfaede3030553a2e1109fc0d52f1 Mon Sep 17 00:00:00 2001 From: William Richard Date: Wed, 4 Apr 2018 15:05:51 -0400 Subject: [PATCH 142/182] Add physical_resource_id to ECS task definition --- moto/ecs/models.py | 4 ++++ tests/test_ecs/test_ecs_boto3.py | 7 ++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index e0b29cb01..3ae360d69 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -109,6 +109,10 @@ class TaskDefinition(BaseObject): del response_object['arn'] return response_object + @property + def physical_resource_id(self): + return self.arn + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 5fcc297aa..53042ed02 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -1319,15 +1319,20 @@ def test_create_task_definition_through_cloudformation(): } template_json = json.dumps(template) cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + stack_name = 'test_stack' cfn_conn.create_stack( - StackName="test_stack", + StackName=stack_name, TemplateBody=template_json, ) ecs_conn = boto3.client('ecs', region_name='us-west-1') resp = ecs_conn.list_task_definitions() len(resp['taskDefinitionArns']).should.equal(1) + task_definition_arn = resp['taskDefinitionArns'][0] + task_definition_details = cfn_conn.describe_stack_resource( + StackName=stack_name,LogicalResourceId='testTaskDefinition')['StackResourceDetail'] + task_definition_details['PhysicalResourceId'].should.equal(task_definition_arn) @mock_ec2 @mock_ecs From cd1c6d3e6c3c80bf55fbfd6a657ba642c6fb4e3f Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 5 Apr 2018 16:57:43 -0400 Subject: [PATCH 143/182] Unvendor responses, move back to upstream. --- moto/apigateway/models.py | 2 +- moto/core/models.py | 87 ++++- moto/packages/responses/.gitignore | 12 - moto/packages/responses/.travis.yml | 27 -- moto/packages/responses/CHANGES | 32 -- moto/packages/responses/LICENSE | 201 ---------- moto/packages/responses/MANIFEST.in | 2 - moto/packages/responses/Makefile | 16 - moto/packages/responses/README.rst | 190 --------- moto/packages/responses/__init__.py | 0 moto/packages/responses/responses.py | 330 ---------------- moto/packages/responses/setup.cfg | 5 - moto/packages/responses/setup.py | 99 ----- moto/packages/responses/test_responses.py | 444 ---------------------- moto/packages/responses/tox.ini | 11 - moto/s3/urls.py | 4 +- setup.py | 1 + tests/test_apigateway/test_apigateway.py | 2 +- tests/test_sns/test_publishing_boto3.py | 2 +- 19 files changed, 79 insertions(+), 1388 deletions(-) delete mode 100644 moto/packages/responses/.gitignore delete mode 100644 moto/packages/responses/.travis.yml delete mode 100644 moto/packages/responses/CHANGES delete mode 100644 moto/packages/responses/LICENSE delete mode 100644 moto/packages/responses/MANIFEST.in delete mode 100644 moto/packages/responses/Makefile delete mode 100644 moto/packages/responses/README.rst delete mode 100644 moto/packages/responses/__init__.py delete mode 100644 moto/packages/responses/responses.py delete mode 100644 moto/packages/responses/setup.cfg delete mode 100644 moto/packages/responses/setup.py delete mode 100644 moto/packages/responses/test_responses.py delete mode 100644 moto/packages/responses/tox.ini diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 27a9b86c2..a419a3afa 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -6,7 +6,7 @@ import string import requests import time -from moto.packages.responses import responses +import responses from moto.core import BaseBackend, BaseModel from .utils import create_id from .exceptions import StageNotFoundException diff --git a/moto/core/models.py b/moto/core/models.py index c6fb72ffa..c9895c0cb 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -9,7 +9,7 @@ import re import six from moto import settings -from moto.packages.responses import responses +import responses from moto.packages.httpretty import HTTPretty from .utils import ( convert_httpretty_response, @@ -124,31 +124,90 @@ RESPONSES_METHODS = [responses.GET, responses.DELETE, responses.HEAD, responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT] -class ResponsesMockAWS(BaseMockAWS): +class CallbackResponse(responses.CallbackResponse): + ''' + Need to subclass so we can change a couple things + ''' + def get_response(self, request): + ''' + Need to override this so we can pass decode_content=False + ''' + headers = self.get_headers() + result = self.callback(request) + if isinstance(result, Exception): + raise result + + status, r_headers, body = result + body = responses._handle_body(body) + headers.update(r_headers) + + return responses.HTTPResponse( + status=status, + reason=six.moves.http_client.responses.get(status), + body=body, + headers=headers, + preload_content=False, + # Need to not decode_content to mimic requests + decode_content=False, + ) + + def _url_matches(self, url, other, match_querystring=False): + ''' + Need to override this so we can fix querystrings breaking regex matching + ''' + if not match_querystring: + other = other.split('?', 1)[0] + + if responses._is_string(url): + if responses._has_unicode(url): + url = responses._clean_unicode(url) + if not isinstance(other, six.text_type): + other = other.encode('ascii').decode('utf8') + return self._url_matches_strict(url, other) + elif isinstance(url, responses.Pattern) and url.match(other): + return True + else: + return False + + +botocore_mock = responses.RequestsMock(assert_all_requests_are_fired=False, target='botocore.vendored.requests.adapters.HTTPAdapter.send') + + +class ResponsesMockAWS(BaseMockAWS): def reset(self): + botocore_mock.reset() responses.reset() def enable_patching(self): + botocore_mock.start() responses.start() + for method in RESPONSES_METHODS: for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): - responses.add_callback( - method=method, - url=re.compile(key), - callback=convert_flask_to_responses_response(value), + responses.add( + CallbackResponse( + method=method, + url=re.compile(key), + callback=convert_flask_to_responses_response(value), + stream=True, + match_querystring=False, + ) + ) + botocore_mock.add( + CallbackResponse( + method=method, + url=re.compile(key), + callback=convert_flask_to_responses_response(value), + stream=True, + match_querystring=False, + ) ) - for pattern in responses.mock._urls: - pattern['stream'] = True - def disable_patching(self): - try: - responses.stop() - except AttributeError: - pass - responses.reset() + botocore_mock.stop() + responses.stop() MockAWS = ResponsesMockAWS diff --git a/moto/packages/responses/.gitignore b/moto/packages/responses/.gitignore deleted file mode 100644 index 5d4406b8d..000000000 --- a/moto/packages/responses/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -.arcconfig -.coverage -.DS_Store -.idea -*.db -*.egg-info -*.pyc -/htmlcov -/dist -/build -/.cache -/.tox diff --git a/moto/packages/responses/.travis.yml b/moto/packages/responses/.travis.yml deleted file mode 100644 index 9ab219db0..000000000 --- a/moto/packages/responses/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: python -sudo: false -python: - - "2.6" - - "2.7" - - "3.3" - - "3.4" - - "3.5" -cache: - directories: - - .pip_download_cache -env: - matrix: - - REQUESTS=requests==2.0 - - REQUESTS=-U requests - - REQUESTS="-e git+git://github.com/kennethreitz/requests.git#egg=requests" - global: - - PIP_DOWNLOAD_CACHE=".pip_download_cache" -matrix: - allow_failures: - - env: 'REQUESTS="-e git+git://github.com/kennethreitz/requests.git#egg=requests"' -install: - - "pip install ${REQUESTS}" - - make develop -script: - - if [[ $TRAVIS_PYTHON_VERSION != 2.6 ]]; then make lint; fi - - py.test . --cov responses --cov-report term-missing diff --git a/moto/packages/responses/CHANGES b/moto/packages/responses/CHANGES deleted file mode 100644 index 1bfd7ead8..000000000 --- a/moto/packages/responses/CHANGES +++ /dev/null @@ -1,32 +0,0 @@ -Unreleased ----------- - -- Allow empty list/dict as json object (GH-100) - -0.5.1 ------ - -- Add LICENSE, README and CHANGES to the PyPI distribution (GH-97). - -0.5.0 ------ - -- Allow passing a JSON body to `response.add` (GH-82) -- Improve ConnectionError emulation (GH-73) -- Correct assertion in assert_all_requests_are_fired (GH-71) - -0.4.0 ------ - -- Requests 2.0+ is required -- Mocking now happens on the adapter instead of the session - -0.3.0 ------ - -- Add the ability to mock errors (GH-22) -- Add responses.mock context manager (GH-36) -- Support custom adapters (GH-33) -- Add support for regexp error matching (GH-25) -- Add support for dynamic bodies via `responses.add_callback` (GH-24) -- Preserve argspec when using `responses.activate` decorator (GH-18) diff --git a/moto/packages/responses/LICENSE b/moto/packages/responses/LICENSE deleted file mode 100644 index 52b44b20a..000000000 --- a/moto/packages/responses/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2015 David Cramer - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/moto/packages/responses/MANIFEST.in b/moto/packages/responses/MANIFEST.in deleted file mode 100644 index ef901684c..000000000 --- a/moto/packages/responses/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include README.rst CHANGES LICENSE -global-exclude *~ diff --git a/moto/packages/responses/Makefile b/moto/packages/responses/Makefile deleted file mode 100644 index 9da42c6d1..000000000 --- a/moto/packages/responses/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -develop: - pip install -e . - make install-test-requirements - -install-test-requirements: - pip install "file://`pwd`#egg=responses[tests]" - -test: develop lint - @echo "Running Python tests" - py.test . - @echo "" - -lint: - @echo "Linting Python files" - PYFLAKES_NODOCTEST=1 flake8 . - @echo "" diff --git a/moto/packages/responses/README.rst b/moto/packages/responses/README.rst deleted file mode 100644 index 5f946fcde..000000000 --- a/moto/packages/responses/README.rst +++ /dev/null @@ -1,190 +0,0 @@ -Responses -========= - -.. image:: https://travis-ci.org/getsentry/responses.svg?branch=master - :target: https://travis-ci.org/getsentry/responses - -A utility library for mocking out the `requests` Python library. - -.. note:: Responses requires Requests >= 2.0 - -Response body as string ------------------------ - -.. code-block:: python - - import responses - import requests - - @responses.activate - def test_my_api(): - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{"error": "not found"}', status=404, - content_type='application/json') - - resp = requests.get('http://twitter.com/api/1/foobar') - - assert resp.json() == {"error": "not found"} - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar' - assert responses.calls[0].response.text == '{"error": "not found"}' - -You can also specify a JSON object instead of a body string. - -.. code-block:: python - - import responses - import requests - - @responses.activate - def test_my_api(): - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - json={"error": "not found"}, status=404) - - resp = requests.get('http://twitter.com/api/1/foobar') - - assert resp.json() == {"error": "not found"} - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar' - assert responses.calls[0].response.text == '{"error": "not found"}' - -Request callback ----------------- - -.. code-block:: python - - import json - - import responses - import requests - - @responses.activate - def test_calc_api(): - - def request_callback(request): - payload = json.loads(request.body) - resp_body = {'value': sum(payload['numbers'])} - headers = {'request-id': '728d329e-0e86-11e4-a748-0c84dc037c13'} - return (200, headers, json.dumps(resp_body)) - - responses.add_callback( - responses.POST, 'http://calc.com/sum', - callback=request_callback, - content_type='application/json', - ) - - resp = requests.post( - 'http://calc.com/sum', - json.dumps({'numbers': [1, 2, 3]}), - headers={'content-type': 'application/json'}, - ) - - assert resp.json() == {'value': 6} - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://calc.com/sum' - assert responses.calls[0].response.text == '{"value": 6}' - assert ( - responses.calls[0].response.headers['request-id'] == - '728d329e-0e86-11e4-a748-0c84dc037c13' - ) - -Instead of passing a string URL into `responses.add` or `responses.add_callback` -you can also supply a compiled regular expression. - -.. code-block:: python - - import re - import responses - import requests - - # Instead of - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{"error": "not found"}', status=404, - content_type='application/json') - - # You can do the following - url_re = re.compile(r'https?://twitter\.com/api/\d+/foobar') - responses.add(responses.GET, url_re, - body='{"error": "not found"}', status=404, - content_type='application/json') - -A response can also throw an exception as follows. - -.. code-block:: python - - import responses - import requests - from requests.exceptions import HTTPError - - exception = HTTPError('Something went wrong') - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - body=exception) - # All calls to 'http://twitter.com/api/1/foobar' will throw exception. - - -Responses as a context manager ------------------------------- - -.. code-block:: python - - import responses - import requests - - - def test_my_api(): - with responses.RequestsMock() as rsps: - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{}', status=200, - content_type='application/json') - resp = requests.get('http://twitter.com/api/1/foobar') - - assert resp.status_code == 200 - - # outside the context manager requests will hit the remote server - resp = requests.get('http://twitter.com/api/1/foobar') - resp.status_code == 404 - - -Assertions on declared responses --------------------------------- - -When used as a context manager, Responses will, by default, raise an assertion -error if a url was registered but not accessed. This can be disabled by passing -the ``assert_all_requests_are_fired`` value: - -.. code-block:: python - - import responses - import requests - - - def test_my_api(): - with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{}', status=200, - content_type='application/json') - -Multiple Responses ------------------- -You can also use ``assert_all_requests_are_fired`` to add multiple responses for the same url: - -.. code-block:: python - - import responses - import requests - - - def test_my_api(): - with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps: - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', status=500) - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{}', status=200, - content_type='application/json') - - resp = requests.get('http://twitter.com/api/1/foobar') - assert resp.status_code == 500 - resp = requests.get('http://twitter.com/api/1/foobar') - assert resp.status_code == 200 diff --git a/moto/packages/responses/__init__.py b/moto/packages/responses/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/moto/packages/responses/responses.py b/moto/packages/responses/responses.py deleted file mode 100644 index 3bc437f0b..000000000 --- a/moto/packages/responses/responses.py +++ /dev/null @@ -1,330 +0,0 @@ -from __future__ import ( - absolute_import, print_function, division, unicode_literals -) - -import inspect -import json as json_module -import re -import six - -from collections import namedtuple, Sequence, Sized -from functools import update_wrapper -from cookies import Cookies -from requests.adapters import HTTPAdapter -from requests.utils import cookiejar_from_dict -from requests.exceptions import ConnectionError -from requests.sessions import REDIRECT_STATI - -try: - from requests.packages.urllib3.response import HTTPResponse -except ImportError: - from urllib3.response import HTTPResponse - -if six.PY2: - from urlparse import urlparse, parse_qsl -else: - from urllib.parse import urlparse, parse_qsl - -if six.PY2: - try: - from six import cStringIO as BufferIO - except ImportError: - from six import StringIO as BufferIO -else: - from io import BytesIO as BufferIO - - -Call = namedtuple('Call', ['request', 'response']) - -_wrapper_template = """\ -def wrapper%(signature)s: - with responses: - return func%(funcargs)s -""" - - -def _is_string(s): - return isinstance(s, (six.string_types, six.text_type)) - - -def _is_redirect(response): - try: - # 2.0.0 <= requests <= 2.2 - return response.is_redirect - except AttributeError: - # requests > 2.2 - return ( - # use request.sessions conditional - response.status_code in REDIRECT_STATI and - 'location' in response.headers - ) - - -def get_wrapped(func, wrapper_template, evaldict): - # Preserve the argspec for the wrapped function so that testing - # tools such as pytest can continue to use their fixture injection. - args, a, kw, defaults = inspect.getargspec(func) - - signature = inspect.formatargspec(args, a, kw, defaults) - is_bound_method = hasattr(func, '__self__') - if is_bound_method: - args = args[1:] # Omit 'self' - callargs = inspect.formatargspec(args, a, kw, None) - - ctx = {'signature': signature, 'funcargs': callargs} - six.exec_(wrapper_template % ctx, evaldict) - - wrapper = evaldict['wrapper'] - - update_wrapper(wrapper, func) - if is_bound_method: - wrapper = wrapper.__get__(func.__self__, type(func.__self__)) - return wrapper - - -class CallList(Sequence, Sized): - - def __init__(self): - self._calls = [] - - def __iter__(self): - return iter(self._calls) - - def __len__(self): - return len(self._calls) - - def __getitem__(self, idx): - return self._calls[idx] - - def add(self, request, response): - self._calls.append(Call(request, response)) - - def reset(self): - self._calls = [] - - -def _ensure_url_default_path(url, match_querystring): - if _is_string(url) and url.count('/') == 2: - if match_querystring: - return url.replace('?', '/?', 1) - else: - return url + '/' - return url - - -class RequestsMock(object): - DELETE = 'DELETE' - GET = 'GET' - HEAD = 'HEAD' - OPTIONS = 'OPTIONS' - PATCH = 'PATCH' - POST = 'POST' - PUT = 'PUT' - - def __init__(self, assert_all_requests_are_fired=True, pass_through=True): - self._calls = CallList() - self.reset() - self.assert_all_requests_are_fired = assert_all_requests_are_fired - self.pass_through = pass_through - self.original_send = HTTPAdapter.send - - def reset(self): - self._urls = [] - self._calls.reset() - - def add(self, method, url, body='', match_querystring=False, - status=200, adding_headers=None, stream=False, - content_type='text/plain', json=None): - - # if we were passed a `json` argument, - # override the body and content_type - if json is not None: - body = json_module.dumps(json) - content_type = 'application/json' - - # ensure the url has a default path set if the url is a string - url = _ensure_url_default_path(url, match_querystring) - - # body must be bytes - if isinstance(body, six.text_type): - body = body.encode('utf-8') - - self._urls.append({ - 'url': url, - 'method': method, - 'body': body, - 'content_type': content_type, - 'match_querystring': match_querystring, - 'status': status, - 'adding_headers': adding_headers, - 'stream': stream, - }) - - def add_callback(self, method, url, callback, match_querystring=False, - content_type='text/plain'): - # ensure the url has a default path set if the url is a string - # url = _ensure_url_default_path(url, match_querystring) - - self._urls.append({ - 'url': url, - 'method': method, - 'callback': callback, - 'content_type': content_type, - 'match_querystring': match_querystring, - }) - - @property - def calls(self): - return self._calls - - def __enter__(self): - self.start() - return self - - def __exit__(self, type, value, traceback): - success = type is None - self.stop(allow_assert=success) - self.reset() - return success - - def activate(self, func): - evaldict = {'responses': self, 'func': func} - return get_wrapped(func, _wrapper_template, evaldict) - - def _find_match(self, request): - for match in self._urls: - if request.method != match['method']: - continue - - if not self._has_url_match(match, request.url): - continue - - break - else: - return None - if self.assert_all_requests_are_fired: - # for each found match remove the url from the stack - self._urls.remove(match) - return match - - def _has_url_match(self, match, request_url): - url = match['url'] - - if not match['match_querystring']: - request_url = request_url.split('?', 1)[0] - - if _is_string(url): - if match['match_querystring']: - return self._has_strict_url_match(url, request_url) - else: - return url == request_url - elif isinstance(url, re._pattern_type) and url.match(request_url): - return True - else: - return False - - def _has_strict_url_match(self, url, other): - url_parsed = urlparse(url) - other_parsed = urlparse(other) - - if url_parsed[:3] != other_parsed[:3]: - return False - - url_qsl = sorted(parse_qsl(url_parsed.query)) - other_qsl = sorted(parse_qsl(other_parsed.query)) - return url_qsl == other_qsl - - def _on_request(self, adapter, request, **kwargs): - match = self._find_match(request) - # TODO(dcramer): find the correct class for this - if match is None: - if self.pass_through: - return self.original_send(adapter, request, **kwargs) - - error_msg = 'Connection refused: {0} {1}'.format(request.method, - request.url) - response = ConnectionError(error_msg) - response.request = request - - self._calls.add(request, response) - raise response - - if 'body' in match and isinstance(match['body'], Exception): - self._calls.add(request, match['body']) - raise match['body'] - - headers = {} - if match['content_type'] is not None: - headers['Content-Type'] = match['content_type'] - - if 'callback' in match: # use callback - status, r_headers, body = match['callback'](request) - if isinstance(body, six.text_type): - body = body.encode('utf-8') - body = BufferIO(body) - headers.update(r_headers) - - elif 'body' in match: - if match['adding_headers']: - headers.update(match['adding_headers']) - status = match['status'] - body = BufferIO(match['body']) - - response = HTTPResponse( - status=status, - reason=six.moves.http_client.responses[status], - body=body, - headers=headers, - preload_content=False, - # Need to not decode_content to mimic requests - decode_content=False, - ) - - response = adapter.build_response(request, response) - if not match.get('stream'): - response.content # NOQA - - try: - resp_cookies = Cookies.from_request(response.headers['set-cookie']) - response.cookies = cookiejar_from_dict(dict( - (v.name, v.value) - for _, v - in resp_cookies.items() - )) - except (KeyError, TypeError): - pass - - self._calls.add(request, response) - - return response - - def start(self): - try: - from unittest import mock - except ImportError: - import mock - - def unbound_on_send(adapter, request, *a, **kwargs): - return self._on_request(adapter, request, *a, **kwargs) - self._patcher1 = mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send', - unbound_on_send) - self._patcher1.start() - self._patcher2 = mock.patch('requests.adapters.HTTPAdapter.send', - unbound_on_send) - self._patcher2.start() - - def stop(self, allow_assert=True): - self._patcher1.stop() - self._patcher2.stop() - if allow_assert and self.assert_all_requests_are_fired and self._urls: - raise AssertionError( - 'Not all requests have been executed {0!r}'.format( - [(url['method'], url['url']) for url in self._urls])) - - -# expose default mock namespace -mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False, pass_through=False) -__all__ = [] -for __attr in (a for a in dir(_default_mock) if not a.startswith('_')): - __all__.append(__attr) - globals()[__attr] = getattr(_default_mock, __attr) diff --git a/moto/packages/responses/setup.cfg b/moto/packages/responses/setup.cfg deleted file mode 100644 index 9b6594f2e..000000000 --- a/moto/packages/responses/setup.cfg +++ /dev/null @@ -1,5 +0,0 @@ -[pytest] -addopts=--tb=short - -[bdist_wheel] -universal=1 diff --git a/moto/packages/responses/setup.py b/moto/packages/responses/setup.py deleted file mode 100644 index 911c07da4..000000000 --- a/moto/packages/responses/setup.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python -""" -responses -========= - -A utility library for mocking out the `requests` Python library. - -:copyright: (c) 2015 David Cramer -:license: Apache 2.0 -""" - -import sys -import logging - -from setuptools import setup -from setuptools.command.test import test as TestCommand -import pkg_resources - - -setup_requires = [] - -if 'test' in sys.argv: - setup_requires.append('pytest') - -install_requires = [ - 'requests>=2.0', - 'cookies', - 'six', -] - -tests_require = [ - 'pytest', - 'coverage >= 3.7.1, < 5.0.0', - 'pytest-cov', - 'flake8', -] - - -extras_require = { - ':python_version in "2.6, 2.7, 3.2"': ['mock'], - 'tests': tests_require, -} - -try: - if 'bdist_wheel' not in sys.argv: - for key, value in extras_require.items(): - if key.startswith(':') and pkg_resources.evaluate_marker(key[1:]): - install_requires.extend(value) -except Exception: - logging.getLogger(__name__).exception( - 'Something went wrong calculating platform specific dependencies, so ' - "you're getting them all!" - ) - for key, value in extras_require.items(): - if key.startswith(':'): - install_requires.extend(value) - - -class PyTest(TestCommand): - - def finalize_options(self): - TestCommand.finalize_options(self) - self.test_args = ['test_responses.py'] - self.test_suite = True - - def run_tests(self): - # import here, cause outside the eggs aren't loaded - import pytest - errno = pytest.main(self.test_args) - sys.exit(errno) - - -setup( - name='responses', - version='0.6.0', - author='David Cramer', - description=( - 'A utility library for mocking out the `requests` Python library.' - ), - url='https://github.com/getsentry/responses', - license='Apache 2.0', - long_description=open('README.rst').read(), - py_modules=['responses', 'test_responses'], - zip_safe=False, - install_requires=install_requires, - extras_require=extras_require, - tests_require=tests_require, - setup_requires=setup_requires, - cmdclass={'test': PyTest}, - include_package_data=True, - classifiers=[ - 'Intended Audience :: Developers', - 'Intended Audience :: System Administrators', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 3', - 'Topic :: Software Development' - ], -) diff --git a/moto/packages/responses/test_responses.py b/moto/packages/responses/test_responses.py deleted file mode 100644 index 967a535cf..000000000 --- a/moto/packages/responses/test_responses.py +++ /dev/null @@ -1,444 +0,0 @@ -from __future__ import ( - absolute_import, print_function, division, unicode_literals -) - -import re -import requests -import responses -import pytest - -from inspect import getargspec -from requests.exceptions import ConnectionError, HTTPError - - -def assert_reset(): - assert len(responses._default_mock._urls) == 0 - assert len(responses.calls) == 0 - - -def assert_response(resp, body=None, content_type='text/plain'): - assert resp.status_code == 200 - assert resp.reason == 'OK' - if content_type is not None: - assert resp.headers['Content-Type'] == content_type - else: - assert 'Content-Type' not in resp.headers - assert resp.text == body - - -def test_response(): - @responses.activate - def run(): - responses.add(responses.GET, 'http://example.com', body=b'test') - resp = requests.get('http://example.com') - assert_response(resp, 'test') - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://example.com/' - assert responses.calls[0].response.content == b'test' - - resp = requests.get('http://example.com?foo=bar') - assert_response(resp, 'test') - assert len(responses.calls) == 2 - assert responses.calls[1].request.url == 'http://example.com/?foo=bar' - assert responses.calls[1].response.content == b'test' - - run() - assert_reset() - - -def test_connection_error(): - @responses.activate - def run(): - responses.add(responses.GET, 'http://example.com') - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo') - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://example.com/foo' - assert type(responses.calls[0].response) is ConnectionError - assert responses.calls[0].response.request - - run() - assert_reset() - - -def test_match_querystring(): - @responses.activate - def run(): - url = 'http://example.com?test=1&foo=bar' - responses.add( - responses.GET, url, - match_querystring=True, body=b'test') - resp = requests.get('http://example.com?test=1&foo=bar') - assert_response(resp, 'test') - resp = requests.get('http://example.com?foo=bar&test=1') - assert_response(resp, 'test') - - run() - assert_reset() - - -def test_match_querystring_error(): - @responses.activate - def run(): - responses.add( - responses.GET, 'http://example.com/?test=1', - match_querystring=True) - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo/?test=2') - - run() - assert_reset() - - -def test_match_querystring_regex(): - @responses.activate - def run(): - """Note that `match_querystring` value shouldn't matter when passing a - regular expression""" - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=1'), - body='test1', match_querystring=True) - - resp = requests.get('http://example.com/foo/?test=1') - assert_response(resp, 'test1') - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=2'), - body='test2', match_querystring=False) - - resp = requests.get('http://example.com/foo/?test=2') - assert_response(resp, 'test2') - - run() - assert_reset() - - -def test_match_querystring_error_regex(): - @responses.activate - def run(): - """Note that `match_querystring` value shouldn't matter when passing a - regular expression""" - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=1'), - match_querystring=True) - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo/?test=3') - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=2'), - match_querystring=False) - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo/?test=4') - - run() - assert_reset() - - -def test_accept_string_body(): - @responses.activate - def run(): - url = 'http://example.com/' - responses.add( - responses.GET, url, body='test') - resp = requests.get(url) - assert_response(resp, 'test') - - run() - assert_reset() - - -def test_accept_json_body(): - @responses.activate - def run(): - content_type = 'application/json' - - url = 'http://example.com/' - responses.add( - responses.GET, url, json={"message": "success"}) - resp = requests.get(url) - assert_response(resp, '{"message": "success"}', content_type) - - url = 'http://example.com/1/' - responses.add(responses.GET, url, json=[]) - resp = requests.get(url) - assert_response(resp, '[]', content_type) - - run() - assert_reset() - - -def test_no_content_type(): - @responses.activate - def run(): - url = 'http://example.com/' - responses.add( - responses.GET, url, body='test', content_type=None) - resp = requests.get(url) - assert_response(resp, 'test', content_type=None) - - run() - assert_reset() - - -def test_throw_connection_error_explicit(): - @responses.activate - def run(): - url = 'http://example.com' - exception = HTTPError('HTTP Error') - responses.add( - responses.GET, url, exception) - - with pytest.raises(HTTPError) as HE: - requests.get(url) - - assert str(HE.value) == 'HTTP Error' - - run() - assert_reset() - - -def test_callback(): - body = b'test callback' - status = 400 - reason = 'Bad Request' - headers = {'foo': 'bar'} - url = 'http://example.com/' - - def request_callback(request): - return (status, headers, body) - - @responses.activate - def run(): - responses.add_callback(responses.GET, url, request_callback) - resp = requests.get(url) - assert resp.text == "test callback" - assert resp.status_code == status - assert resp.reason == reason - assert 'foo' in resp.headers - assert resp.headers['foo'] == 'bar' - - run() - assert_reset() - - -def test_callback_no_content_type(): - body = b'test callback' - status = 400 - reason = 'Bad Request' - headers = {'foo': 'bar'} - url = 'http://example.com/' - - def request_callback(request): - return (status, headers, body) - - @responses.activate - def run(): - responses.add_callback( - responses.GET, url, request_callback, content_type=None) - resp = requests.get(url) - assert resp.text == "test callback" - assert resp.status_code == status - assert resp.reason == reason - assert 'foo' in resp.headers - assert 'Content-Type' not in resp.headers - - run() - assert_reset() - - -def test_regular_expression_url(): - @responses.activate - def run(): - url = re.compile(r'https?://(.*\.)?example.com') - responses.add(responses.GET, url, body=b'test') - - resp = requests.get('http://example.com') - assert_response(resp, 'test') - - resp = requests.get('https://example.com') - assert_response(resp, 'test') - - resp = requests.get('https://uk.example.com') - assert_response(resp, 'test') - - with pytest.raises(ConnectionError): - requests.get('https://uk.exaaample.com') - - run() - assert_reset() - - -def test_custom_adapter(): - @responses.activate - def run(): - url = "http://example.com" - responses.add(responses.GET, url, body=b'test') - - calls = [0] - - class DummyAdapter(requests.adapters.HTTPAdapter): - - def send(self, *a, **k): - calls[0] += 1 - return super(DummyAdapter, self).send(*a, **k) - - # Test that the adapter is actually used - session = requests.Session() - session.mount("http://", DummyAdapter()) - - resp = session.get(url, allow_redirects=False) - assert calls[0] == 1 - - # Test that the response is still correctly emulated - session = requests.Session() - session.mount("http://", DummyAdapter()) - - resp = session.get(url) - assert_response(resp, 'test') - - run() - - -def test_responses_as_context_manager(): - def run(): - with responses.mock: - responses.add(responses.GET, 'http://example.com', body=b'test') - resp = requests.get('http://example.com') - assert_response(resp, 'test') - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://example.com/' - assert responses.calls[0].response.content == b'test' - - resp = requests.get('http://example.com?foo=bar') - assert_response(resp, 'test') - assert len(responses.calls) == 2 - assert (responses.calls[1].request.url == - 'http://example.com/?foo=bar') - assert responses.calls[1].response.content == b'test' - - run() - assert_reset() - - -def test_activate_doesnt_change_signature(): - def test_function(a, b=None): - return (a, b) - - decorated_test_function = responses.activate(test_function) - assert getargspec(test_function) == getargspec(decorated_test_function) - assert decorated_test_function(1, 2) == test_function(1, 2) - assert decorated_test_function(3) == test_function(3) - - -def test_activate_doesnt_change_signature_for_method(): - class TestCase(object): - - def test_function(self, a, b=None): - return (self, a, b) - - test_case = TestCase() - argspec = getargspec(test_case.test_function) - decorated_test_function = responses.activate(test_case.test_function) - assert argspec == getargspec(decorated_test_function) - assert decorated_test_function(1, 2) == test_case.test_function(1, 2) - assert decorated_test_function(3) == test_case.test_function(3) - - -def test_response_cookies(): - body = b'test callback' - status = 200 - headers = {'set-cookie': 'session_id=12345; a=b; c=d'} - url = 'http://example.com/' - - def request_callback(request): - return (status, headers, body) - - @responses.activate - def run(): - responses.add_callback(responses.GET, url, request_callback) - resp = requests.get(url) - assert resp.text == "test callback" - assert resp.status_code == status - assert 'session_id' in resp.cookies - assert resp.cookies['session_id'] == '12345' - assert resp.cookies['a'] == 'b' - assert resp.cookies['c'] == 'd' - run() - assert_reset() - - -def test_assert_all_requests_are_fired(): - def run(): - with pytest.raises(AssertionError) as excinfo: - with responses.RequestsMock( - assert_all_requests_are_fired=True) as m: - m.add(responses.GET, 'http://example.com', body=b'test') - assert 'http://example.com' in str(excinfo.value) - assert responses.GET in str(excinfo) - - # check that assert_all_requests_are_fired default to True - with pytest.raises(AssertionError): - with responses.RequestsMock() as m: - m.add(responses.GET, 'http://example.com', body=b'test') - - # check that assert_all_requests_are_fired doesn't swallow exceptions - with pytest.raises(ValueError): - with responses.RequestsMock() as m: - m.add(responses.GET, 'http://example.com', body=b'test') - raise ValueError() - - run() - assert_reset() - - -def test_allow_redirects_samehost(): - redirecting_url = 'http://example.com' - final_url_path = '/1' - final_url = '{0}{1}'.format(redirecting_url, final_url_path) - url_re = re.compile(r'^http://example.com(/)?(\d+)?$') - - def request_callback(request): - # endpoint of chained redirect - if request.url.endswith(final_url_path): - return 200, (), b'test' - # otherwise redirect to an integer path - else: - if request.url.endswith('/0'): - n = 1 - else: - n = 0 - redirect_headers = {'location': '/{0!s}'.format(n)} - return 301, redirect_headers, None - - def run(): - # setup redirect - with responses.mock: - responses.add_callback(responses.GET, url_re, request_callback) - resp_no_redirects = requests.get(redirecting_url, - allow_redirects=False) - assert resp_no_redirects.status_code == 301 - assert len(responses.calls) == 1 # 1x300 - assert responses.calls[0][1].status_code == 301 - assert_reset() - - with responses.mock: - responses.add_callback(responses.GET, url_re, request_callback) - resp_yes_redirects = requests.get(redirecting_url, - allow_redirects=True) - assert len(responses.calls) == 3 # 2x300 + 1x200 - assert len(resp_yes_redirects.history) == 2 - assert resp_yes_redirects.status_code == 200 - assert final_url == resp_yes_redirects.url - status_codes = [call[1].status_code for call in responses.calls] - assert status_codes == [301, 301, 200] - assert_reset() - - run() - assert_reset() diff --git a/moto/packages/responses/tox.ini b/moto/packages/responses/tox.ini deleted file mode 100644 index 0a31c03ab..000000000 --- a/moto/packages/responses/tox.ini +++ /dev/null @@ -1,11 +0,0 @@ - -[tox] -envlist = {py26,py27,py32,py33,py34,py35} - -[testenv] -deps = - pytest - pytest-cov - pytest-flakes -commands = - py.test . --cov responses --cov-report term-missing --flakes diff --git a/moto/s3/urls.py b/moto/s3/urls.py index 1d439a549..af0a9954e 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -21,7 +21,7 @@ url_paths = { '{0}/$': S3ResponseInstance.bucket_response, # subdomain key of path-based bucket - '{0}/(?P[^/]+)/?$': S3ResponseInstance.ambiguous_response, + '{0}/(?P[^/?]+)/?$': S3ResponseInstance.ambiguous_response, # path-based bucket + key - '{0}/(?P[^/]+)/(?P.+)': S3ResponseInstance.key_response, + '{0}/(?P[^/?]+)/(?P.+)': S3ResponseInstance.key_response, } diff --git a/setup.py b/setup.py index 1f135ae7b..d253d7f0a 100755 --- a/setup.py +++ b/setup.py @@ -23,6 +23,7 @@ install_requires = [ "docker>=2.5.1", "jsondiff==1.1.1", "aws-xray-sdk<0.96,>=0.93", + "responses", ] extras_require = { diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 9e2307bdd..1dc9c976e 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -7,7 +7,7 @@ import requests import sure # noqa from botocore.exceptions import ClientError -from moto.packages.responses import responses +import responses from moto import mock_apigateway, settings diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 3ccc3ef44..52347cc15 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -9,7 +9,7 @@ import re from freezegun import freeze_time import sure # noqa -from moto.packages.responses import responses +import responses from botocore.exceptions import ClientError from moto import mock_sns, mock_sqs from freezegun import freeze_time From 2ee484990d4980cc975f5f5d593ceff04d897d4f Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 6 Apr 2018 09:26:47 -0400 Subject: [PATCH 144/182] Catch RuntimeError on unpatching in case of multiple unpatching. --- moto/core/models.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index c9895c0cb..cc7b5d8f9 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -206,8 +206,15 @@ class ResponsesMockAWS(BaseMockAWS): ) def disable_patching(self): - botocore_mock.stop() - responses.stop() + try: + botocore_mock.stop() + except RuntimeError: + pass + + try: + responses.stop() + except RuntimeError: + pass MockAWS = ResponsesMockAWS From 56f29a0e6ed65ba197f6e005d86cd56e66db0731 Mon Sep 17 00:00:00 2001 From: Alberto Vara Date: Sat, 7 Apr 2018 20:07:17 +0200 Subject: [PATCH 145/182] Fix/lambda backend (#1556) * Fix exception with "object has no attribute" When use this code: client = boto3.client('lambda') client.get_policy([...]) moto rise: ``` moto/awslambda/responses.py", line 109, in _get_policy lambda_backend = self.get_lambda_backend(full_url) Exception: 'LambdaResponse' object has no attribute 'get_lambda_backend' ``` * fix shadows built-in name --- moto/awslambda/responses.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 5676da1ca..2c8a54523 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -94,25 +94,21 @@ class LambdaResponse(BaseResponse): return self._add_policy(request, full_url, headers) def _add_policy(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) - path = request.path if hasattr(request, 'path') else request.path_url function_name = path.split('/')[-2] - if lambda_backend.has_function(function_name): + if self.lambda_backend.get_function(function_name): policy = request.body.decode('utf8') - lambda_backend.add_policy(function_name, policy) + self.lambda_backend.add_policy(function_name, policy) return 200, {}, json.dumps(dict(Statement=policy)) else: return 404, {}, "{}" def _get_policy(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) - path = request.path if hasattr(request, 'path') else request.path_url function_name = path.split('/')[-2] - if lambda_backend.has_function(function_name): - function = lambda_backend.get_function(function_name) - return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + function.policy + "]}")) + if self.lambda_backend.get_function(function_name): + lambda_function = self.lambda_backend.get_function(function_name) + return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + lambda_function.policy + "]}")) else: return 404, {}, "{}" From 09ac77d979d455dccba6bb73fad73b0b0cd8de85 Mon Sep 17 00:00:00 2001 From: sawandas <38201746+sawandas@users.noreply.github.com> Date: Mon, 9 Apr 2018 12:10:44 +0530 Subject: [PATCH 146/182] Issue #1539 : Fixing dynamodb filtering (contains, begins with) Currently contains and begins with are not respecting the given filter value --- moto/dynamodb2/comparisons.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 68051460e..51d62fb83 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -176,6 +176,8 @@ def get_filter_expression(expr, names, values): next_token = six.next(token_iterator) while next_token != ')': + if next_token in values_map: + next_token = values_map[next_token] function_list.append(next_token) next_token = six.next(token_iterator) From 861c47a552db20c9f022856c66d7c2e82e506d42 Mon Sep 17 00:00:00 2001 From: sawandas <38201746+sawandas@users.noreply.github.com> Date: Mon, 9 Apr 2018 13:42:50 +0530 Subject: [PATCH 147/182] Update test cases for dynamodb contains filter --- tests/test_dynamodb2/test_dynamodb.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index d7c5b5843..1d7fa4034 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -658,6 +658,14 @@ def test_filter_expression(): {':v0': {'N': '7'}} ) filter_expr.expr(row1).should.be(True) + # Expression from to check contains on string value + filter_expr = moto.dynamodb2.comparisons.get_filter_expression( + 'contains(#n0, :v0)', + {'#n0': 'Desc'}, + {':v0': {'S': 'Some'}} + ) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) @mock_dynamodb2 @@ -699,6 +707,11 @@ def test_query_filter(): ) assert response['Count'] == 1 assert response['Items'][0]['app'] == 'app2' + response = table.query( + KeyConditionExpression=Key('client').eq('client1'), + FilterExpression=Attr('app').contains('app') + ) + assert response['Count'] == 2 @mock_dynamodb2 From ec0d8080108e8eeeecc33f50d5a44f509affde22 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 11 Apr 2018 09:39:33 -0400 Subject: [PATCH 148/182] Only start responses patcher if not already activated. --- moto/core/models.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index cc7b5d8f9..92dc2a980 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -172,21 +172,26 @@ class CallbackResponse(responses.CallbackResponse): botocore_mock = responses.RequestsMock(assert_all_requests_are_fired=False, target='botocore.vendored.requests.adapters.HTTPAdapter.send') +responses_mock = responses._default_mock class ResponsesMockAWS(BaseMockAWS): def reset(self): botocore_mock.reset() - responses.reset() + responses_mock.reset() def enable_patching(self): - botocore_mock.start() - responses.start() + if not hasattr(botocore_mock, '_patcher') or not hasattr(botocore_mock._patcher, 'target'): + # Check for unactivated patcher + botocore_mock.start() + + if not hasattr(responses_mock, '_patcher') or not hasattr(responses_mock._patcher, 'target'): + responses_mock.start() for method in RESPONSES_METHODS: for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): - responses.add( + responses_mock.add( CallbackResponse( method=method, url=re.compile(key), @@ -212,7 +217,7 @@ class ResponsesMockAWS(BaseMockAWS): pass try: - responses.stop() + responses_mock.stop() except RuntimeError: pass From 1f46543ae263d3e6e53e8432771ff2abe816b48e Mon Sep 17 00:00:00 2001 From: William Richard Date: Wed, 4 Apr 2018 16:01:01 -0400 Subject: [PATCH 149/182] ECS CPU, memory hard limits and host ports are all optional. http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.register_task_definition --- moto/ecs/models.py | 8 ++-- tests/test_ecs/test_ecs_boto3.py | 81 ++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 3 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 998975650..0c07ce107 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -502,10 +502,12 @@ class EC2ContainerServiceBackend(BaseBackend): def _calculate_task_resource_requirements(task_definition): resource_requirements = {"CPU": 0, "MEMORY": 0, "PORTS": [], "PORTS_UDP": []} for container_definition in task_definition.container_definitions: - resource_requirements["CPU"] += container_definition.get('cpu') - resource_requirements["MEMORY"] += container_definition.get("memory") + resource_requirements["CPU"] += container_definition.get('cpu', 0) + resource_requirements["MEMORY"] += container_definition.get( + "memory", container_definition.get('memoryReservation')) for port_mapping in container_definition.get("portMappings", []): - resource_requirements["PORTS"].append(port_mapping.get('hostPort')) + if 'hostPort' in port_mapping: + resource_requirements["PORTS"].append(port_mapping.get('hostPort')) return resource_requirements @staticmethod diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 5fcc297aa..7f6b835be 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -1229,6 +1229,87 @@ def test_resource_reservation_and_release(): remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) container_instance_description['runningTasksCount'].should.equal(0) +@mock_ec2 +@mock_ecs +def test_resource_reservation_and_release_memory_reservation(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + _ = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'memoryReservation': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'}, + 'portMappings': [ + { + 'containerPort': 8080 + } + ] + } + ] + ) + run_response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=1, + startedBy='moto' + ) + container_instance_arn = run_response['tasks'][0].get('containerInstanceArn') + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(1) + client.stop_task( + cluster='test_ecs_cluster', + task=run_response['tasks'][0].get('taskArn'), + reason='moto testing' + ) + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(0) + + @mock_ecs @mock_cloudformation From 9b281f63f6bfb4c3e13e2b3329757d40f567b63f Mon Sep 17 00:00:00 2001 From: William Richard Date: Wed, 4 Apr 2018 16:24:20 -0400 Subject: [PATCH 150/182] Add support for calculating resource requirements for cloudforamtion container definitions Cloudformation user capitalized resource names, while boto does not Undo whitespace changes --- moto/ecs/models.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 0c07ce107..6cc1b65a6 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -502,12 +502,27 @@ class EC2ContainerServiceBackend(BaseBackend): def _calculate_task_resource_requirements(task_definition): resource_requirements = {"CPU": 0, "MEMORY": 0, "PORTS": [], "PORTS_UDP": []} for container_definition in task_definition.container_definitions: - resource_requirements["CPU"] += container_definition.get('cpu', 0) - resource_requirements["MEMORY"] += container_definition.get( - "memory", container_definition.get('memoryReservation')) - for port_mapping in container_definition.get("portMappings", []): + # cloudformation uses capitalized properties, while boto uses all lower case + + # CPU is optional + resource_requirements["CPU"] += container_definition.get('cpu', + container_definition.get('Cpu', 0)) + + # either memory or memory reservation must be provided + if 'Memory' in container_definition or 'MemoryReservation' in container_definition: + resource_requirements["MEMORY"] += container_definition.get( + "Memory", container_definition.get('MemoryReservation')) + else: + resource_requirements["MEMORY"] += container_definition.get( + "memory", container_definition.get('memoryReservation')) + + port_mapping_key = 'PortMappings' if 'PortMappings' in container_definition else 'portMappings' + for port_mapping in container_definition.get(port_mapping_key, []): if 'hostPort' in port_mapping: resource_requirements["PORTS"].append(port_mapping.get('hostPort')) + elif 'HostPort' in port_mapping: + resource_requirements["PORTS"].append(port_mapping.get('HostPort')) + return resource_requirements @staticmethod From 67d7e8d590c0cff3da36ac0e081646f051cc7f0f Mon Sep 17 00:00:00 2001 From: Benny Elgazar Date: Fri, 13 Apr 2018 00:06:24 +0300 Subject: [PATCH 151/182] Fix Unicode problem (#1562) --- moto/core/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index 278a24dc4..ca5b9f7d2 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -574,7 +574,7 @@ class AWSServiceSpec(object): def __init__(self, path): self.path = resource_filename('botocore', path) - with open(self.path) as f: + with open(self.path, "rb") as f: spec = json.load(f) self.metadata = spec['metadata'] self.operations = spec['operations'] From 3ac453296875d6af4c4345738fb9c3ee65dbe556 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 12 Apr 2018 14:37:00 -0700 Subject: [PATCH 152/182] Version 1.3.2 (#1564) * bumping to version 1.3.2 * Updating implementation coverage * updating CHANGELOG --- .bumpversion.cfg | 2 +- CHANGELOG.md | 8 +++ IMPLEMENTATION_COVERAGE.md | 128 +++++++++++++++++++++++++++++++++++-- moto/__init__.py | 2 +- setup.py | 2 +- 5 files changed, 135 insertions(+), 7 deletions(-) diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 91e571d38..6459ed410 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.3.1 +current_version = 1.3.2 [bumpversion:file:setup.py] diff --git a/CHANGELOG.md b/CHANGELOG.md index 3ba4845a2..c85e38536 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,14 @@ Moto Changelog =================== +1.3.1 +------ +The huge change in this version is that the responses library is no longer vendored. Many developers are now unblocked. Kudos to @spulec for the fix. + + * Fix route53 TTL bug + * Added filtering support for S3 lifecycle + * unvendoring responses + 1.3.0 ------ diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index c98093147..bc4bc1696 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1,8 +1,9 @@ -## acm - 50% implemented +## acm - 41% implemented - [X] add_tags_to_certificate - [X] delete_certificate - [ ] describe_certificate +- [ ] export_certificate - [X] get_certificate - [ ] import_certificate - [ ] list_certificates @@ -10,21 +11,48 @@ - [X] remove_tags_from_certificate - [X] request_certificate - [ ] resend_validation_email +- [ ] update_certificate_options + +## acm-pca - 0% implemented +- [ ] create_certificate_authority +- [ ] create_certificate_authority_audit_report +- [ ] delete_certificate_authority +- [ ] describe_certificate_authority +- [ ] describe_certificate_authority_audit_report +- [ ] get_certificate +- [ ] get_certificate_authority_certificate +- [ ] get_certificate_authority_csr +- [ ] import_certificate_authority_certificate +- [ ] issue_certificate +- [ ] list_certificate_authorities +- [ ] list_tags +- [ ] revoke_certificate +- [ ] tag_certificate_authority +- [ ] untag_certificate_authority +- [ ] update_certificate_authority ## alexaforbusiness - 0% implemented +- [ ] associate_contact_with_address_book - [ ] associate_device_with_room - [ ] associate_skill_group_with_room +- [ ] create_address_book +- [ ] create_contact - [ ] create_profile - [ ] create_room - [ ] create_skill_group - [ ] create_user +- [ ] delete_address_book +- [ ] delete_contact - [ ] delete_profile - [ ] delete_room - [ ] delete_room_skill_parameter - [ ] delete_skill_group - [ ] delete_user +- [ ] disassociate_contact_from_address_book - [ ] disassociate_device_from_room - [ ] disassociate_skill_group_from_room +- [ ] get_address_book +- [ ] get_contact - [ ] get_device - [ ] get_profile - [ ] get_room @@ -35,6 +63,8 @@ - [ ] put_room_skill_parameter - [ ] resolve_room - [ ] revoke_invitation +- [ ] search_address_books +- [ ] search_contacts - [ ] search_devices - [ ] search_profiles - [ ] search_rooms @@ -44,6 +74,8 @@ - [ ] start_device_sync - [ ] tag_resource - [ ] untag_resource +- [ ] update_address_book +- [ ] update_contact - [ ] update_device - [ ] update_profile - [ ] update_room @@ -402,6 +434,7 @@ - [ ] get_applied_schema_version - [ ] get_directory - [ ] get_facet +- [ ] get_object_attributes - [ ] get_object_information - [ ] get_schema_as_json - [ ] get_typed_link_facet_information @@ -484,30 +517,48 @@ - [ ] create_cloud_front_origin_access_identity - [ ] create_distribution - [ ] create_distribution_with_tags +- [ ] create_field_level_encryption_config +- [ ] create_field_level_encryption_profile - [ ] create_invalidation +- [ ] create_public_key - [ ] create_streaming_distribution - [ ] create_streaming_distribution_with_tags - [ ] delete_cloud_front_origin_access_identity - [ ] delete_distribution +- [ ] delete_field_level_encryption_config +- [ ] delete_field_level_encryption_profile +- [ ] delete_public_key - [ ] delete_service_linked_role - [ ] delete_streaming_distribution - [ ] get_cloud_front_origin_access_identity - [ ] get_cloud_front_origin_access_identity_config - [ ] get_distribution - [ ] get_distribution_config +- [ ] get_field_level_encryption +- [ ] get_field_level_encryption_config +- [ ] get_field_level_encryption_profile +- [ ] get_field_level_encryption_profile_config - [ ] get_invalidation +- [ ] get_public_key +- [ ] get_public_key_config - [ ] get_streaming_distribution - [ ] get_streaming_distribution_config - [ ] list_cloud_front_origin_access_identities - [ ] list_distributions - [ ] list_distributions_by_web_acl_id +- [ ] list_field_level_encryption_configs +- [ ] list_field_level_encryption_profiles - [ ] list_invalidations +- [ ] list_public_keys - [ ] list_streaming_distributions - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource - [ ] update_cloud_front_origin_access_identity - [ ] update_distribution +- [ ] update_field_level_encryption_config +- [ ] update_field_level_encryption_profile +- [ ] update_public_key - [ ] update_streaming_distribution ## cloudhsm - 0% implemented @@ -591,7 +642,7 @@ - [ ] stop_logging - [ ] update_trail -## cloudwatch - 60% implemented +## cloudwatch - 56% implemented - [X] delete_alarms - [X] delete_dashboards - [ ] describe_alarm_history @@ -600,6 +651,7 @@ - [ ] disable_alarm_actions - [ ] enable_alarm_actions - [X] get_dashboard +- [ ] get_metric_data - [X] get_metric_statistics - [X] list_dashboards - [ ] list_metrics @@ -624,6 +676,7 @@ - [ ] start_build - [ ] stop_build - [ ] update_project +- [ ] update_webhook ## codecommit - 0% implemented - [ ] batch_get_repositories @@ -905,19 +958,29 @@ ## config - 0% implemented - [ ] batch_get_resource_config +- [ ] delete_aggregation_authorization - [ ] delete_config_rule +- [ ] delete_configuration_aggregator - [ ] delete_configuration_recorder - [ ] delete_delivery_channel - [ ] delete_evaluation_results +- [ ] delete_pending_aggregation_request - [ ] deliver_config_snapshot +- [ ] describe_aggregate_compliance_by_config_rules +- [ ] describe_aggregation_authorizations - [ ] describe_compliance_by_config_rule - [ ] describe_compliance_by_resource - [ ] describe_config_rule_evaluation_status - [ ] describe_config_rules +- [ ] describe_configuration_aggregator_sources_status +- [ ] describe_configuration_aggregators - [ ] describe_configuration_recorder_status - [ ] describe_configuration_recorders - [ ] describe_delivery_channel_status - [ ] describe_delivery_channels +- [ ] describe_pending_aggregation_requests +- [ ] get_aggregate_compliance_details_by_config_rule +- [ ] get_aggregate_config_rule_compliance_summary - [ ] get_compliance_details_by_config_rule - [ ] get_compliance_details_by_resource - [ ] get_compliance_summary_by_config_rule @@ -925,7 +988,9 @@ - [ ] get_discovered_resource_counts - [ ] get_resource_config_history - [ ] list_discovered_resources +- [ ] put_aggregation_authorization - [ ] put_config_rule +- [ ] put_configuration_aggregator - [ ] put_configuration_recorder - [ ] put_delivery_channel - [ ] put_evaluations @@ -933,6 +998,10 @@ - [ ] start_configuration_recorder - [ ] stop_configuration_recorder +## connect - 0% implemented +- [ ] start_outbound_voice_contact +- [ ] stop_contact + ## cur - 0% implemented - [ ] delete_report_definition - [ ] describe_report_definitions @@ -984,11 +1053,13 @@ ## devicefarm - 0% implemented - [ ] create_device_pool +- [ ] create_instance_profile - [ ] create_network_profile - [ ] create_project - [ ] create_remote_access_session - [ ] create_upload - [ ] delete_device_pool +- [ ] delete_instance_profile - [ ] delete_network_profile - [ ] delete_project - [ ] delete_remote_access_session @@ -996,8 +1067,10 @@ - [ ] delete_upload - [ ] get_account_settings - [ ] get_device +- [ ] get_device_instance - [ ] get_device_pool - [ ] get_device_pool_compatibility +- [ ] get_instance_profile - [ ] get_job - [ ] get_network_profile - [ ] get_offering_status @@ -1009,8 +1082,10 @@ - [ ] get_upload - [ ] install_to_remote_access_session - [ ] list_artifacts +- [ ] list_device_instances - [ ] list_device_pools - [ ] list_devices +- [ ] list_instance_profiles - [ ] list_jobs - [ ] list_network_profiles - [ ] list_offering_promotions @@ -1029,7 +1104,9 @@ - [ ] schedule_run - [ ] stop_remote_access_session - [ ] stop_run +- [ ] update_device_instance - [ ] update_device_pool +- [ ] update_instance_profile - [ ] update_network_profile - [ ] update_project @@ -1188,7 +1265,7 @@ - [ ] update_radius - [ ] verify_trust -## dynamodb - 24% implemented +## dynamodb - 22% implemented - [ ] batch_get_item - [ ] batch_write_item - [ ] create_backup @@ -1211,9 +1288,11 @@ - [X] put_item - [X] query - [ ] restore_table_from_backup +- [ ] restore_table_to_point_in_time - [X] scan - [ ] tag_resource - [ ] untag_resource +- [ ] update_continuous_backups - [ ] update_global_table - [ ] update_item - [ ] update_table @@ -1810,6 +1889,20 @@ - [ ] put_record_batch - [ ] update_destination +## fms - 0% implemented +- [ ] associate_admin_account +- [ ] delete_notification_channel +- [ ] delete_policy +- [ ] disassociate_admin_account +- [ ] get_admin_account +- [ ] get_compliance_detail +- [ ] get_notification_channel +- [ ] get_policy +- [ ] list_compliance_status +- [ ] list_policies +- [ ] put_notification_channel +- [ ] put_policy + ## gamelift - 0% implemented - [ ] accept_match - [ ] create_alias @@ -2115,7 +2208,7 @@ - [ ] describe_event_types - [ ] describe_events -## iam - 48% implemented +## iam - 47% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -2228,6 +2321,7 @@ - [ ] update_group - [X] update_login_profile - [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role - [ ] update_role_description - [ ] update_saml_provider - [ ] update_server_certificate @@ -3496,6 +3590,7 @@ - [ ] put_object_acl - [ ] put_object_tagging - [ ] restore_object +- [ ] select_object_content - [ ] upload_part - [ ] upload_part_copy @@ -3550,6 +3645,23 @@ - [ ] put_attributes - [ ] select +## secretsmanager - 0% implemented +- [ ] cancel_rotate_secret +- [ ] create_secret +- [ ] delete_secret +- [ ] describe_secret +- [ ] get_random_password +- [ ] get_secret_value +- [ ] list_secret_version_ids +- [ ] list_secrets +- [ ] put_secret_value +- [ ] restore_secret +- [ ] rotate_secret +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_secret +- [ ] update_secret_version_stage + ## serverlessrepo - 0% implemented - [ ] create_application - [ ] create_application_version @@ -4058,9 +4170,14 @@ - [X] terminate_workflow_execution ## transcribe - 0% implemented +- [ ] create_vocabulary +- [ ] delete_vocabulary - [ ] get_transcription_job +- [ ] get_vocabulary - [ ] list_transcription_jobs +- [ ] list_vocabularies - [ ] start_transcription_job +- [ ] update_vocabulary ## translate - 0% implemented - [ ] translate_text @@ -4262,6 +4379,7 @@ - [ ] create_user - [ ] delete_alias - [ ] delete_group +- [ ] delete_mailbox_permissions - [ ] delete_resource - [ ] delete_user - [ ] deregister_from_work_mail @@ -4274,10 +4392,12 @@ - [ ] list_aliases - [ ] list_group_members - [ ] list_groups +- [ ] list_mailbox_permissions - [ ] list_organizations - [ ] list_resource_delegates - [ ] list_resources - [ ] list_users +- [ ] put_mailbox_permissions - [ ] register_to_work_mail - [ ] reset_password - [ ] update_primary_email_address diff --git a/moto/__init__.py b/moto/__init__.py index da9f8240a..4d6b35017 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.3.1' +__version__ = '1.3.2' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa diff --git a/setup.py b/setup.py index d253d7f0a..79daa6081 100755 --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ else: setup( name='moto', - version='1.3.1', + version='1.3.2', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 380710273a36821c0a6b18f95dd977f35a889667 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 13 Apr 2018 07:21:48 -0400 Subject: [PATCH 153/182] Add .pytest_cache to .gitignore. --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 18026d60f..c4b8c5034 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,5 @@ build/ .DS_Store python_env .ropeproject/ +.pytest_cache/ + From e8a88cf3b200045f36e867a547cae8d9478998c6 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 13 Apr 2018 07:23:24 -0400 Subject: [PATCH 154/182] Add more regions for APIGateway. Closes #1512. --- moto/apigateway/models.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 70280abaa..387303d31 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -6,6 +6,7 @@ import string import requests import time +from boto3.session import Session import responses from moto.core import BaseBackend, BaseModel from .utils import create_id @@ -577,7 +578,9 @@ class APIGatewayBackend(BaseBackend): return {} + + + apigateway_backends = {} -# Not available in boto yet -for region_name in ['us-east-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1']: +for region_name in Session().get_available_regions('apigateway'): apigateway_backends[region_name] = APIGatewayBackend(region_name) From 929ae286cf6cd31fbdf7bda39bf526d43e4db0c9 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 13 Apr 2018 07:33:53 -0400 Subject: [PATCH 155/182] Fix ELB ssl_certificate_id typo. Closes #1528. --- moto/apigateway/models.py | 3 --- moto/elb/models.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 387303d31..160b443b0 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -578,9 +578,6 @@ class APIGatewayBackend(BaseBackend): return {} - - - apigateway_backends = {} for region_name in Session().get_available_regions('apigateway'): apigateway_backends[region_name] = APIGatewayBackend(region_name) diff --git a/moto/elb/models.py b/moto/elb/models.py index 504c68908..8781620f1 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -268,7 +268,7 @@ class ELBBackend(BaseBackend): protocol = port['protocol'] instance_port = port['instance_port'] lb_port = port['load_balancer_port'] - ssl_certificate_id = port.get('sslcertificate_id') + ssl_certificate_id = port.get('ssl_certificate_id') for listener in balancer.listeners: if lb_port == listener.load_balancer_port: if protocol != listener.protocol: From d11ecddddea34dc9db52d89c455835acdde48d07 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 13 Apr 2018 10:16:13 -0400 Subject: [PATCH 156/182] Add back ACM tests. --- tests/test_acm/test_acm.py | 133 ++++++++++++++++++------------------- 1 file changed, 66 insertions(+), 67 deletions(-) diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index ccac48181..ed96054d8 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -314,70 +314,69 @@ def test_request_certificate_no_san(): ) resp2.should.contain('Certificate') -# # Also tests the SAN code -# # requires Pull: https://github.com/spulec/freezegun/pull/210 -# @freeze_time("2012-01-01 12:00:00", as_arg=True) -# @mock_acm -# def test_request_certificate(frozen_time): -# # After requesting a certificate, it should then auto-validate after 1 minute -# # Some sneaky programming for that ;-) -# client = boto3.client('acm', region_name='eu-central-1') -# -# resp = client.request_certificate( -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# arn = resp['CertificateArn'] -# -# resp = client.describe_certificate(CertificateArn=arn) -# resp['Certificate']['CertificateArn'].should.equal(arn) -# resp['Certificate']['DomainName'].should.equal('google.com') -# resp['Certificate']['Issuer'].should.equal('Amazon') -# resp['Certificate']['KeyAlgorithm'].should.equal('RSA_2048') -# resp['Certificate']['Status'].should.equal('PENDING_VALIDATION') -# resp['Certificate']['Type'].should.equal('AMAZON_ISSUED') -# len(resp['Certificate']['SubjectAlternativeNames']).should.equal(3) -# -# # Move time -# frozen_time.move_to('2012-01-01 12:02:00') -# resp = client.describe_certificate(CertificateArn=arn) -# resp['Certificate']['CertificateArn'].should.equal(arn) -# resp['Certificate']['Status'].should.equal('ISSUED') -# -# -# # requires Pull: https://github.com/spulec/freezegun/pull/210 -# @freeze_time("2012-01-01 12:00:00", as_arg=True) -# @mock_acm -# def test_request_certificate(frozen_time): -# # After requesting a certificate, it should then auto-validate after 1 minute -# # Some sneaky programming for that ;-) -# client = boto3.client('acm', region_name='eu-central-1') -# -# resp = client.request_certificate( -# IdempotencyToken='test_token', -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# original_arn = resp['CertificateArn'] -# -# # Should be able to request a certificate multiple times in an hour -# # after that it makes a new one -# for time_intervals in ('2012-01-01 12:15:00', '2012-01-01 12:30:00', '2012-01-01 12:45:00'): -# frozen_time.move_to(time_intervals) -# resp = client.request_certificate( -# IdempotencyToken='test_token', -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# arn = resp['CertificateArn'] -# arn.should.equal(original_arn) -# -# # Move time -# frozen_time.move_to('2012-01-01 13:01:00') -# resp = client.request_certificate( -# IdempotencyToken='test_token', -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# arn = resp['CertificateArn'] -# arn.should_not.equal(original_arn) + +# Also tests the SAN code +@freeze_time("2012-01-01 12:00:00", as_arg=True) +@mock_acm +def test_request_certificate(frozen_time): + # After requesting a certificate, it should then auto-validate after 1 minute + # Some sneaky programming for that ;-) + client = boto3.client('acm', region_name='eu-central-1') + + resp = client.request_certificate( + DomainName='google.com', + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + arn = resp['CertificateArn'] + + resp = client.describe_certificate(CertificateArn=arn) + resp['Certificate']['CertificateArn'].should.equal(arn) + resp['Certificate']['DomainName'].should.equal('google.com') + resp['Certificate']['Issuer'].should.equal('Amazon') + resp['Certificate']['KeyAlgorithm'].should.equal('RSA_2048') + resp['Certificate']['Status'].should.equal('PENDING_VALIDATION') + resp['Certificate']['Type'].should.equal('AMAZON_ISSUED') + len(resp['Certificate']['SubjectAlternativeNames']).should.equal(3) + + # Move time + frozen_time.move_to('2012-01-01 12:02:00') + resp = client.describe_certificate(CertificateArn=arn) + resp['Certificate']['CertificateArn'].should.equal(arn) + resp['Certificate']['Status'].should.equal('ISSUED') + + +@freeze_time("2012-01-01 12:00:00", as_arg=True) +@mock_acm +def test_request_certificate(frozen_time): + # After requesting a certificate, it should then auto-validate after 1 minute + # Some sneaky programming for that ;-) + client = boto3.client('acm', region_name='eu-central-1') + + resp = client.request_certificate( + IdempotencyToken='test_token', + DomainName='google.com', + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + original_arn = resp['CertificateArn'] + + # Should be able to request a certificate multiple times in an hour + # after that it makes a new one + for time_intervals in ('2012-01-01 12:15:00', '2012-01-01 12:30:00', '2012-01-01 12:45:00'): + frozen_time.move_to(time_intervals) + resp = client.request_certificate( + IdempotencyToken='test_token', + DomainName='google.com', + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + arn = resp['CertificateArn'] + arn.should.equal(original_arn) + + # Move time + frozen_time.move_to('2012-01-01 13:01:00') + resp = client.request_certificate( + IdempotencyToken='test_token', + DomainName='google.com', + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + arn = resp['CertificateArn'] + arn.should_not.equal(original_arn) From f38378d7ecc6132741ac20590f4f85b5558b2cef Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 13 Apr 2018 10:37:06 -0400 Subject: [PATCH 157/182] Revert "Add back ACM tests." This reverts commit d11ecddddea34dc9db52d89c455835acdde48d07. --- tests/test_acm/test_acm.py | 133 +++++++++++++++++++------------------ 1 file changed, 67 insertions(+), 66 deletions(-) diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index ed96054d8..ccac48181 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -314,69 +314,70 @@ def test_request_certificate_no_san(): ) resp2.should.contain('Certificate') - -# Also tests the SAN code -@freeze_time("2012-01-01 12:00:00", as_arg=True) -@mock_acm -def test_request_certificate(frozen_time): - # After requesting a certificate, it should then auto-validate after 1 minute - # Some sneaky programming for that ;-) - client = boto3.client('acm', region_name='eu-central-1') - - resp = client.request_certificate( - DomainName='google.com', - SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], - ) - arn = resp['CertificateArn'] - - resp = client.describe_certificate(CertificateArn=arn) - resp['Certificate']['CertificateArn'].should.equal(arn) - resp['Certificate']['DomainName'].should.equal('google.com') - resp['Certificate']['Issuer'].should.equal('Amazon') - resp['Certificate']['KeyAlgorithm'].should.equal('RSA_2048') - resp['Certificate']['Status'].should.equal('PENDING_VALIDATION') - resp['Certificate']['Type'].should.equal('AMAZON_ISSUED') - len(resp['Certificate']['SubjectAlternativeNames']).should.equal(3) - - # Move time - frozen_time.move_to('2012-01-01 12:02:00') - resp = client.describe_certificate(CertificateArn=arn) - resp['Certificate']['CertificateArn'].should.equal(arn) - resp['Certificate']['Status'].should.equal('ISSUED') - - -@freeze_time("2012-01-01 12:00:00", as_arg=True) -@mock_acm -def test_request_certificate(frozen_time): - # After requesting a certificate, it should then auto-validate after 1 minute - # Some sneaky programming for that ;-) - client = boto3.client('acm', region_name='eu-central-1') - - resp = client.request_certificate( - IdempotencyToken='test_token', - DomainName='google.com', - SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], - ) - original_arn = resp['CertificateArn'] - - # Should be able to request a certificate multiple times in an hour - # after that it makes a new one - for time_intervals in ('2012-01-01 12:15:00', '2012-01-01 12:30:00', '2012-01-01 12:45:00'): - frozen_time.move_to(time_intervals) - resp = client.request_certificate( - IdempotencyToken='test_token', - DomainName='google.com', - SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], - ) - arn = resp['CertificateArn'] - arn.should.equal(original_arn) - - # Move time - frozen_time.move_to('2012-01-01 13:01:00') - resp = client.request_certificate( - IdempotencyToken='test_token', - DomainName='google.com', - SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], - ) - arn = resp['CertificateArn'] - arn.should_not.equal(original_arn) +# # Also tests the SAN code +# # requires Pull: https://github.com/spulec/freezegun/pull/210 +# @freeze_time("2012-01-01 12:00:00", as_arg=True) +# @mock_acm +# def test_request_certificate(frozen_time): +# # After requesting a certificate, it should then auto-validate after 1 minute +# # Some sneaky programming for that ;-) +# client = boto3.client('acm', region_name='eu-central-1') +# +# resp = client.request_certificate( +# DomainName='google.com', +# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], +# ) +# arn = resp['CertificateArn'] +# +# resp = client.describe_certificate(CertificateArn=arn) +# resp['Certificate']['CertificateArn'].should.equal(arn) +# resp['Certificate']['DomainName'].should.equal('google.com') +# resp['Certificate']['Issuer'].should.equal('Amazon') +# resp['Certificate']['KeyAlgorithm'].should.equal('RSA_2048') +# resp['Certificate']['Status'].should.equal('PENDING_VALIDATION') +# resp['Certificate']['Type'].should.equal('AMAZON_ISSUED') +# len(resp['Certificate']['SubjectAlternativeNames']).should.equal(3) +# +# # Move time +# frozen_time.move_to('2012-01-01 12:02:00') +# resp = client.describe_certificate(CertificateArn=arn) +# resp['Certificate']['CertificateArn'].should.equal(arn) +# resp['Certificate']['Status'].should.equal('ISSUED') +# +# +# # requires Pull: https://github.com/spulec/freezegun/pull/210 +# @freeze_time("2012-01-01 12:00:00", as_arg=True) +# @mock_acm +# def test_request_certificate(frozen_time): +# # After requesting a certificate, it should then auto-validate after 1 minute +# # Some sneaky programming for that ;-) +# client = boto3.client('acm', region_name='eu-central-1') +# +# resp = client.request_certificate( +# IdempotencyToken='test_token', +# DomainName='google.com', +# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], +# ) +# original_arn = resp['CertificateArn'] +# +# # Should be able to request a certificate multiple times in an hour +# # after that it makes a new one +# for time_intervals in ('2012-01-01 12:15:00', '2012-01-01 12:30:00', '2012-01-01 12:45:00'): +# frozen_time.move_to(time_intervals) +# resp = client.request_certificate( +# IdempotencyToken='test_token', +# DomainName='google.com', +# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], +# ) +# arn = resp['CertificateArn'] +# arn.should.equal(original_arn) +# +# # Move time +# frozen_time.move_to('2012-01-01 13:01:00') +# resp = client.request_certificate( +# IdempotencyToken='test_token', +# DomainName='google.com', +# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], +# ) +# arn = resp['CertificateArn'] +# arn.should_not.equal(original_arn) From 94ba2e68bdca09a48114723eedb40d38a75b6ba6 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 13 Apr 2018 15:05:13 -0400 Subject: [PATCH 158/182] SSM SendCommand InstanceIds are optional. Closes #1534. --- moto/ssm/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index af450e39e..bb25baa5f 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -142,7 +142,7 @@ class SimpleSystemManagerBackend(BaseBackend): return self._resource_tags[resource_type][resource_id] def send_command(self, **kwargs): - instances = kwargs['InstanceIds'] + instances = kwargs.get('InstanceIds', []) now = datetime.datetime.now() expires_after = now + datetime.timedelta(0, int(kwargs['TimeoutSeconds'])) return { From 3373c5bf13edf2dfe1eb9dd6e493630c387e8729 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 13 Apr 2018 15:17:38 -0400 Subject: [PATCH 159/182] Fix SNS max subject length. Closes #1503. --- moto/sns/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index a66523614..acfbac550 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -315,7 +315,8 @@ class SNSBackend(BaseBackend): return self._get_values_nexttoken(self.subscriptions, next_token) def publish(self, arn, message, subject=None, message_attributes=None): - if subject is not None and len(subject) >= 100: + if subject is not None and len(subject) > 100: + # Note that the AWS docs around length are wrong: https://github.com/spulec/moto/issues/1503 raise ValueError('Subject must be less than 100 characters') try: From 783504c897a53b3362e0cb72a027ba52b2b57ae2 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 14 Apr 2018 11:16:43 -0400 Subject: [PATCH 160/182] We shouldnt throw a ValidationException on empty dynamodb key. Closes #1505. --- moto/dynamodb2/responses.py | 3 +-- tests/test_dynamodb2/test_dynamodb.py | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 952d33efa..fce321c3d 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -161,8 +161,7 @@ class DynamoHandler(BaseResponse): name = self.body['TableName'] item = self.body['Item'] - res = re.search('\"\"', json.dumps(item)) - if res: + if any(list(param.values())[0] == '' for param in item.values() if isinstance(param, dict)): er = 'com.amazonaws.dynamodb.v20111205#ValidationException' return (400, {'server': 'amazon.com'}, diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 1d7fa4034..20ff80167 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -247,6 +247,33 @@ def test_scan_returns_consumed_capacity(): assert response['ConsumedCapacity']['TableName'] == name +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_put_item_with_special_chars(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' }, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + '"': {"S": "foo"}, + } + ) + + @requires_boto_gte("2.9") @mock_dynamodb2 def test_query_returns_consumed_capacity(): From a44b7e7f5c114c9a48131abbeef88422f28669d1 Mon Sep 17 00:00:00 2001 From: Phil Christensen Date: Fri, 9 Feb 2018 14:49:40 -0500 Subject: [PATCH 161/182] implementing mockable EBS snapshot copies --- moto/ec2/models.py | 9 +++++++++ moto/ec2/responses/elastic_block_store.py | 14 ++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index c94752ef6..31bfb4839 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1971,6 +1971,15 @@ class EBSBackend(object): matches = generic_filter(filters, matches) return matches + def copy_snapshot(self, source_snapshot_id, source_region, description=None): + source_snapshot = ec2_backends[source_region].describe_snapshots( + snapshot_ids=[source_snapshot_id])[0] + snapshot_id = random_snapshot_id() + snapshot = Snapshot(self, snapshot_id, volume=source_snapshot.volume, + description=description, encrypted=source_snapshot.encrypted) + self.snapshots[snapshot_id] = snapshot + return snapshot + def get_snapshot(self, snapshot_id): snapshot = self.snapshots.get(snapshot_id, None) if not snapshot: diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index cdc5b18e9..aa0d7f73b 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -16,9 +16,14 @@ class ElasticBlockStore(BaseResponse): return template.render(attachment=attachment) def copy_snapshot(self): + source_snapshot_id = self._get_param('SourceSnapshotId') + source_region = self._get_param('SourceRegion') + description = self._get_param('Description') if self.is_not_dryrun('CopySnapshot'): - raise NotImplementedError( - 'ElasticBlockStore.copy_snapshot is not yet implemented') + snapshot = self.ec2_backend.copy_snapshot( + source_snapshot_id, source_region, description) + template = self.response_template(COPY_SNAPSHOT_RESPONSE) + return template.render(snapshot=snapshot) def create_snapshot(self): volume_id = self._get_param('VolumeId') @@ -248,6 +253,11 @@ CREATE_SNAPSHOT_RESPONSE = """ + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + {{ snapshot.id }} +""" + DESCRIBE_SNAPSHOTS_RESPONSE = """ 59dbff89-35bd-4eac-99ed-be587EXAMPLE From ae2650ffc74c40cce8503d8e94e15b8510bd3c5b Mon Sep 17 00:00:00 2001 From: Phil Christensen Date: Sun, 15 Apr 2018 16:01:08 -0400 Subject: [PATCH 162/182] copy snapshot unit test --- tests/test_ec2/test_elastic_block_store.py | 54 ++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 95d410052..32ce1be22 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -6,6 +6,7 @@ from nose.tools import assert_raises from moto.ec2 import ec2_backends import boto import boto3 +from botocore.exceptions import ClientError from boto.exception import EC2ResponseError import sure # noqa @@ -587,6 +588,59 @@ def test_volume_tag_escaping(): dict(snaps[0].tags).should.equal({'key': ''}) +@mock_ec2 +def test_copy_snapshot(): + ec2_client = boto3.client('ec2', region_name='eu-west-1') + dest_ec2_client = boto3.client('ec2', region_name='eu-west-2') + + volume_response = ec2_client.create_volume( + AvailabilityZone='eu-west-1a', Size=10 + ) + + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-1" + ) + + ec2 = boto3.resource('ec2', region_name='eu-west-1') + dest_ec2 = boto3.resource('ec2', region_name='eu-west-2') + + source = ec2.Snapshot(create_snapshot_response['SnapshotId']) + dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) + + attribs = ['data_encryption_key_id', 'encrypted', + 'kms_key_id', 'owner_alias', 'owner_id', 'progress', + 'start_time', 'state', 'state_message', + 'tags', 'volume_id', 'volume_size'] + + for attrib in attribs: + getattr(source, attrib).should.equal(getattr(dest, attrib)) + + # Copy from non-existent source ID. + with assert_raises(ClientError) as cm: + create_snapshot_error = ec2_client.create_snapshot( + VolumeId='vol-abcd1234' + ) + cm.exception.response['Error']['Code'].should.equal('InvalidVolume.NotFound') + cm.exception.response['Error']['Message'].should.equal("The volume 'vol-abcd1234' does not exist.") + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + # Copy from non-existent source region. + with assert_raises(ClientError) as cm: + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-2" + ) + cm.exception.response['Error']['Code'].should.equal('InvalidSnapshot.NotFound') + cm.exception.response['Error']['Message'].should.be.none + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + @mock_ec2 def test_search_for_many_snapshots(): ec2_client = boto3.client('ec2', region_name='eu-west-1') From e20832d610bd0c37a2702aee4b60a3ce21db6912 Mon Sep 17 00:00:00 2001 From: Phil Christensen Date: Sun, 15 Apr 2018 17:01:23 -0400 Subject: [PATCH 163/182] update implementation coverage for copy_snapshot --- IMPLEMENTATION_COVERAGE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index bc4bc1696..1e63f3564 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1336,7 +1336,7 @@ - [ ] confirm_product_instance - [ ] copy_fpga_image - [X] copy_image -- [ ] copy_snapshot +- [X] copy_snapshot - [X] create_customer_gateway - [ ] create_default_subnet - [ ] create_default_vpc From ba3c9db8a76f1428892e867c68c1e2f4c04c1fa1 Mon Sep 17 00:00:00 2001 From: Akito Nozaki Date: Tue, 17 Apr 2018 11:32:39 -0700 Subject: [PATCH 164/182] Fixing create_key_and_certificate boolean parameter (#1572) --- moto/iot/responses.py | 2 +- tests/test_iot/test_iot.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/moto/iot/responses.py b/moto/iot/responses.py index f59c105da..4bd35bce4 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -103,7 +103,7 @@ class IoTResponse(BaseResponse): return json.dumps(dict()) def create_keys_and_certificate(self): - set_as_active = self._get_param("setAsActive") + set_as_active = self._get_bool_param("setAsActive") cert, key_pair = self.iot_backend.create_keys_and_certificate( set_as_active=set_as_active, ) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 7c01934d3..e69e55fc0 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -96,6 +96,23 @@ def test_certs(): res = client.list_certificates() res.should.have.key('certificates').which.should.have.length_of(0) +@mock_iot +def test_certs_create_inactive(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=False) + cert_id = cert['certificateId'] + + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('INACTIVE') + + client.update_certificate(certificateId=cert_id, newStatus='ACTIVE') + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + @mock_iot def test_policy(): client = boto3.client('iot', region_name='ap-northeast-1') From f401c60825872fe64f18a2a59d3a4ff89563028f Mon Sep 17 00:00:00 2001 From: wblack Date: Tue, 17 Apr 2018 16:27:48 +0000 Subject: [PATCH 165/182] Include SNS message attributes with message body when delivering to SQS. --- moto/core/responses.py | 48 +++++++++++++++++++++++ moto/sns/models.py | 12 ++++-- moto/sns/responses.py | 51 +++++++++++++++++++++++-- tests/test_sns/test_publishing_boto3.py | 30 ++++++++++++++- 4 files changed, 132 insertions(+), 9 deletions(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index ca5b9f7d2..ed4792083 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -492,6 +492,54 @@ class BaseResponse(_TemplateEnvironmentMixin): return results + def _get_object_map(self, prefix, name='Name', value='Value'): + """ + Given a query dict like + { + Prefix.1.Name: [u'event'], + Prefix.1.Value.StringValue: [u'order_cancelled'], + Prefix.1.Value.DataType: [u'String'], + Prefix.2.Name: [u'store'], + Prefix.2.Value.StringValue: [u'example_corp'], + Prefix.2.Value.DataType [u'String'], + } + + returns + { + 'event': { + 'DataType': 'String', + 'StringValue': 'example_corp' + }, + 'store': { + 'DataType': 'String', + 'StringValue': 'order_cancelled' + } + } + """ + object_map = {} + index = 1 + while True: + # Loop through looking for keys representing object name + name_key = '{0}.{1}.{2}'.format(prefix, index, name) + obj_name = self.querystring.get(name_key) + if not obj_name: + # Found all keys + break + + obj = {} + value_key_prefix = '{0}.{1}.{2}.'.format( + prefix, index, value) + for k, v in self.querystring.items(): + if k.startswith(value_key_prefix): + _, value_key = k.split(value_key_prefix, 1) + obj[value_key] = v[0] + + object_map[obj_name[0]] = obj + + index += 1 + + return object_map + @property def request_json(self): return 'JSON' in self.querystring.get('ContentType', []) diff --git a/moto/sns/models.py b/moto/sns/models.py index acfbac550..562e9c106 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -93,7 +93,7 @@ class Subscription(BaseModel): if self.protocol == 'sqs': queue_name = self.endpoint.split(":")[-1] region = self.endpoint.split(":")[3] - enveloped_message = json.dumps(self.get_post_data(message, message_id, subject), sort_keys=True, indent=2, separators=(',', ': ')) + enveloped_message = json.dumps(self.get_post_data(message, message_id, subject, message_attributes=message_attributes), sort_keys=True, indent=2, separators=(',', ': ')) sqs_backends[region].send_message(queue_name, enveloped_message) elif self.protocol in ['http', 'https']: post_data = self.get_post_data(message, message_id, subject) @@ -131,15 +131,16 @@ class Subscription(BaseModel): for rule in rules: if isinstance(rule, six.string_types): # only string value matching is supported - if message_attributes[field] == rule: + if message_attributes[field]['Value'] == rule: return True return False return all(_field_match(field, rules, message_attributes) for field, rules in six.iteritems(self._filter_policy)) - def get_post_data(self, message, message_id, subject): - return { + def get_post_data( + self, message, message_id, subject, message_attributes=None): + post_data = { "Type": "Notification", "MessageId": message_id, "TopicArn": self.topic.arn, @@ -151,6 +152,9 @@ class Subscription(BaseModel): "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem", "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55" } + if message_attributes: + post_data["MessageAttributes"] = message_attributes + return post_data class PlatformApplication(BaseModel): diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 7f23214cf..9c6f64f91 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -6,7 +6,7 @@ from collections import defaultdict from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from .models import sns_backends -from .exceptions import SNSNotFoundError +from .exceptions import SNSNotFoundError, InvalidParameterValue from .utils import is_e164 @@ -30,6 +30,48 @@ class SNSResponse(BaseResponse): in attributes ) + def _parse_message_attributes(self, prefix='', value_namespace='Value.'): + message_attributes = self._get_object_map( + 'MessageAttributes.entry', + name='Name', + value='Value' + ) + # SNS converts some key names before forwarding messages + # DataType -> Type, StringValue -> Value, BinaryValue -> Value + transformed_message_attributes = {} + for name, value in message_attributes.items(): + # validation + data_type = value['DataType'] + if not data_type: + raise InvalidParameterValue( + "The message attribute '{0}' must contain non-empty " + "message attribute value.".format(name)) + + data_type_parts = data_type.split('.') + if (len(data_type_parts) > 2 or + data_type_parts[0] not in ['String', 'Binary', 'Number']): + raise InvalidParameterValue( + "The message attribute '{0}' has an invalid message " + "attribute type, the set of supported type prefixes is " + "Binary, Number, and String.".format(name)) + + if 'StringValue' in value: + value = value['StringValue'] + elif 'BinaryValue' in 'Value': + value = value['BinaryValue'] + if not value: + raise InvalidParameterValue( + "The message attribute '{0}' must contain non-empty " + "message attribute value for message attribute " + "type '{1}'.".format(name, data_type[0])) + + # transformation + transformed_message_attributes[name] = { + 'Type': data_type, 'Value': value + } + + return transformed_message_attributes + def create_topic(self): name = self._get_param('Name') topic = self.backend.create_topic(name) @@ -241,9 +283,10 @@ class SNSResponse(BaseResponse): phone_number = self._get_param('PhoneNumber') subject = self._get_param('Subject') - message_attributes = self._get_map_prefix('MessageAttributes.entry', - key_end='Name', - value_end='Value') + try: + message_attributes = self._parse_message_attributes() + except InvalidParameterValue as e: + return self._error(e.description), dict(status=e.code) if phone_number is not None: # Check phone is correct syntax (e164) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 52347cc15..9a2403034 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -239,6 +239,11 @@ def test_filtering_exact_string(): messages = queue.receive_messages(MaxNumberOfMessages=5) message_bodies = [json.loads(m.body)['Message'] for m in messages] message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal( + [{'store': {'Type': 'String', 'Value': 'example_corp'}}]) + @mock_sqs @mock_sns @@ -256,6 +261,11 @@ def test_filtering_exact_string_multiple_message_attributes(): messages = queue.receive_messages(MaxNumberOfMessages=5) message_bodies = [json.loads(m.body)['Message'] for m in messages] message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) @mock_sqs @mock_sns @@ -275,6 +285,11 @@ def test_filtering_exact_string_OR_matching(): message_bodies = [json.loads(m.body)['Message'] for m in messages] message_bodies.should.equal( ['match example_corp', 'match different_corp']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([ + {'store': {'Type': 'String', 'Value': 'example_corp'}}, + {'store': {'Type': 'String', 'Value': 'different_corp'}}]) @mock_sqs @mock_sns @@ -294,6 +309,11 @@ def test_filtering_exact_string_AND_matching_positive(): message_bodies = [json.loads(m.body)['Message'] for m in messages] message_bodies.should.equal( ['match example_corp order_cancelled']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) @mock_sqs @mock_sns @@ -312,7 +332,9 @@ def test_filtering_exact_string_AND_matching_no_match(): messages = queue.receive_messages(MaxNumberOfMessages=5) message_bodies = [json.loads(m.body)['Message'] for m in messages] message_bodies.should.equal([]) - + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) @mock_sqs @mock_sns @@ -328,6 +350,9 @@ def test_filtering_exact_string_no_match(): messages = queue.receive_messages(MaxNumberOfMessages=5) message_bodies = [json.loads(m.body)['Message'] for m in messages] message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) @mock_sqs @mock_sns @@ -340,3 +365,6 @@ def test_filtering_exact_string_no_attributes_no_match(): messages = queue.receive_messages(MaxNumberOfMessages=5) message_bodies = [json.loads(m.body)['Message'] for m in messages] message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) From 0b36f06df101f0f7a15d2a70851e0992afd0c275 Mon Sep 17 00:00:00 2001 From: wblack Date: Wed, 18 Apr 2018 09:54:15 +0000 Subject: [PATCH 166/182] Fixes for linter warnings --- tests/test_sns/test_publishing_boto3.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 9a2403034..6ea29c986 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -2,8 +2,6 @@ from __future__ import unicode_literals import json -from six.moves.urllib.parse import parse_qs - import boto3 import re from freezegun import freeze_time @@ -12,7 +10,6 @@ import sure # noqa import responses from botocore.exceptions import ClientError from moto import mock_sns, mock_sqs -from freezegun import freeze_time MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' @@ -176,7 +173,6 @@ def test_publish_to_http(): response = conn.publish( TopicArn=topic_arn, Message="my message", Subject="my subject") - message_id = response['MessageId'] @mock_sqs From 794b8ba59e2bc0f9a07fbaa63d34624a4ff1707f Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Wed, 18 Apr 2018 11:24:31 -0700 Subject: [PATCH 167/182] SNS now supports all modern boto3 regions. --- moto/sns/models.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index acfbac550..4423997a0 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -4,11 +4,12 @@ import datetime import uuid import json -import boto.sns import requests import six import re +from boto3 import Session + from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds @@ -410,8 +411,8 @@ class SNSBackend(BaseBackend): sns_backends = {} -for region in boto.sns.regions(): - sns_backends[region.name] = SNSBackend(region.name) +for region in Session().get_available_regions('sns'): + sns_backends[region] = SNSBackend(region) DEFAULT_TOPIC_POLICY = { From 2ecb04d6e07e4f5835bc0e3a0a2e01d44cfc0148 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 18 Apr 2018 16:15:47 -0400 Subject: [PATCH 168/182] Revert errant change to S3 urls in cd1c6d3e6c3c80bf55fbfd6a657ba642c6fb4e3f. --- moto/s3/urls.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/s3/urls.py b/moto/s3/urls.py index af0a9954e..1d439a549 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -21,7 +21,7 @@ url_paths = { '{0}/$': S3ResponseInstance.bucket_response, # subdomain key of path-based bucket - '{0}/(?P[^/?]+)/?$': S3ResponseInstance.ambiguous_response, + '{0}/(?P[^/]+)/?$': S3ResponseInstance.ambiguous_response, # path-based bucket + key - '{0}/(?P[^/?]+)/(?P.+)': S3ResponseInstance.key_response, + '{0}/(?P[^/]+)/(?P.+)': S3ResponseInstance.key_response, } From 9f7330a588614c21bb35b8a9597ba871d52ab27e Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 18 Apr 2018 18:40:30 -0400 Subject: [PATCH 169/182] Update changelog. --- CHANGELOG.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c85e38536..2d512a60e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,15 @@ Moto Changelog =================== -1.3.1 +1.3.3 +------ + + * APIGateway region fixes + * ECS improvements + * Add @mock_cognitoidentity, thanks to @brcoding + + +1.3.2 ------ The huge change in this version is that the responses library is no longer vendored. Many developers are now unblocked. Kudos to @spulec for the fix. From 7cc08a9c5c491f1deb0b1363e90615037fdd2549 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 18 Apr 2018 18:40:32 -0400 Subject: [PATCH 170/182] bumping to version 1.3.3 --- .bumpversion.cfg | 2 +- moto/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 6459ed410..3e15854ef 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.3.2 +current_version = 1.3.3 [bumpversion:file:setup.py] diff --git a/moto/__init__.py b/moto/__init__.py index 6caa7f8ca..c6f24388b 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.3.2' +__version__ = '1.3.3' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa diff --git a/setup.py b/setup.py index 79daa6081..27be6cfeb 100755 --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ else: setup( name='moto', - version='1.3.2', + version='1.3.3', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 9a1dcddf1e9f51ca610988f2304e36372c08e8f6 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 18 Apr 2018 18:40:42 -0400 Subject: [PATCH 171/182] Updating implementation coverage --- IMPLEMENTATION_COVERAGE.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index bc4bc1696..94fa27438 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -434,7 +434,6 @@ - [ ] get_applied_schema_version - [ ] get_directory - [ ] get_facet -- [ ] get_object_attributes - [ ] get_object_information - [ ] get_schema_as_json - [ ] get_typed_link_facet_information @@ -4379,7 +4378,6 @@ - [ ] create_user - [ ] delete_alias - [ ] delete_group -- [ ] delete_mailbox_permissions - [ ] delete_resource - [ ] delete_user - [ ] deregister_from_work_mail @@ -4392,12 +4390,10 @@ - [ ] list_aliases - [ ] list_group_members - [ ] list_groups -- [ ] list_mailbox_permissions - [ ] list_organizations - [ ] list_resource_delegates - [ ] list_resources - [ ] list_users -- [ ] put_mailbox_permissions - [ ] register_to_work_mail - [ ] reset_password - [ ] update_primary_email_address From cac41a39a03cd1766c38c339e1f58f3b68f1e007 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 18 Apr 2018 18:41:51 -0400 Subject: [PATCH 172/182] update changelog. --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d512a60e..fb3a5d8d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ Moto Changelog 1.3.3 ------ + * Fix a regression in S3 url regexes * APIGateway region fixes * ECS improvements * Add @mock_cognitoidentity, thanks to @brcoding From 05f16cfcf9510c11e05ae384d0dcdf7914347072 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 18 Apr 2018 22:23:11 -0400 Subject: [PATCH 173/182] Fixes to dynamodb empty keys. --- moto/dynamodb2/responses.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index fce321c3d..3c7e7ffc2 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -8,6 +8,18 @@ from moto.core.utils import camelcase_to_underscores, amzn_request_id from .models import dynamodb_backends, dynamo_json_dump +def has_empty_keys_or_values(_dict): + if _dict == "": + return True + if not isinstance(_dict, dict): + return False + return any( + key == '' or value == '' or + has_empty_keys_or_values(value) + for key, value in _dict.items() + ) + + class DynamoHandler(BaseResponse): def get_endpoint_name(self, headers): @@ -161,7 +173,7 @@ class DynamoHandler(BaseResponse): name = self.body['TableName'] item = self.body['Item'] - if any(list(param.values())[0] == '' for param in item.values() if isinstance(param, dict)): + if has_empty_keys_or_values(item): er = 'com.amazonaws.dynamodb.v20111205#ValidationException' return (400, {'server': 'amazon.com'}, From 311966e28de060e899cb81025a9b1dd85cd08834 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 18 Apr 2018 22:47:02 -0400 Subject: [PATCH 174/182] Add IAMDatabaseAuthenticationEnabled and DbiResourceId to RDS response. Closes #1465. --- moto/rds2/models.py | 4 ++++ tests/test_rds2/test_rds2.py | 21 ++++++++++++--------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 268ae5af2..29fa95959 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -103,6 +103,8 @@ class Database(BaseModel): if not self.option_group_name and self.engine in self.default_option_groups: self.option_group_name = self.default_option_groups[self.engine] self.character_set_name = kwargs.get('character_set_name', None) + self.iam_database_authentication_enabled = False + self.dbi_resource_id = "db-M5ENSHXFPU6XHZ4G4ZEI5QIO2U" self.tags = kwargs.get('tags', []) @property @@ -142,6 +144,7 @@ class Database(BaseModel): {{ database.multi_az }} {{ database.db_instance_identifier }} + {{ database.dbi_resource_id }} 03:50-04:20 wed:06:38-wed:07:08 @@ -163,6 +166,7 @@ class Database(BaseModel): {{ database.source_db_identifier }} {% endif %} {{ database.engine }} + {{database.iam_database_authentication_enabled }} {{ database.license_model }} {{ database.engine_version }} diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index ea0ab378f..d056049b5 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -19,17 +19,20 @@ def test_create_database(): MasterUserPassword='hunter2', Port=1234, DBSecurityGroups=["my_sg"]) - database['DBInstance']['AllocatedStorage'].should.equal(10) - database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") - database['DBInstance']['LicenseModel'].should.equal("license-included") - database['DBInstance']['MasterUsername'].should.equal("root") - database['DBInstance']['DBSecurityGroups'][0][ + db_instance = database['DBInstance'] + db_instance['AllocatedStorage'].should.equal(10) + db_instance['DBInstanceClass'].should.equal("db.m1.small") + db_instance['LicenseModel'].should.equal("license-included") + db_instance['MasterUsername'].should.equal("root") + db_instance['DBSecurityGroups'][0][ 'DBSecurityGroupName'].should.equal('my_sg') - database['DBInstance']['DBInstanceArn'].should.equal( + db_instance['DBInstanceArn'].should.equal( 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') - database['DBInstance']['DBInstanceStatus'].should.equal('available') - database['DBInstance']['DBName'].should.equal('staging-postgres') - database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") + db_instance['DBInstanceStatus'].should.equal('available') + db_instance['DBName'].should.equal('staging-postgres') + db_instance['DBInstanceIdentifier'].should.equal("db-master-1") + db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False) + db_instance['DbiResourceId'].should.contain("db-") @mock_rds2 From 6332ed9df9b34f421294971f7914758387aa40ef Mon Sep 17 00:00:00 2001 From: Darien Hager Date: Thu, 19 Apr 2018 00:16:03 -0700 Subject: [PATCH 175/182] Add test that certain FIFO-queue attributes flow through from sender to receiver --- tests/test_sqs/test_sqs.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 05936ab39..c5ad39eb0 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -170,6 +170,28 @@ def test_message_with_complex_attributes(): messages.should.have.length_of(1) +@mock_sqs +def test_send_message_with_message_group_id(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="test-group-id.fifo", + Attributes={'FifoQueue': 'true'}) + + sent = queue.send_message( + MessageBody="mydata", + MessageDeduplicationId="dedupe_id_1", + MessageGroupId="group_id_1", + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + message_attributes = messages[0].attributes + message_attributes.should.contain('MessageGroupId') + message_attributes['MessageGroupId'].should.equal('group_id_1') + message_attributes.should.contain('MessageDeduplicationId') + message_attributes['MessageDeduplicationId'].should.equal('dedupe_id_1') + + @mock_sqs def test_send_message_with_unicode_characters(): body_one = 'Héllo!😀' From 6556ba89cddbbdd67db6a0b1ae81054ba12b0acd Mon Sep 17 00:00:00 2001 From: Darien Hager Date: Thu, 19 Apr 2018 00:22:35 -0700 Subject: [PATCH 176/182] Basic plumbing to preserve MessageGroupId and MessageDeduplicationID, if they are provided. --- moto/sqs/models.py | 10 +++++++++- moto/sqs/responses.py | 18 +++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 044759e4f..9c8858bc0 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -38,6 +38,8 @@ class Message(BaseModel): self.sent_timestamp = None self.approximate_first_receive_timestamp = None self.approximate_receive_count = 0 + self.deduplication_id = None + self.group_id = None self.visible_at = 0 self.delayed_until = 0 @@ -400,7 +402,7 @@ class SQSBackend(BaseBackend): queue._set_attributes(attributes) return queue - def send_message(self, queue_name, message_body, message_attributes=None, delay_seconds=None): + def send_message(self, queue_name, message_body, message_attributes=None, delay_seconds=None, deduplication_id=None, group_id=None): queue = self.get_queue(queue_name) @@ -412,6 +414,12 @@ class SQSBackend(BaseBackend): message_id = get_random_message_id() message = Message(message_id, message_body) + # Attributes, but not *message* attributes + if deduplication_id is not None: + message.deduplication_id = deduplication_id + if group_id is not None: + message.group_id = group_id + if message_attributes: message.message_attributes = message_attributes diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index c475f0ce0..adf3e7a6e 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -198,6 +198,8 @@ class SQSResponse(BaseResponse): def send_message(self): message = self._get_param('MessageBody') delay_seconds = int(self._get_param('DelaySeconds', 0)) + message_group_id = self._get_param("MessageGroupId") + message_dedupe_id = self._get_param("MessageDeduplicationId") if len(message) > MAXIMUM_MESSAGE_LENGTH: return ERROR_TOO_LONG_RESPONSE, dict(status=400) @@ -213,7 +215,9 @@ class SQSResponse(BaseResponse): queue_name, message, message_attributes=message_attributes, - delay_seconds=delay_seconds + delay_seconds=delay_seconds, + deduplication_id=message_dedupe_id, + group_id=message_group_id ) template = self.response_template(SEND_MESSAGE_RESPONSE) return template.render(message=message, message_attributes=message_attributes) @@ -491,6 +495,18 @@ RECEIVE_MESSAGE_RESPONSE = """ ApproximateFirstReceiveTimestamp {{ message.approximate_first_receive_timestamp }} + {% if message.deduplication_id is not none %} + + MessageDeduplicationId + {{ message.deduplication_id }} + + {% endif %} + {% if message.group_id is not none %} + + MessageGroupId + {{ message.group_id }} + + {% endif %} {% if message.message_attributes.items()|count > 0 %} {{- message.attribute_md5 -}} {% endif %} From e931456204f6dfa0d5124b0ee26c5511dae4d31a Mon Sep 17 00:00:00 2001 From: Louis Willcock Date: Fri, 20 Apr 2018 10:25:28 +1000 Subject: [PATCH 177/182] UPDATE getting_started.rst - improve wording Found a few sentences in the Docs that I thought could be a bit more readable, hopefully improved them. --- docs/docs/getting_started.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/docs/getting_started.rst b/docs/docs/getting_started.rst index 97f667d26..d52e76235 100644 --- a/docs/docs/getting_started.rst +++ b/docs/docs/getting_started.rst @@ -20,7 +20,7 @@ If you want to install ``moto`` from source:: Moto usage ---------- -For example we have the following code we want to test: +For example, we have the following code we want to test: .. sourcecode:: python @@ -39,12 +39,12 @@ For example we have the following code we want to test: k.key = self.name k.set_contents_from_string(self.value) -There are several method to do this, just keep in mind Moto creates a full blank environment. +There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment. Decorator ~~~~~~~~~ -With a decorator wrapping all the calls to S3 are automatically mocked out. +With a decorator wrapping, all the calls to S3 are automatically mocked out. .. sourcecode:: python @@ -66,7 +66,7 @@ With a decorator wrapping all the calls to S3 are automatically mocked out. Context manager ~~~~~~~~~~~~~~~ -Same as decorator, every call inside ``with`` statement are mocked out. +Same as the Decorator, every call inside the ``with`` statement is mocked out. .. sourcecode:: python @@ -83,7 +83,7 @@ Same as decorator, every call inside ``with`` statement are mocked out. Raw ~~~ -You can also start and stop manually the mocking. +You can also start and stop the mocking manually. .. sourcecode:: python @@ -104,11 +104,11 @@ You can also start and stop manually the mocking. Stand-alone server mode ~~~~~~~~~~~~~~~~~~~~~~~ -Moto comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. It is very useful to test even if you don't use Python. +Moto also comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. For testing purposes, it's extremely useful even if you don't use Python. .. sourcecode:: bash $ moto_server ec2 -p3000 * Running on http://127.0.0.1:3000/ -This method isn't encouraged if you're using ``boto``, best is to use decorator method. +However, this method isn't encouraged if you're using ``boto``, the best solution would be to use a decorator method. From 04b36b4488b19e516ea6e06525e385a358be7727 Mon Sep 17 00:00:00 2001 From: David Baumgold Date: Sat, 21 Apr 2018 09:46:12 +0200 Subject: [PATCH 178/182] Allow dateutil below 3.0.0 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 27be6cfeb..ebbf6f0cd 100755 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ install_requires = [ "werkzeug", "pyaml", "pytz", - "python-dateutil<2.7.0,>=2.1", + "python-dateutil<3.0.0,>=2.1", "mock", "docker>=2.5.1", "jsondiff==1.1.1", From 21a264c337cfc6cb30b57420a16fb33eede19248 Mon Sep 17 00:00:00 2001 From: Tom Elliff Date: Mon, 23 Apr 2018 19:41:54 +0100 Subject: [PATCH 179/182] Default TimeoutSeconds to 1 hour (#1592) TimeoutSeconds isn't a required field so we can't rely on it being there. Quick tests against the AWS API show that when it's not specified the ExpiresAfter field seems to be 1 hour after the request. --- moto/ssm/models.py | 2 +- tests/test_ssm/test_ssm_boto3.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index bb25baa5f..fc74e1524 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -144,7 +144,7 @@ class SimpleSystemManagerBackend(BaseBackend): def send_command(self, **kwargs): instances = kwargs.get('InstanceIds', []) now = datetime.datetime.now() - expires_after = now + datetime.timedelta(0, int(kwargs['TimeoutSeconds'])) + expires_after = now + datetime.timedelta(0, int(kwargs.get('TimeoutSeconds', 3600))) return { 'Command': { 'CommandId': str(uuid.uuid4()), diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 97801e0b9..0531d1780 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -481,7 +481,6 @@ def test_send_command(): response = client.send_command( InstanceIds=['i-123456'], DocumentName=ssm_document, - TimeoutSeconds=60, Parameters=params, OutputS3Region='us-east-2', OutputS3BucketName='the-bucket', From fad439447481b10ebea43133ad67952d255491e9 Mon Sep 17 00:00:00 2001 From: Iain Bullard Date: Tue, 24 Apr 2018 17:51:49 +0100 Subject: [PATCH 180/182] SQS add missing validation to ReceiveMessage (#1595) * SQS receive_message - enforce bounds on MaxNumberOfMessages as AWS does * SQS receive_message - enforce bounds on WaitTimeSeconds as AWS does --- moto/sqs/responses.py | 16 ++++++++++++++ tests/test_sqs/test_sqs.py | 44 ++++++++++++++++++++++++++------------ 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index adf3e7a6e..c489d7118 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -325,11 +325,27 @@ class SQSResponse(BaseResponse): except TypeError: message_count = DEFAULT_RECEIVED_MESSAGES + if message_count < 1 or message_count > 10: + return self._error( + "InvalidParameterValue", + "An error occurred (InvalidParameterValue) when calling " + "the ReceiveMessage operation: Value %s for parameter " + "MaxNumberOfMessages is invalid. Reason: must be between " + "1 and 10, if provided." % message_count) + try: wait_time = int(self.querystring.get("WaitTimeSeconds")[0]) except TypeError: wait_time = queue.receive_message_wait_time_seconds + if wait_time < 0 or wait_time > 20: + return self._error( + "InvalidParameterValue", + "An error occurred (InvalidParameterValue) when calling " + "the ReceiveMessage operation: Value %s for parameter " + "WaitTimeSeconds is invalid. Reason: must be <= 0 and " + ">= 20 if provided." % wait_time) + try: visibility_timeout = self._get_validated_visibility_timeout() except TypeError: diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index c5ad39eb0..1280fed80 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -378,6 +378,36 @@ def test_send_receive_message_timestamps(): int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) +@mock_sqs +def test_max_number_of_messages_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=11) + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=0) + + # no error but also no messages returned + queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0) + + +@mock_sqs +def test_wait_time_seconds_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=-1) + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=21) + + # no error but also no messages returned + queue.receive_messages(WaitTimeSeconds=0) + + @mock_sqs def test_receive_messages_with_wait_seconds_timeout_of_zero(): """ @@ -393,20 +423,6 @@ def test_receive_messages_with_wait_seconds_timeout_of_zero(): messages.should.equal([]) -@mock_sqs -def test_receive_messages_with_wait_seconds_timeout_of_negative_one(): - """ - test that zero messages is returned with a wait_seconds_timeout of negative 1 - :return: - """ - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - messages = queue.receive_messages(WaitTimeSeconds=-1) - messages.should.equal([]) - - @mock_sqs_deprecated def test_send_message_with_xml_characters(): conn = boto.connect_sqs('the_key', 'the_secret') From ba2ea8e1b32348346f382a19ee6d0e99a3c63fa3 Mon Sep 17 00:00:00 2001 From: wblackconv Date: Fri, 27 Apr 2018 23:10:30 +0100 Subject: [PATCH 181/182] Add tests for message attribute validation in SNS (#1582) * Add tests for message attribute validation in SNS Fixes up bug in return value of moto.sns.responses.SNSResponse._parse_message_attributes due to accidental recycling of a variable. * Fix test_sns.test_publish_to_http in py36 env Http response is encoded as a byte string which json.loads can't handle. --- moto/sns/responses.py | 16 ++--- tests/test_sns/test_publishing_boto3.py | 87 ++++++++++++++++++++++++- 2 files changed, 93 insertions(+), 10 deletions(-) diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 9c6f64f91..035d56584 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -55,11 +55,12 @@ class SNSResponse(BaseResponse): "attribute type, the set of supported type prefixes is " "Binary, Number, and String.".format(name)) + transform_value = None if 'StringValue' in value: - value = value['StringValue'] - elif 'BinaryValue' in 'Value': - value = value['BinaryValue'] - if not value: + transform_value = value['StringValue'] + elif 'BinaryValue' in value: + transform_value = value['BinaryValue'] + if not transform_value: raise InvalidParameterValue( "The message attribute '{0}' must contain non-empty " "message attribute value for message attribute " @@ -67,7 +68,7 @@ class SNSResponse(BaseResponse): # transformation transformed_message_attributes[name] = { - 'Type': data_type, 'Value': value + 'Type': data_type, 'Value': transform_value } return transformed_message_attributes @@ -283,10 +284,7 @@ class SNSResponse(BaseResponse): phone_number = self._get_param('PhoneNumber') subject = self._get_param('Subject') - try: - message_attributes = self._parse_message_attributes() - except InvalidParameterValue as e: - return self._error(e.description), dict(status=e.code) + message_attributes = self._parse_message_attributes() if phone_number is not None: # Check phone is correct syntax (e164) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 6ea29c986..7db072287 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals +import base64 import json import boto3 @@ -41,6 +42,83 @@ def test_publish_to_sqs(): acquired_message.should.equal(expected) +@mock_sqs +@mock_sns +def test_publish_to_sqs_bad(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + try: + # Test missing Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': {'DataType': 'String'}}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty DataType (if the DataType field is missing entirely + # botocore throws an exception during validation) + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': '', + 'StringValue': 'example_corp' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': 'String', + 'StringValue': '' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_msg_attr_byte_value(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + queue = sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': 'Binary', + 'BinaryValue': b'\x02\x03\x04' + }}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': { + 'Type': 'Binary', + 'Value': base64.b64encode(b'\x02\x03\x04').decode() + } + }]) + + @mock_sns def test_publish_sms(): client = boto3.client('sns', region_name='us-east-1') @@ -153,7 +231,9 @@ def test_publish_to_sqs_in_different_region(): def test_publish_to_http(): def callback(request): request.headers["Content-Type"].should.equal("application/json") - json.loads.when.called_with(request.body).should_not.throw(Exception) + json.loads.when.called_with( + request.body.decode() + ).should_not.throw(Exception) return 200, {}, "" responses.add_callback( @@ -263,6 +343,7 @@ def test_filtering_exact_string_multiple_message_attributes(): 'store': {'Type': 'String', 'Value': 'example_corp'}, 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + @mock_sqs @mock_sns def test_filtering_exact_string_OR_matching(): @@ -287,6 +368,7 @@ def test_filtering_exact_string_OR_matching(): {'store': {'Type': 'String', 'Value': 'example_corp'}}, {'store': {'Type': 'String', 'Value': 'different_corp'}}]) + @mock_sqs @mock_sns def test_filtering_exact_string_AND_matching_positive(): @@ -311,6 +393,7 @@ def test_filtering_exact_string_AND_matching_positive(): 'store': {'Type': 'String', 'Value': 'example_corp'}, 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + @mock_sqs @mock_sns def test_filtering_exact_string_AND_matching_no_match(): @@ -332,6 +415,7 @@ def test_filtering_exact_string_AND_matching_no_match(): json.loads(m.body)['MessageAttributes'] for m in messages] message_attributes.should.equal([]) + @mock_sqs @mock_sns def test_filtering_exact_string_no_match(): @@ -350,6 +434,7 @@ def test_filtering_exact_string_no_match(): json.loads(m.body)['MessageAttributes'] for m in messages] message_attributes.should.equal([]) + @mock_sqs @mock_sns def test_filtering_exact_string_no_attributes_no_match(): From cb364eedc6e4edcfa3c65f152c7d13f1e5fe2ea4 Mon Sep 17 00:00:00 2001 From: Alex Casalboni Date: Mon, 30 Apr 2018 20:02:47 +0200 Subject: [PATCH 182/182] Implement SSM Parameter Store filters support (GetParametersByPath API) (#1604) * added tests for SSM Parameter Store filters (GetParametersByPath - ParameterStringFilter) * implemented SSM Parameter Store filters support (only for get_parameters_by_path API) * adding myself to authors file --- AUTHORS.md | 1 + moto/ssm/models.py | 27 +++++++++- moto/ssm/responses.py | 3 +- tests/test_ssm/test_ssm_boto3.py | 88 ++++++++++++++++++++++++++++++++ 4 files changed, 117 insertions(+), 2 deletions(-) diff --git a/AUTHORS.md b/AUTHORS.md index ded1935e9..6b7c96291 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -52,3 +52,4 @@ Moto is written by Steve Pulec with contributions from: * [Clive Li](https://github.com/cliveli) * [Jim Shields](https://github.com/jimjshields) * [William Richard](https://github.com/william-richard) +* [Alex Casalboni](https://github.com/alexcasalboni) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index fc74e1524..aaeccc887 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -93,7 +93,7 @@ class SimpleSystemManagerBackend(BaseBackend): result.append(self._parameters[name]) return result - def get_parameters_by_path(self, path, with_decryption, recursive): + def get_parameters_by_path(self, path, with_decryption, recursive, filters=None): """Implement the get-parameters-by-path-API in the backend.""" result = [] # path could be with or without a trailing /. we handle this @@ -104,10 +104,35 @@ class SimpleSystemManagerBackend(BaseBackend): continue if '/' in param[len(path) + 1:] and not recursive: continue + if not self._match_filters(self._parameters[param], filters): + continue result.append(self._parameters[param]) return result + @staticmethod + def _match_filters(parameter, filters=None): + """Return True if the given parameter matches all the filters""" + for filter_obj in (filters or []): + key = filter_obj['Key'] + option = filter_obj.get('Option', 'Equals') + values = filter_obj.get('Values', []) + + what = None + if key == 'Type': + what = parameter.type + elif key == 'KeyId': + what = parameter.keyid + + if option == 'Equals'\ + and not any(what == value for value in values): + return False + elif option == 'BeginsWith'\ + and not any(what.startswith(value) for value in values): + return False + # True if no false match (or no filters at all) + return True + def get_parameter(self, name, with_decryption): if name in self._parameters: return self._parameters[name] diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index d9906a82e..e35eca5ee 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -85,9 +85,10 @@ class SimpleSystemManagerResponse(BaseResponse): path = self._get_param('Path') with_decryption = self._get_param('WithDecryption') recursive = self._get_param('Recursive', False) + filters = self._get_param('ParameterFilters') result = self.ssm_backend.get_parameters_by_path( - path, with_decryption, recursive + path, with_decryption, recursive, filters ) response = { diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 0531d1780..ad48fd7ed 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -76,6 +76,25 @@ def test_get_parameters_by_path(): Value='value4', Type='String') + client.put_parameter( + Name='/baz/name1', + Description='A test parameter (list)', + Value='value1,value2,value3', + Type='StringList') + + client.put_parameter( + Name='/baz/name2', + Description='A test parameter', + Value='value1', + Type='String') + + client.put_parameter( + Name='/baz/pwd', + Description='A secure test parameter', + Value='my_secret', + Type='SecureString', + KeyId='alias/aws/ssm') + response = client.get_parameters_by_path(Path='/foo') len(response['Parameters']).should.equal(2) {p['Value'] for p in response['Parameters']}.should.equal( @@ -92,6 +111,75 @@ def test_get_parameters_by_path(): set(['value3', 'value4']) ) + response = client.get_parameters_by_path(Path='/baz') + len(response['Parameters']).should.equal(3) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + # note: 'Option' is optional (default: 'Equals') + filters = [{ + 'Key': 'Type', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String', 'SecureString'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2', '/baz/pwd']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'BeginsWith', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1', '/baz/name2']) + ) + + filters = [{ + 'Key': 'KeyId', + 'Option': 'Equals', + 'Values': ['alias/aws/ssm'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/pwd']) + ) + @mock_ssm def test_put_parameter():