From 6e89a22008f88d6ea86d7c435a65156ef13472a5 Mon Sep 17 00:00:00 2001 From: Robert Lewis Date: Fri, 18 Jan 2019 07:52:21 -0800 Subject: [PATCH 01/41] Remove request_id from templates. --- moto/autoscaling/responses.py | 20 ++++++++++---------- moto/sqs/responses.py | 26 +++++++++++++------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 6a7913021..3b2752d46 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -404,7 +404,7 @@ ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """ -{{ requestid }} + """ @@ -412,7 +412,7 @@ ATTACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -454,7 +454,7 @@ DETACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -654,7 +654,7 @@ DELETE_POLICY_TEMPLATE = """ -{{ requestid }} + """ @@ -670,14 +670,14 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ -{{ requestid }} + """ @@ -690,13 +690,13 @@ SUSPEND_PROCESSES_TEMPLATE = """ -{{ requestid }} + """ SET_INSTANCE_PROTECTION_TEMPLATE = """ -{{ requestid }} + """ diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index b4f64b14e..5ddaf8849 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -420,7 +420,7 @@ CREATE_QUEUE_RESPONSE = """ {{ queue.visibility_timeout }} - {{ requestid }} + """ @@ -429,7 +429,7 @@ GET_QUEUE_URL_RESPONSE = """ {{ queue.url(request_url) }} - {{ requestid }} + """ @@ -440,13 +440,13 @@ LIST_QUEUES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_QUEUE_RESPONSE = """ - {{ requestid }} + """ @@ -460,13 +460,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ SET_QUEUE_ATTRIBUTE_RESPONSE = """ - {{ requestid }} + """ @@ -483,7 +483,7 @@ SEND_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -543,7 +543,7 @@ RECEIVE_MESSAGE_RESPONSE = """ {% endfor %} - {{ requestid }} + """ @@ -561,13 +561,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -580,13 +580,13 @@ DELETE_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ CHANGE_MESSAGE_VISIBILITY_RESPONSE = """ - {{ requestid }} + """ @@ -613,7 +613,7 @@ CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """ - {{ requestid }} + """ From 19a0179608393d1093805589c441d59cef549fac Mon Sep 17 00:00:00 2001 From: Robert Lewis Date: Fri, 18 Jan 2019 07:52:47 -0800 Subject: [PATCH 02/41] Use regex to populate requestId XML tag --- moto/core/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/core/utils.py b/moto/core/utils.py index 777a03752..ca670e871 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -280,7 +280,7 @@ def amzn_request_id(f): # Update request ID in XML try: - body = body.replace('{{ requestid }}', request_id) + body = re.sub(r'(?<=).*(?=<\/RequestId>)', request_id, body) except Exception: # Will just ignore if it cant work on bytes (which are str's on python2) pass From acdb1c9768c1923fecad9d3afb93b092b5f10eb3 Mon Sep 17 00:00:00 2001 From: Robert Lewis Date: Fri, 18 Jan 2019 08:32:45 -0800 Subject: [PATCH 03/41] Add requestid checking for sqs --- tests/test_sqs/test_sqs.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 9beb9a3fa..d53ae50f7 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -416,7 +416,9 @@ def test_send_receive_message_timestamps(): conn.create_queue(QueueName="test-queue") queue = sqs.Queue("test-queue") - 
queue.send_message(MessageBody="derp") + response = queue.send_message(MessageBody="derp") + assert response['ResponseMetadata']['RequestId'] + messages = conn.receive_message( QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages'] From 570b73691beb1c562ef7db4dd4731a9b848bff2c Mon Sep 17 00:00:00 2001 From: Robert Lewis Date: Fri, 18 Jan 2019 16:09:42 -0800 Subject: [PATCH 04/41] Add requestid checking for autoscaling --- tests/test_autoscaling/test_autoscaling.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index b1a65fb7e..d01f2dcb3 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -543,6 +543,7 @@ def test_describe_load_balancers(): ) response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + assert response['ResponseMetadata']['RequestId'] list(response['LoadBalancers']).should.have.length_of(1) response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') From 4d9039bf907cc2ecd7992aa3a746a14778b5ce2d Mon Sep 17 00:00:00 2001 From: Robert Lewis Date: Mon, 21 Jan 2019 13:08:59 -0800 Subject: [PATCH 05/41] Add name to authors file --- AUTHORS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS.md b/AUTHORS.md index 0a152505a..5eb313dda 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -54,3 +54,4 @@ Moto is written by Steve Pulec with contributions from: * [William Richard](https://github.com/william-richard) * [Alex Casalboni](https://github.com/alexcasalboni) * [Jon Beilke](https://github.com/jrbeilke) +* [Robert Lewis](https://github.com/ralewis85) From b822db8d8c9edeffcbc715ac77c568f489822b5a Mon Sep 17 00:00:00 2001 From: shiba24 Date: Tue, 30 Apr 2019 19:35:21 +0900 Subject: [PATCH 06/41] Support create_table with PAY_PER_REQUEST billing mode of DynamoDB --- moto/dynamodb2/responses.py | 12 ++++++++++-- tests/test_dynamodb2/test_dynamodb.py | 27 +++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 49095f09c..e382efc1d 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -156,8 +156,16 @@ class DynamoHandler(BaseResponse): body = self.body # get the table name table_name = body['TableName'] - # get the throughput - throughput = body["ProvisionedThroughput"] + # check billing mode and get the throughput + if "BillingMode" in body.keys() and body["BillingMode"] == "PAY_PER_REQUEST": + if "ProvisionedThroughput" in body.keys(): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, + 'ProvisionedThroughput cannot be specified \ + when BillingMode is PAY_PER_REQUEST') + throughput = None + else: # Provisioned (default billing mode) + throughput = body["ProvisionedThroughput"] # getting the schema key_schema = body['KeySchema'] # getting attribute definition diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 32fd61d16..208453f0a 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -949,6 +949,33 @@ def test_bad_scan_filter(): raise RuntimeError('Should of raised ResourceInUseException') +@mock_dynamodb2 +def test_create_table_pay_per_request(): + client = boto3.client('dynamodb', region_name='us-east-1') + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 
'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + BillingMode="PAY_PER_REQUEST" + ) + + +@mock_dynamodb2 +def test_create_table_error_pay_per_request_with_provisioned_param(): + client = boto3.client('dynamodb', region_name='us-east-1') + + try: + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}, + BillingMode="PAY_PER_REQUEST" + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ValidationException') + + @mock_dynamodb2 def test_duplicate_create(): client = boto3.client('dynamodb', region_name='us-east-1') From 8cb4db1896521af99ff295b028f604c6ab599401 Mon Sep 17 00:00:00 2001 From: Kyle Decot Date: Thu, 2 May 2019 14:00:28 -0400 Subject: [PATCH 07/41] Adds Support for filtering on schedulingStrategy in ECS#list_services (#2180) --- moto/ecs/models.py | 7 +++++-- moto/ecs/responses.py | 3 ++- tests/test_ecs/test_ecs_boto3.py | 17 +++++++++++++---- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index efa8a6cd0..a314c7776 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -699,12 +699,15 @@ class EC2ContainerServiceBackend(BaseBackend): return service - def list_services(self, cluster_str): + def list_services(self, cluster_str, scheduling_strategy=None): cluster_name = cluster_str.split('/')[-1] service_arns = [] for key, value in self.services.items(): if cluster_name + ':' in key: - service_arns.append(self.services[key].arn) + service = self.services[key] + if scheduling_strategy is None or service.scheduling_strategy == scheduling_strategy: + service_arns.append(service.arn) + return sorted(service_arns) def describe_services(self, cluster_str, service_names_or_arns): diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index 964ef59d2..92b769fad 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -163,7 +163,8 @@ class EC2ContainerServiceResponse(BaseResponse): def list_services(self): cluster_str = self._get_param('cluster') - service_arns = self.ecs_backend.list_services(cluster_str) + scheduling_strategy = self._get_param('schedulingStrategy') + service_arns = self.ecs_backend.list_services(cluster_str, scheduling_strategy) return json.dumps({ 'serviceArns': service_arns # , diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 3bf25b8fc..b147c4159 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -388,23 +388,32 @@ def test_list_services(): cluster='test_ecs_cluster', serviceName='test_ecs_service1', taskDefinition='test_ecs_task', + schedulingStrategy='REPLICA', desiredCount=2 ) _ = client.create_service( cluster='test_ecs_cluster', serviceName='test_ecs_service2', taskDefinition='test_ecs_task', + schedulingStrategy='DAEMON', desiredCount=2 ) - response = client.list_services( + unfiltered_response = client.list_services( cluster='test_ecs_cluster' ) - len(response['serviceArns']).should.equal(2) - response['serviceArns'][0].should.equal( + len(unfiltered_response['serviceArns']).should.equal(2) + unfiltered_response['serviceArns'][0].should.equal( 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - 
response['serviceArns'][1].should.equal( + unfiltered_response['serviceArns'][1].should.equal( 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + filtered_response = client.list_services( + cluster='test_ecs_cluster', + schedulingStrategy='REPLICA' + ) + len(filtered_response['serviceArns']).should.equal(1) + filtered_response['serviceArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') @mock_ecs def test_describe_services(): From 1fd71fd45a0fe5ef003adbb4f639c726a91eb94e Mon Sep 17 00:00:00 2001 From: redspart Date: Mon, 20 May 2019 18:56:23 -0400 Subject: [PATCH 08/41] Updated delete_cluster() for redshift (#2186) * Updated the deprecated decorator to allow the "SkipFinalClusterSnapshot" option that aws supports. * FIxed logical mistake on the delete_cluster * Removed an unused exception I put in --- moto/redshift/models.py | 27 +++++++++++++++++++++++++-- moto/redshift/responses.py | 9 +++++++-- tests/test_redshift/test_redshift.py | 25 ++++++++++++++++++------- 3 files changed, 50 insertions(+), 11 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 70cbb95cb..6a55f479a 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -531,14 +531,37 @@ class RedshiftBackend(BaseBackend): setattr(cluster, key, value) if new_cluster_identifier: - self.delete_cluster(cluster_identifier) + dic = { + "cluster_identifier": cluster_identifier, + "skip_final_snapshot": True, + "final_cluster_snapshot_identifier": None + } + self.delete_cluster(**dic) cluster.cluster_identifier = new_cluster_identifier self.clusters[new_cluster_identifier] = cluster return cluster - def delete_cluster(self, cluster_identifier): + def delete_cluster(self, **cluster_kwargs): + cluster_identifier = cluster_kwargs.pop("cluster_identifier") + cluster_skip_final_snapshot = cluster_kwargs.pop("skip_final_snapshot") + cluster_snapshot_identifer = cluster_kwargs.pop("final_cluster_snapshot_identifier") + if cluster_identifier in self.clusters: + if cluster_skip_final_snapshot is False and cluster_snapshot_identifer is None: + raise ClientError( + "InvalidParameterValue", + 'FinalSnapshotIdentifier is required for Snapshot copy ' + 'when SkipFinalSnapshot is False' + ) + elif cluster_skip_final_snapshot is False and cluster_snapshot_identifer is not None: # create snapshot + cluster = self.describe_clusters(cluster_identifier)[0] + self.create_cluster_snapshot( + cluster_identifier, + cluster_snapshot_identifer, + cluster.region, + cluster.tags) + return self.clusters.pop(cluster_identifier) raise ClusterNotFoundError(cluster_identifier) diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index 69fbac7c1..a7758febb 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -240,8 +240,13 @@ class RedshiftResponse(BaseResponse): }) def delete_cluster(self): - cluster_identifier = self._get_param("ClusterIdentifier") - cluster = self.redshift_backend.delete_cluster(cluster_identifier) + request_kwargs = { + "cluster_identifier": self._get_param("ClusterIdentifier"), + "final_cluster_snapshot_identifier": self._get_param("FinalClusterSnapshotIdentifier"), + "skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot") + } + + cluster = self.redshift_backend.delete_cluster(**request_kwargs) return self.get_response({ "DeleteClusterResponse": { diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 9208c92dd..e37440c60 100644 --- 
a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -9,7 +9,7 @@ from boto.redshift.exceptions import ( ClusterParameterGroupNotFound, ClusterSecurityGroupNotFound, ClusterSubnetGroupNotFound, - InvalidSubnet, + InvalidSubnet ) from botocore.exceptions import ( ClientError @@ -339,7 +339,7 @@ def test_create_cluster_with_vpc_security_groups_boto3(): @mock_redshift def test_create_cluster_with_iam_roles(): - iam_roles_arn = ['arn:aws:iam:::role/my-iam-role',] + iam_roles_arn = ['arn:aws:iam:::role/my-iam-role', ] client = boto3.client('redshift', region_name='us-east-1') cluster_id = 'my_cluster' client.create_cluster( @@ -385,29 +385,41 @@ def test_describe_non_existent_cluster(): conn.describe_clusters.when.called_with( "not-a-cluster").should.throw(ClusterNotFound) - @mock_redshift_deprecated def test_delete_cluster(): conn = boto.connect_redshift() - cluster_identifier = 'my_cluster' + cluster_identifier = "my_cluster" + snapshot_identifier = "my_snapshot" conn.create_cluster( cluster_identifier, - node_type='single-node', + node_type="single-node", master_username="username", master_user_password="password", ) + conn.delete_cluster.when.called_with(cluster_identifier, False).should.throw(AttributeError) + clusters = conn.describe_clusters()['DescribeClustersResponse'][ 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(1) - conn.delete_cluster(cluster_identifier) + conn.delete_cluster( + cluster_identifier=cluster_identifier, + skip_final_cluster_snapshot=False, + final_cluster_snapshot_identifier=snapshot_identifier + ) clusters = conn.describe_clusters()['DescribeClustersResponse'][ 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(0) + snapshots = conn.describe_cluster_snapshots()["DescribeClusterSnapshotsResponse"][ + "DescribeClusterSnapshotsResult"]["Snapshots"] + list(snapshots).should.have.length_of(1) + + assert snapshot_identifier in snapshots[0]["SnapshotIdentifier"] + # Delete invalid id conn.delete_cluster.when.called_with( "not-a-cluster").should.throw(ClusterNotFound) @@ -643,7 +655,6 @@ def test_delete_cluster_parameter_group(): "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) - @mock_redshift def test_create_cluster_snapshot_of_non_existent_cluster(): client = boto3.client('redshift', region_name='us-east-1') From 1de9acb7ad37d605af3206c9e7cc4aa759c9b7d2 Mon Sep 17 00:00:00 2001 From: Jordan Date: Mon, 20 May 2019 16:58:10 -0600 Subject: [PATCH 09/41] Add cognito-idp admin_update_user_attributes #2184 (#2185) --- IMPLEMENTATION_COVERAGE.md | 2 +- moto/cognitoidp/models.py | 23 +++++++++++ moto/cognitoidp/responses.py | 7 ++++ tests/test_cognitoidp/test_cognitoidp.py | 50 ++++++++++++++++++++++++ 4 files changed, 81 insertions(+), 1 deletion(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 09f0cd039..c38d9c3a4 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -852,7 +852,7 @@ - [ ] admin_set_user_settings - [ ] admin_update_auth_event_feedback - [ ] admin_update_device_status -- [ ] admin_update_user_attributes +- [X] admin_update_user_attributes - [ ] admin_user_global_sign_out - [ ] associate_software_token - [X] change_password diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index bdd279ba6..ef1377789 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -287,6 +287,18 @@ class CognitoIdpUser(BaseModel): return user_json + def update_attributes(self, new_attributes): + + def 
flatten_attrs(attrs):
+            return {attr['Name']: attr['Value'] for attr in attrs}
+
+        def expand_attrs(attrs):
+            return [{'Name': k, 'Value': v} for k, v in attrs.items()]
+
+        flat_attributes = flatten_attrs(self.attributes)
+        flat_attributes.update(flatten_attrs(new_attributes))
+        self.attributes = expand_attrs(flat_attributes)
+
 
 class CognitoIdpBackend(BaseBackend):
 
@@ -673,6 +685,17 @@ class CognitoIdpBackend(BaseBackend):
         else:
             raise NotAuthorizedError(access_token)
 
+    def admin_update_user_attributes(self, user_pool_id, username, attributes):
+        user_pool = self.user_pools.get(user_pool_id)
+        if not user_pool:
+            raise ResourceNotFoundError(user_pool_id)
+
+        if username not in user_pool.users:
+            raise UserNotFoundError(username)
+
+        user = user_pool.users[username]
+        user.update_attributes(attributes)
+
 
 cognitoidp_backends = {}
 for region in boto.cognito.identity.regions():
diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py
index 264910739..e9e83695a 100644
--- a/moto/cognitoidp/responses.py
+++ b/moto/cognitoidp/responses.py
@@ -352,6 +352,13 @@ class CognitoIdpResponse(BaseResponse):
         cognitoidp_backends[region].change_password(access_token, previous_password, proposed_password)
         return ""
 
+    def admin_update_user_attributes(self):
+        user_pool_id = self._get_param("UserPoolId")
+        username = self._get_param("Username")
+        attributes = self._get_param("UserAttributes")
+        cognitoidp_backends[self.region].admin_update_user_attributes(user_pool_id, username, attributes)
+        return ""
+
 
 class CognitoIdpJsonWebKeyResponse(BaseResponse):
 
diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py
index e4e38e821..1483fcd0e 100644
--- a/tests/test_cognitoidp/test_cognitoidp.py
+++ b/tests/test_cognitoidp/test_cognitoidp.py
@@ -1162,3 +1162,53 @@ def test_confirm_forgot_password():
         ConfirmationCode=str(uuid.uuid4()),
         Password=str(uuid.uuid4()),
     )
+
+@mock_cognitoidp
+def test_admin_update_user_attributes():
+    conn = boto3.client("cognito-idp", "us-west-2")
+
+    username = str(uuid.uuid4())
+    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
+
+    conn.admin_create_user(
+        UserPoolId=user_pool_id,
+        Username=username,
+        UserAttributes=[
+            {
+                'Name': 'family_name',
+                'Value': 'Doe',
+            },
+            {
+                'Name': 'given_name',
+                'Value': 'John',
+            }
+        ]
+    )
+
+    conn.admin_update_user_attributes(
+        UserPoolId=user_pool_id,
+        Username=username,
+        UserAttributes=[
+            {
+                'Name': 'family_name',
+                'Value': 'Doe',
+            },
+            {
+                'Name': 'given_name',
+                'Value': 'Jane',
+            }
+        ]
+    )
+
+    user = conn.admin_get_user(
+        UserPoolId=user_pool_id,
+        Username=username
+    )
+    attributes = user['UserAttributes']
+    attributes.should.be.a(list)
+    for attr in attributes:
+        val = attr['Value']
+        if attr['Name'] == 'family_name':
+            val.should.equal('Doe')
+        elif attr['Name'] == 'given_name':
+            val.should.equal('Jane')
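[Editor's aside between patches 09 and 10: the attribute merge in update_attributes above amounts to a dict update keyed on 'Name'. A minimal standalone sketch of the same round-trip; the example data is mine, not from the patch:

    def flatten_attrs(attrs):
        return {attr['Name']: attr['Value'] for attr in attrs}

    def expand_attrs(attrs):
        return [{'Name': k, 'Value': v} for k, v in attrs.items()]

    current = [{'Name': 'family_name', 'Value': 'Doe'},
               {'Name': 'given_name', 'Value': 'John'}]
    update = [{'Name': 'given_name', 'Value': 'Jane'}]

    merged = flatten_attrs(current)
    merged.update(flatten_attrs(update))  # attributes named in the update win
    print(expand_attrs(merged))

Attributes named in the update overwrite existing values while the rest survive unchanged, which is exactly what test_admin_update_user_attributes above asserts.]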
From e504226386c143988309d3730830375de2577e2f Mon Sep 17 00:00:00 2001
From: Eliot Alter
Date: Mon, 20 May 2019 16:01:06 -0700
Subject: [PATCH 10/41] Fix a warning which was missing a space after the period. (#2022)

---
 moto/ec2/models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index 0936d2be9..fa07841b2 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -404,7 +404,7 @@ class Instance(TaggedEC2Resource, BotoInstance):
             warnings.warn('Could not find AMI with image-id:{0}, '
                           'in the near future this will '
                           'cause an error.\n'
-                          'Use ec2_backend.describe_images() to'
+                          'Use ec2_backend.describe_images() to '
                           'find suitable image for your test'.format(image_id),
                           PendingDeprecationWarning)

From f13e4e41cd052a60df208ec12d893c8725a0b3f0 Mon Sep 17 00:00:00 2001
From: Alexey Firsov
Date: Tue, 21 May 2019 02:02:36 +0300
Subject: [PATCH 11/41] add implemented kinesis method describe_stream_summary (#2023)

---
 IMPLEMENTATION_COVERAGE.md         |  4 +--
 moto/kinesis/models.py             | 18 +++++++++++-
 moto/kinesis/responses.py          |  5 ++++
 tests/test_kinesis/test_kinesis.py | 47 ++++++++++++++++++++++--------
 4 files changed, 59 insertions(+), 15 deletions(-)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index c38d9c3a4..fe34258ee 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -2519,14 +2519,14 @@
 - [ ] start_next_pending_job_execution
 - [ ] update_job_execution
 
-## kinesis - 56% implemented
+## kinesis - 61% implemented
 - [X] add_tags_to_stream
 - [X] create_stream
 - [ ] decrease_stream_retention_period
 - [X] delete_stream
 - [ ] describe_limits
 - [X] describe_stream
-- [ ] describe_stream_summary
+- [X] describe_stream_summary
 - [ ] disable_enhanced_monitoring
 - [ ] enable_enhanced_monitoring
 - [X] get_records
diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py
index d9a47ea87..886a3a61f 100644
--- a/moto/kinesis/models.py
+++ b/moto/kinesis/models.py
@@ -116,10 +116,12 @@ class Stream(BaseModel):
     def __init__(self, stream_name, shard_count, region):
         self.stream_name = stream_name
         self.shard_count = shard_count
+        self.creation_datetime = datetime.datetime.now()
         self.region = region
         self.account_number = "123456789012"
         self.shards = {}
         self.tags = {}
+        self.status = "ACTIVE"
 
         if six.PY3:
             izip_longest = itertools.zip_longest
@@ -183,12 +185,23 @@ class Stream(BaseModel):
             "StreamDescription": {
                 "StreamARN": self.arn,
                 "StreamName": self.stream_name,
-                "StreamStatus": "ACTIVE",
+                "StreamStatus": self.status,
                 "HasMoreShards": False,
                 "Shards": [shard.to_json() for shard in self.shards.values()],
             }
         }
 
+    def to_json_summary(self):
+        return {
+            "StreamDescriptionSummary": {
+                "StreamARN": self.arn,
+                "StreamName": self.stream_name,
+                "StreamStatus": self.status,
+                "StreamCreationTimestamp": six.text_type(self.creation_datetime),
+                "OpenShardCount": self.shard_count,
+            }
+        }
+
     @classmethod
     def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
         properties = cloudformation_json['Properties']
@@ -309,6 +322,9 @@ class KinesisBackend(BaseBackend):
         else:
             raise StreamNotFoundError(stream_name)
 
+    def describe_stream_summary(self, stream_name):
+        return self.describe_stream(stream_name)
+
     def list_streams(self):
         return self.streams.values()
 
diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py
index 72b2af4ce..3a81bd9f4 100644
--- a/moto/kinesis/responses.py
+++ b/moto/kinesis/responses.py
@@ -33,6 +33,11 @@ class KinesisResponse(BaseResponse):
         stream = self.kinesis_backend.describe_stream(stream_name)
         return json.dumps(stream.to_json())
 
+    def describe_stream_summary(self):
+        stream_name = self.parameters.get('StreamName')
+        stream =
self.kinesis_backend.describe_stream_summary(stream_name) + return json.dumps(stream.to_json_summary()) + def list_streams(self): streams = self.kinesis_backend.list_streams() stream_names = [stream.stream_name for stream in streams] diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index c70236978..6986f79fc 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -1,12 +1,13 @@ from __future__ import unicode_literals -import boto.kinesis -from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException -import boto3 -import sure # noqa import datetime import time +import boto.kinesis +import boto3 +from boto.kinesis.exceptions import ResourceNotFoundException, \ + InvalidArgumentException + from moto import mock_kinesis, mock_kinesis_deprecated @@ -73,6 +74,23 @@ def test_list_many_streams(): has_more_streams.should.equal(False) +@mock_kinesis +def test_describe_stream_summary(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = 'my_stream_summary' + shard_count = 5 + conn.create_stream(StreamName=stream_name, ShardCount=shard_count) + + resp = conn.describe_stream_summary(StreamName=stream_name) + stream = resp["StreamDescriptionSummary"] + + stream["StreamName"].should.equal(stream_name) + stream["OpenShardCount"].should.equal(shard_count) + stream["StreamARN"].should.equal( + "arn:aws:kinesis:us-west-2:123456789012:{}".format(stream_name)) + stream["StreamStatus"].should.equal("ACTIVE") + + @mock_kinesis_deprecated def test_basic_shard_iterator(): conn = boto.kinesis.connect_to_region("us-west-2") @@ -100,7 +118,8 @@ def test_get_invalid_shard_iterator(): conn.create_stream(stream_name, 1) conn.get_shard_iterator.when.called_with( - stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) + stream_name, "123", 'TRIM_HORIZON').should.throw( + ResourceNotFoundException) @mock_kinesis_deprecated @@ -354,8 +373,8 @@ def test_get_records_timestamp_filtering(): timestamp = datetime.datetime.utcnow() conn.put_record(StreamName=stream_name, - Data='1', - PartitionKey='1') + Data='1', + PartitionKey='1') response = conn.describe_stream(StreamName=stream_name) shard_id = response['StreamDescription']['Shards'][0]['ShardId'] @@ -368,7 +387,7 @@ def test_get_records_timestamp_filtering(): response = conn.get_records(ShardIterator=shard_iterator) response['Records'].should.have.length_of(1) response['Records'][0]['PartitionKey'].should.equal('1') - response['Records'][0]['ApproximateArrivalTimestamp'].should.be.\ + response['Records'][0]['ApproximateArrivalTimestamp'].should.be. 
\ greater_than(timestamp) response['MillisBehindLatest'].should.equal(0) @@ -461,7 +480,8 @@ def test_invalid_shard_iterator_type(): response = conn.describe_stream(stream_name) shard_id = response['StreamDescription']['Shards'][0]['ShardId'] response = conn.get_shard_iterator.when.called_with( - stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException) + stream_name, shard_id, 'invalid-type').should.throw( + InvalidArgumentException) @mock_kinesis_deprecated @@ -549,7 +569,8 @@ def test_split_shard(): shard_range = shards[0]['HashKeyRange'] new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + int(shard_range['EndingHashKey']) + int( + shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -562,7 +583,8 @@ def test_split_shard(): shard_range = shards[2]['HashKeyRange'] new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + int(shard_range['EndingHashKey']) + int( + shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -592,7 +614,8 @@ def test_merge_shards(): shards.should.have.length_of(4) conn.merge_shards.when.called_with( - stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) + stream_name, 'shardId-000000000000', + 'shardId-000000000002').should.throw(InvalidArgumentException) stream_response = conn.describe_stream(stream_name) From 33efe07b43bd65b76bc3b371ab8e0d41917fb9dc Mon Sep 17 00:00:00 2001 From: Craig Anderson Date: Tue, 21 May 2019 00:05:02 +0100 Subject: [PATCH 12/41] Hide CloudFormation pararamters with NoEcho. 
Fixes #2021 (#2024) --- AUTHORS.md | 1 + moto/cloudformation/parsing.py | 9 +++++- moto/cloudformation/responses.py | 6 +++- .../test_cloudformation/test_stack_parsing.py | 28 +++++++++++++++++++ 4 files changed, 42 insertions(+), 2 deletions(-) diff --git a/AUTHORS.md b/AUTHORS.md index 5eb313dda..fbca08368 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -54,4 +54,5 @@ Moto is written by Steve Pulec with contributions from: * [William Richard](https://github.com/william-richard) * [Alex Casalboni](https://github.com/alexcasalboni) * [Jon Beilke](https://github.com/jrbeilke) +* [Craig Anderson](https://github.com/craiga) * [Robert Lewis](https://github.com/ralewis85) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 0be68944b..f2b74d185 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -425,11 +425,18 @@ class ResourceMap(collections.Mapping): self.resolved_parameters[parameter_name] = parameter.get('Default') # Set any input parameters that were passed + self.no_echo_parameter_keys = [] for key, value in self.input_parameters.items(): if key in self.resolved_parameters: - value_type = parameter_slots[key].get('Type', 'String') + parameter_slot = parameter_slots[key] + + value_type = parameter_slot.get('Type', 'String') if value_type == 'CommaDelimitedList' or value_type.startswith("List"): value = value.split(',') + + if parameter_slot.get('NoEcho'): + self.no_echo_parameter_keys.append(key) + self.resolved_parameters[key] = value # Check if there are any non-default params that were not passed input diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index d1ef5ba8a..80970262f 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -654,7 +654,11 @@ DESCRIBE_STACKS_TEMPLATE = """ {% for param_name, param_value in stack.stack_parameters.items() %} {{ param_name }} - {{ param_value }} + {% if param_name in stack.resource_map.no_echo_parameter_keys %} + **** + {% else %} + {{ param_value }} + {% endif %} {% endfor %} diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index d21db2d48..25242e352 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -83,6 +83,18 @@ get_availability_zones_output = { } } +parameters = { + "Parameters": { + "Param": { + "Type": "String", + }, + "NoEchoParam": { + "Type": "String", + "NoEcho": True + } + } +} + split_select_template = { "AWSTemplateFormatVersion": "2010-09-09", "Resources": { @@ -157,6 +169,9 @@ get_attribute_outputs_template = dict( get_availability_zones_template = dict( list(dummy_template.items()) + list(get_availability_zones_output.items())) +parameters_template = dict( + list(dummy_template.items()) + list(parameters.items())) + dummy_template_json = json.dumps(dummy_template) name_type_template_json = json.dumps(name_type_template) output_type_template_json = json.dumps(outputs_template) @@ -165,6 +180,7 @@ get_attribute_outputs_template_json = json.dumps( get_attribute_outputs_template) get_availability_zones_template_json = json.dumps( get_availability_zones_template) +parameters_template_json = json.dumps(parameters_template) split_select_template_json = json.dumps(split_select_template) sub_template_json = json.dumps(sub_template) export_value_template_json = json.dumps(export_value_template) @@ -290,6 +306,18 @@ def test_parse_stack_with_bad_get_attribute_outputs(): "test_id", "test_stack", 
bad_output_template_json, {}, "us-west-1").should.throw(ValidationError)
 
 
+def test_parse_stack_with_parameters():
+    stack = FakeStack(
+        stack_id="test_id",
+        name="test_stack",
+        template=parameters_template_json,
+        parameters={"Param": "visible value", "NoEchoParam": "hidden value"},
+        region_name='us-west-1')
+
+    stack.resource_map.no_echo_parameter_keys.should.have("NoEchoParam")
+    stack.resource_map.no_echo_parameter_keys.should_not.have("Param")
+
+
 def test_parse_equals_condition():
     parse_condition(
         condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
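[Editor's aside between patches 12 and 13: the NoEcho handling added above only changes how DescribeStacks renders parameters, masking them as **** in the response template. A hedged end-to-end sketch run against moto; the template and names are my own, not from the patch:

    import json
    import boto3
    from moto import mock_cloudformation

    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Parameters": {
            "DbPassword": {"Type": "String", "NoEcho": True},
        },
        "Resources": {
            "Queue": {"Type": "AWS::SQS::Queue",
                      "Properties": {"QueueName": "my-queue", "VisibilityTimeout": 3}},
        },
    }

    @mock_cloudformation
    def show_masking():
        cf = boto3.client("cloudformation", region_name="us-west-1")
        cf.create_stack(
            StackName="test_stack",
            TemplateBody=json.dumps(template),
            Parameters=[{"ParameterKey": "DbPassword", "ParameterValue": "hunter2"}],
        )
        # The NoEcho parameter should come back as **** rather than the raw value
        print(cf.describe_stacks(StackName="test_stack")["Stacks"][0]["Parameters"])

    show_masking()
]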
From b8ba7980a025d029c47b21b4e1f19c7aaae25bac Mon Sep 17 00:00:00 2001
From: Travis Truman
Date: Tue, 21 May 2019 12:44:06 -0400
Subject: [PATCH 13/41] =?UTF-8?q?Adding=20support=20for=20specifying=20a?=
 =?UTF-8?q?=20PermissionsBoundary=20ARN=20in=20calls=20to=20i=E2=80=A6=20(?=
 =?UTF-8?q?#2182)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Adding support for specifying a PermissionsBoundary ARN in calls to
  iam.create_role

Closes #2181

* Correcting whitespace error

* Adding support for Role PermissionsBoundary to be returned from calls
  to list_roles

* Raise ClientError when a bad permissions boundary ARN is supplied
---
 moto/iam/models.py         | 14 +++++++++++---
 moto/iam/responses.py      | 16 +++++++++++++++-
 tests/test_iam/test_iam.py | 21 +++++++++++++++++++--
 3 files changed, 45 insertions(+), 6 deletions(-)

diff --git a/moto/iam/models.py b/moto/iam/models.py
index e36b41459..095bbab29 100644
--- a/moto/iam/models.py
+++ b/moto/iam/models.py
@@ -9,6 +9,7 @@ from cryptography import x509
 from cryptography.hazmat.backends import default_backend
 import pytz
 
+from moto.core.exceptions import RESTError
 from moto.core import BaseBackend, BaseModel
 from moto.core.utils import iso_8601_datetime_without_milliseconds
 
@@ -131,7 +132,7 @@ class InlinePolicy(Policy):
 
 class Role(BaseModel):
 
-    def __init__(self, role_id, name, assume_role_policy_document, path):
+    def __init__(self, role_id, name, assume_role_policy_document, path, permissions_boundary):
         self.id = role_id
         self.name = name
         self.assume_role_policy_document = assume_role_policy_document
@@ -141,6 +142,7 @@ class Role(BaseModel):
         self.create_date = datetime.now(pytz.utc)
         self.tags = {}
         self.description = ""
+        self.permissions_boundary = permissions_boundary
 
     @classmethod
     def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@@ -150,6 +152,7 @@ class Role(BaseModel):
             role_name=resource_name,
             assume_role_policy_document=properties['AssumeRolePolicyDocument'],
             path=properties.get('Path', '/'),
+            permissions_boundary=properties.get('PermissionsBoundary', '')
         )
 
         policies = properties.get('Policies', [])
@@ -470,6 +473,8 @@ class IAMBackend(BaseBackend):
         self.managed_policies = self._init_managed_policies()
         self.account_aliases = []
         self.saml_providers = {}
+        self.policy_arn_regex = re.compile(
+            r'^arn:aws:iam::[0-9]*:policy/.*$')
         super(IAMBackend, self).__init__()
 
     def _init_managed_policies(self):
@@ -587,9 +592,12 @@ class IAMBackend(BaseBackend):
 
         return policies, marker
 
-    def create_role(self, role_name, assume_role_policy_document, path):
+    def create_role(self, role_name, assume_role_policy_document, path, permissions_boundary):
         role_id = random_resource_id()
+        if permissions_boundary and not self.policy_arn_regex.match(permissions_boundary):
+            raise RESTError('InvalidParameterValue', 'Value ({}) for parameter PermissionsBoundary is invalid.'.format(permissions_boundary))
+
+        role = Role(role_id, role_name, assume_role_policy_document, path, permissions_boundary)
         self.roles[role_id] = role
         return role
 
diff --git a/moto/iam/responses.py b/moto/iam/responses.py
index e5b4c9070..8d2a557cb 100644
--- a/moto/iam/responses.py
+++ b/moto/iam/responses.py
@@ -175,9 +175,11 @@ class IamResponse(BaseResponse):
         path = self._get_param('Path')
         assume_role_policy_document = self._get_param(
             'AssumeRolePolicyDocument')
+        permissions_boundary = self._get_param(
+            'PermissionsBoundary')
 
         role = iam_backend.create_role(
-            role_name, assume_role_policy_document, path)
+            role_name, assume_role_policy_document, path, permissions_boundary)
         template = self.response_template(CREATE_ROLE_TEMPLATE)
         return template.render(role=role)
 
@@ -1000,6 +1002,12 @@ CREATE_ROLE_TEMPLATE = """
Date: Wed, 22 May 2019 01:45:30 +0900
Subject: [PATCH 14/41] fix #2011 (#2012)

add support for Scan method using LSI or GSI
---
 moto/dynamodb2/exceptions.py                  |   2 +
 moto/dynamodb2/models.py                      |  51 ++++++--
 moto/dynamodb2/responses.py                   |   8 +-
 tests/test_dynamodb2/test_dynamodb.py         |  47 +++++++-
 .../test_dynamodb_table_with_range_key.py     | 110 ++++++++++++++++++
 .../test_dynamodb_table_without_range_key.py  |  74 ++++++++++++
 6 files changed, 282 insertions(+), 10 deletions(-)
 create mode 100644 moto/dynamodb2/exceptions.py

diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py
new file mode 100644
index 000000000..9df973292
--- /dev/null
+++ b/moto/dynamodb2/exceptions.py
@@ -0,0 +1,2 @@
+class InvalidIndexNameError(ValueError):
+    pass
diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py
index 0f4594aa4..c2e5da2ee 100644
--- a/moto/dynamodb2/models.py
+++ b/moto/dynamodb2/models.py
@@ -13,6 +13,7 @@ from moto.core import BaseBackend, BaseModel
 from moto.core.utils import unix_time
 from moto.core.exceptions import JsonRESTError
 from .comparisons import get_comparison_func, get_filter_expression, Op
+from .exceptions import InvalidIndexNameError
 
 
 class DynamoJsonEncoder(json.JSONEncoder):
@@ -572,7 +573,7 @@ class Table(BaseModel):
         results = []
 
         if index_name:
-            all_indexes = (self.global_indexes or []) + (self.indexes or [])
+            all_indexes = self.all_indexes()
             indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
             if index_name not in indexes_by_name:
                 raise ValueError('Invalid index: %s for table: %s.
Available indexes are: %s' % ( @@ -672,11 +673,39 @@ class Table(BaseModel): else: yield hash_set - def scan(self, filters, limit, exclusive_start_key, filter_expression=None): + def all_indexes(self): + return (self.global_indexes or []) + (self.indexes or []) + + def has_idx_items(self, index_name): + + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) + idx = indexes_by_name[index_name] + idx_col_set = set([i['AttributeName'] for i in idx['KeySchema']]) + + for hash_set in self.items.values(): + if self.range_key_attr: + for item in hash_set.values(): + if idx_col_set.issubset(set(item.attrs)): + yield item + else: + if idx_col_set.issubset(set(hash_set.attrs)): + yield hash_set + + def scan(self, filters, limit, exclusive_start_key, filter_expression=None, index_name=None): results = [] scanned_count = 0 + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) - for item in self.all_items(): + if index_name: + if index_name not in indexes_by_name: + raise InvalidIndexNameError('The table does not have the specified index: %s' % index_name) + items = self.has_idx_items(index_name) + else: + items = self.all_items() + + for item in items: scanned_count += 1 passes_all_conditions = True for attribute_name, (comparison_operator, comparison_objs) in filters.items(): @@ -703,10 +732,10 @@ class Table(BaseModel): results.append(item) results, last_evaluated_key = self._trim_results(results, limit, - exclusive_start_key) + exclusive_start_key, index_name) return results, scanned_count, last_evaluated_key - def _trim_results(self, results, limit, exclusive_start_key): + def _trim_results(self, results, limit, exclusive_start_key, scaned_index=None): if exclusive_start_key is not None: hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr)) range_key = exclusive_start_key.get(self.range_key_attr) @@ -726,6 +755,14 @@ class Table(BaseModel): if results[-1].range_key is not None: last_evaluated_key[self.range_key_attr] = results[-1].range_key + if scaned_index: + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) + idx = indexes_by_name[scaned_index] + idx_col_list = [i['AttributeName'] for i in idx['KeySchema']] + for col in idx_col_list: + last_evaluated_key[col] = results[-1].attrs[col] + return results, last_evaluated_key def lookup(self, *args, **kwargs): @@ -893,7 +930,7 @@ class DynamoDBBackend(BaseBackend): return table.query(hash_key, range_comparison, range_values, limit, exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs) - def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values): + def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values, index_name): table = self.tables.get(table_name) if not table: return None, None, None @@ -908,7 +945,7 @@ class DynamoDBBackend(BaseBackend): else: filter_expression = Op(None, None) # Will always eval to true - return table.scan(scan_filters, limit, exclusive_start_key, filter_expression) + return table.scan(scan_filters, limit, exclusive_start_key, filter_expression, index_name) def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, expected=None): diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index e382efc1d..7eb565747 100644 --- 
a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -5,6 +5,7 @@ import re from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id +from .exceptions import InvalidIndexNameError from .models import dynamodb_backends, dynamo_json_dump @@ -560,6 +561,7 @@ class DynamoHandler(BaseResponse): exclusive_start_key = self.body.get('ExclusiveStartKey') limit = self.body.get("Limit") + index_name = self.body.get('IndexName') try: items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters, @@ -567,7 +569,11 @@ class DynamoHandler(BaseResponse): exclusive_start_key, filter_expression, expression_attribute_names, - expression_attribute_values) + expression_attribute_values, + index_name) + except InvalidIndexNameError as err: + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, str(err)) except ValueError as err: er = 'com.amazonaws.dynamodb.v20111205#ValidationError' return self.error(er, 'Bad Filter Expression: {0}'.format(err)) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 208453f0a..77846de04 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1531,6 +1531,7 @@ def test_dynamodb_streams_2(): } assert 'LatestStreamLabel' in resp['TableDescription'] assert 'LatestStreamArn' in resp['TableDescription'] + @mock_dynamodb2 def test_condition_expressions(): @@ -1696,8 +1697,8 @@ def test_query_gsi_with_range_key(): res = dynamodb.query(TableName='test', IndexName='test_gsi', KeyConditionExpression='gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key', ExpressionAttributeValues={ - ':gsi_hash_key': {'S': 'key1'}, - ':gsi_range_key': {'S': 'range1'} + ':gsi_hash_key': {'S': 'key1'}, + ':gsi_range_key': {'S': 'range1'} }) res.should.have.key("Count").equal(1) res.should.have.key("Items") @@ -1706,3 +1707,45 @@ def test_query_gsi_with_range_key(): 'gsi_hash_key': {'S': 'key1'}, 'gsi_range_key': {'S': 'range1'}, }) + + +@mock_dynamodb2 +def test_scan_by_non_exists_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_col', + 'KeyType': 'HASH' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + with assert_raises(ClientError) as ex: + dynamodb.scan(TableName='test', IndexName='non_exists_index') + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'The table does not have the specified index: non_exists_index' + ) diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index a9ab298b7..77e06ecb1 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1961,3 +1961,113 @@ def test_query_pagination(): results = 
page1['Items'] + page2['Items'] subjects = set([int(r['subject']) for r in results]) subjects.should.equal(set(range(10))) + + +@mock_dynamodb2 +def test_scan_by_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'range_key', 'KeyType': 'RANGE'}, + ], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'range_key', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_range_key', 'AttributeType': 'S'}, + {'AttributeName': 'lsi_range_key', 'AttributeType': 'S'}, + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + {'AttributeName': 'gsi_col', 'KeyType': 'HASH'}, + {'AttributeName': 'gsi_range_key', 'KeyType': 'RANGE'}, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ], + LocalSecondaryIndexes=[ + { + 'IndexName': 'test_lsi', + 'KeySchema': [ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'lsi_range_key', 'KeyType': 'RANGE'}, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'range_key': {'S': '1'}, + 'col1': {'S': 'val1'}, + 'gsi_col': {'S': '1'}, + 'gsi_range_key': {'S': '1'}, + 'lsi_range_key': {'S': '1'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'range_key': {'S': '2'}, + 'col1': {'S': 'val2'}, + 'gsi_col': {'S': '1'}, + 'gsi_range_key': {'S': '2'}, + 'lsi_range_key': {'S': '2'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '3'}, + 'range_key': {'S': '1'}, + 'col1': {'S': 'val3'}, + } + ) + + res = dynamodb.scan(TableName='test') + assert res['Count'] == 3 + assert len(res['Items']) == 3 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['gsi_col']['S'] == '1' + assert last_eval_key['gsi_range_key']['S'] == '1' + + res = dynamodb.scan(TableName='test', IndexName='test_lsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_lsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['range_key']['S'] == '1' + assert last_eval_key['lsi_range_key']['S'] == '1' diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 874804db0..1880c7cab 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -829,3 +829,77 @@ def test_scan_pagination(): results = page1['Items'] + page2['Items'] usernames = set([r['username'] for r in results]) usernames.should.equal(set(expected_usernames)) + + +@mock_dynamodb2 +def test_scan_by_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + 
TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_col', + 'KeyType': 'HASH' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'col1': {'S': 'val1'}, + 'gsi_col': {'S': 'gsi_val1'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '2'}, + 'col1': {'S': 'val2'}, + 'gsi_col': {'S': 'gsi_val2'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '3'}, + 'col1': {'S': 'val3'}, + } + ) + + res = dynamodb.scan(TableName='test') + assert res['Count'] == 3 + assert len(res['Items']) == 3 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['gsi_col']['S'] == 'gsi_val1' From 796dd71c0ce8c8d7b7820d8d191dbe21e97721d7 Mon Sep 17 00:00:00 2001 From: James Bungard Date: Wed, 22 May 2019 02:46:22 +1000 Subject: [PATCH 15/41] Platform independent KMS timestamp generation (#2193) * Platform independent KMS timestamp generation Fixes #2192 * Switch to moto.core.unix_time Fixes #2192 --- moto/kms/models.py | 4 ++-- tests/test_kms/test_kms.py | 15 +++++++-------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/moto/kms/models.py b/moto/kms/models.py index b49e9dd09..2d6245ad2 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import os import boto.kms from moto.core import BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_without_milliseconds +from moto.core.utils import iso_8601_datetime_without_milliseconds, unix_time from .utils import generate_key_id from collections import defaultdict from datetime import datetime, timedelta @@ -37,7 +37,7 @@ class Key(BaseModel): "KeyMetadata": { "AWSAccountId": self.account_id, "Arn": self.arn, - "CreationDate": datetime.strftime(datetime.utcnow(), "%s"), + "CreationDate": "%d" % unix_time(), "Description": self.description, "Enabled": self.enabled, "KeyId": self.id, diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index e7ce9f74b..f0d77d3e9 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -18,13 +18,14 @@ from dateutil.tz import tzutc @mock_kms_deprecated def test_create_key(): conn = boto.kms.connect_to_region("us-west-2") + with freeze_time("2015-01-01 00:00:00"): + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - - key['KeyMetadata']['Description'].should.equal("my key") - key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - key['KeyMetadata']['Enabled'].should.equal(True) + key['KeyMetadata']['Description'].should.equal("my key") + key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + 
key['KeyMetadata']['Enabled'].should.equal(True)
+        key['KeyMetadata']['CreationDate'].should.equal("1420070400")
 
 
 @mock_kms_deprecated
@@ -980,5 +981,3 @@ def test_put_key_policy_key_not_found():
         PolicyName='default',
         Policy='new policy'
     )
-
-

From 6628567cbc7dde7fee533dcdd065d53f2c5f3113 Mon Sep 17 00:00:00 2001
From: sergejs-katusenoks <48756343+sergejs-katusenoks@users.noreply.github.com>
Date: Tue, 21 May 2019 17:47:35 +0100
Subject: [PATCH 16/41] Fix base64 deprecated methods in kinesis.utils (#2209)

* Fix base64 deprecated methods

Using right encode and decode methods according to python version.

* moved imports on top

E402 module level import not at top of file
---
 moto/kinesis/utils.py | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/moto/kinesis/utils.py b/moto/kinesis/utils.py
index 337728f02..0c3edbb5a 100644
--- a/moto/kinesis/utils.py
+++ b/moto/kinesis/utils.py
@@ -1,8 +1,19 @@
+import sys
 import base64
 
 from .exceptions import InvalidArgumentError
 
 
+if sys.version_info[0] == 2:
+    encode_method = base64.encodestring
+    decode_method = base64.decodestring
+elif sys.version_info[0] == 3:
+    encode_method = base64.encodebytes
+    decode_method = base64.decodebytes
+else:
+    raise Exception("Python version is not supported")
+
+
 def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting_sequence_number,
                                at_timestamp):
     if shard_iterator_type == "AT_SEQUENCE_NUMBER":
@@ -22,7 +33,7 @@ def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, startin
 
 
 def compose_shard_iterator(stream_name, shard, last_sequence_id):
-    return base64.encodestring(
+    return encode_method(
         "{0}:{1}:{2}".format(
             stream_name,
             shard.shard_id,
@@ -32,4 +43,4 @@ def compose_shard_iterator(stream_name, shard, last_sequence_id):
 
 
 def decompose_shard_iterator(shard_iterator):
-    return base64.decodestring(shard_iterator.encode("utf-8")).decode("utf-8").split(":")
+    return decode_method(shard_iterator.encode("utf-8")).decode("utf-8").split(":")
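[Editor's aside between patches 16 and 17: the shim above picks the non-deprecated codec once at import time. A quick standalone check of the Python 3 branch; the demo iterator string is my own, not from the patch:

    import base64

    token = "my_stream:shardId-000000000000:42"
    encoded = base64.encodebytes(token.encode("utf-8"))
    decoded = base64.decodebytes(encoded).decode("utf-8")

    assert decoded == token  # the shard iterator round-trips losslessly
    assert decoded.split(":")[1] == "shardId-000000000000"

base64.encodestring and base64.decodestring have been deprecated aliases of encodebytes/decodebytes since Python 3.1 (and are removed in later releases), which is why the module switches on sys.version_info instead of calling them directly.]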
From 8f4c273095fc730b603a997939ba6331afec7f17 Mon Sep 17 00:00:00 2001
From: cm-iwata <38879253+cm-iwata@users.noreply.github.com>
Date: Wed, 22 May 2019 01:49:56 +0900
Subject: [PATCH 17/41] fix #2190 (#2194)

lambda list_versions_by_function return $LATEST version and published
version
---
 moto/awslambda/models.py            | 15 +++++++---
 moto/awslambda/responses.py         |  7 +++--
 moto/awslambda/utils.py             |  9 ++++--
 tests/test_awslambda/test_lambda.py | 44 +++++++++++++++++++++--------
 4 files changed, 56 insertions(+), 19 deletions(-)

diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py
index 9fc41c11e..f291f3beb 100644
--- a/moto/awslambda/models.py
+++ b/moto/awslambda/models.py
@@ -30,7 +30,7 @@ from moto.s3.models import s3_backend
 from moto.logs.models import logs_backends
 from moto.s3.exceptions import MissingBucket, MissingKey
 from moto import settings
-from .utils import make_function_arn
+from .utils import make_function_arn, make_function_ver_arn
 
 logger = logging.getLogger(__name__)
 
@@ -215,12 +215,12 @@ class LambdaFunction(BaseModel):
             self.code_size = key.size
             self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
 
-        self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version)
+        self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name)
 
         self.tags = dict()
 
     def set_version(self, version):
-        self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version)
+        self.function_arn = make_function_ver_arn(self.region, ACCOUNT_ID, self.function_name, version)
         self.version = version
         self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
 
@@ -503,7 +503,10 @@ class LambdaStorage(object):
     def list_versions_by_function(self, name):
         if name not in self._functions:
             return None
-        return [self._functions[name]['latest']]
+
+        latest = copy.copy(self._functions[name]['latest'])
+        latest.function_arn += ':$LATEST'
+        return [latest] + self._functions[name]['versions']
 
     def get_arn(self, arn):
         return self._arns.get(arn, None)
@@ -535,6 +538,7 @@ class LambdaStorage(object):
         fn.set_version(new_version)
 
         self._functions[name]['versions'].append(fn)
+        self._arns[fn.function_arn] = fn
         return fn
 
     def del_function(self, name, qualifier=None):
@@ -604,6 +608,9 @@ class LambdaBackend(BaseBackend):
 
         self._lambdas.put_function(fn)
 
+        if spec.get('Publish'):
+            ver = self.publish_function(function_name)
+            fn.version = ver.version
         return fn
 
     def publish_function(self, function_name):
diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py
index 1c43ef84b..c29c9acd9 100644
--- a/moto/awslambda/responses.py
+++ b/moto/awslambda/responses.py
@@ -150,7 +150,7 @@ class LambdaResponse(BaseResponse):
 
         for fn in self.lambda_backend.list_functions():
             json_data = fn.get_configuration()
-
+            json_data['Version'] = '$LATEST'
             result['Functions'].append(json_data)
 
         return 200, {}, json.dumps(result)
@@ -204,7 +204,10 @@ class LambdaResponse(BaseResponse):
 
         if fn:
             code = fn.get_code()
-
+            if qualifier is None or qualifier == '$LATEST':
+                code['Configuration']['Version'] = '$LATEST'
+            if qualifier == '$LATEST':
+                code['Configuration']['FunctionArn'] += ':$LATEST'
             return 200, {}, json.dumps(code)
         else:
             return 404, {}, "{}"
diff --git a/moto/awslambda/utils.py b/moto/awslambda/utils.py
index 88146d34f..82027cb2f 100644
--- a/moto/awslambda/utils.py
+++ b/moto/awslambda/utils.py
@@ -3,8 +3,13 @@ from collections import namedtuple
 ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version'])
 
 
-def make_function_arn(region, account, name, version='1'):
-    return 'arn:aws:lambda:{0}:{1}:function:{2}:{3}'.format(region, account, name, version)
+def make_function_arn(region, account, name):
+    return 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account, name)
+
+
+def make_function_ver_arn(region, account, name, version='1'):
+    arn = make_function_arn(region, account, name)
+    return '{0}:{1}'.format(arn, version)
 
 
 def split_function_arn(arn):
diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py
index 479aaaa8a..9ef6fdb0d 100644
--- a/tests/test_awslambda/test_lambda.py
+++ b/tests/test_awslambda/test_lambda.py
@@ -282,7 +282,7 @@ def test_create_function_from_aws_bucket():
     result.pop('LastModified')
     result.should.equal({
         'FunctionName': 'testFunction',
        'FunctionArn':
'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), 'Runtime': 'python2.7', 'Role': 'test-iam-role', 'Handler': 'lambda_function.lambda_handler', @@ -336,7 +336,7 @@ def test_create_function_from_zipfile(): 'Timeout': 3, 'MemorySize': 128, 'CodeSha256': hashlib.sha256(zip_content).hexdigest(), - 'Version': '$LATEST', + 'Version': '1', 'VpcConfig': { "SecurityGroupIds": [], "SubnetIds": [], @@ -398,6 +398,8 @@ def test_get_function(): # Test get function with result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST') result['Configuration']['Version'].should.equal('$LATEST') + result['Configuration']['FunctionArn'].should.equal('arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST') + # Test get function when can't find function name with assert_raises(ClientError): @@ -464,7 +466,7 @@ def test_publish(): Description='test lambda function', Timeout=3, MemorySize=128, - Publish=True, + Publish=False, ) function_list = conn.list_functions() @@ -485,7 +487,7 @@ def test_publish(): function_list = conn.list_functions() function_list['Functions'].should.have.length_of(1) - function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST') + function_list['Functions'][0]['FunctionArn'].should.contain('testFunction') @mock_lambda @@ -528,7 +530,7 @@ def test_list_create_list_get_delete_list(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.lambda_handler", "MemorySize": 128, @@ -701,7 +703,7 @@ def test_invoke_async_function(): ) success_result = conn.invoke_async( - FunctionName='testFunction', + FunctionName='testFunction', InvokeArgs=json.dumps({'test': 'event'}) ) @@ -741,7 +743,7 @@ def test_get_function_created_with_zipfile(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, @@ -842,7 +844,7 @@ def test_list_versions_by_function(): conn.create_function( FunctionName='testFunction', Runtime='python2.7', - Role='test-iam-role', + Role='arn:aws:iam::123456789012:role/test-iam-role', Handler='lambda_function.lambda_handler', Code={ 'S3Bucket': 'test-bucket', @@ -857,8 +859,28 @@ def test_list_versions_by_function(): res = conn.publish_version(FunctionName='testFunction') assert res['ResponseMetadata']['HTTPStatusCode'] == 201 versions = conn.list_versions_by_function(FunctionName='testFunction') - + assert len(versions['Versions']) == 3 assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST' + assert versions['Versions'][1]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:1' + assert versions['Versions'][2]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:2' + + conn.create_function( + 
FunctionName='testFunction_2', + Runtime='python2.7', + Role='arn:aws:iam::123456789012:role/test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=False, + ) + versions = conn.list_versions_by_function(FunctionName='testFunction_2') + assert len(versions['Versions']) == 1 + assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction_2:$LATEST' @mock_lambda From bbd7fefb371a8c7aafd879d1d1c8aa23314981d7 Mon Sep 17 00:00:00 2001 From: Justin Kieber-King Date: Wed, 22 May 2019 05:45:22 -0400 Subject: [PATCH 18/41] Feature: Secrets Manager put_secret_value and list_secret_version_ids (#2116) * initial work - adding put_secret_value and list_secret_versions * Added support for versions in all functions except rotate_secret * more work - refactor rotate_secret method - now, adds a new version of the secret and points default version id to it - updated implementation coverage readme - element in list check to fix unit test - fixed linting errors - added tests, fixed exception, failing tests still - secrets_manager/test_server fails when running whole suite, but not when running that individual test file * fixed failing test_get_secret_value * Removed test.py. Fixed condition statement. * fixed default stages + adding AWSPREVIOUS * remove old AWSPREVIOUS stages --- IMPLEMENTATION_COVERAGE.md | 6 +- moto/secretsmanager/exceptions.py | 8 + moto/secretsmanager/models.py | 178 ++++++++++++---- moto/secretsmanager/responses.py | 16 ++ .../test_secretsmanager.py | 104 +++++++-- tests/test_secretsmanager/test_server.py | 199 ++++++++++++++++-- 6 files changed, 427 insertions(+), 84 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index fe34258ee..3cd4d1fec 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -3658,9 +3658,9 @@ - [X] describe_secret - [X] get_random_password - [X] get_secret_value -- [ ] list_secret_version_ids -- [x] list_secrets -- [ ] put_secret_value +- [X] list_secret_version_ids +- [X] list_secrets +- [X] put_secret_value - [X] restore_secret - [X] rotate_secret - [ ] tag_resource diff --git a/moto/secretsmanager/exceptions.py b/moto/secretsmanager/exceptions.py index 06010c411..fa81b6d8b 100644 --- a/moto/secretsmanager/exceptions.py +++ b/moto/secretsmanager/exceptions.py @@ -29,6 +29,14 @@ class InvalidParameterException(SecretsManagerClientError): message) +class ResourceExistsException(SecretsManagerClientError): + def __init__(self, message): + super(ResourceExistsException, self).__init__( + 'ResourceExistsException', + message + ) + + class InvalidRequestException(SecretsManagerClientError): def __init__(self, message): super(InvalidRequestException, self).__init__( diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 44ac1ef47..ec90c3e19 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -11,6 +11,7 @@ from moto.core import BaseBackend, BaseModel from .exceptions import ( ResourceNotFoundException, InvalidParameterException, + ResourceExistsException, InvalidRequestException, ClientError ) @@ -47,6 +48,17 @@ class SecretsManagerBackend(BaseBackend): if not self._is_valid_identifier(secret_id): raise ResourceNotFoundException() + if not version_id and version_stage: + # set version_id to match version_stage + versions_dict = self.secrets[secret_id]['versions'] + for 
ver_id, ver_val in versions_dict.items():
+                if version_stage in ver_val['version_stages']:
+                    version_id = ver_id
+                    break
+            if not version_id:
+                raise ResourceNotFoundException()
+
+        # TODO check this part
         if 'deleted_date' in self.secrets[secret_id]:
             raise InvalidRequestException(
                 "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \
@@ -54,42 +66,91 @@ class SecretsManagerBackend(BaseBackend):
             )
 
         secret = self.secrets[secret_id]
+        version_id = version_id or secret['default_version_id']
+
+        secret_version = secret['versions'][version_id]
 
         response = json.dumps({
             "ARN": secret_arn(self.region, secret['secret_id']),
             "Name": secret['name'],
-            "VersionId": secret['version_id'],
-            "SecretString": secret['secret_string'],
-            "VersionStages": [
-                "AWSCURRENT",
-            ],
-            "CreatedDate": secret['createdate']
+            "VersionId": secret_version['version_id'],
+            "SecretString": secret_version['secret_string'],
+            "VersionStages": secret_version['version_stages'],
+            "CreatedDate": secret_version['createdate'],
         })
 
         return response
 
     def create_secret(self, name, secret_string, tags, **kwargs):
 
-        generated_version_id = str(uuid.uuid4())
+        # error if secret exists
+        if name in self.secrets.keys():
+            raise ResourceExistsException('A resource with the ID you requested already exists.')
 
-        secret = {
-            'secret_string': secret_string,
-            'secret_id': name,
-            'name': name,
-            'createdate': int(time.time()),
-            'rotation_enabled': False,
-            'rotation_lambda_arn': '',
-            'auto_rotate_after_days': 0,
-            'version_id': generated_version_id,
-            'tags': tags
-        }
-
-        self.secrets[name] = secret
+        version_id = self._add_secret(name, secret_string, tags=tags)
 
         response = json.dumps({
             "ARN": secret_arn(self.region, name),
             "Name": name,
-            "VersionId": generated_version_id,
+            "VersionId": version_id,
         })
 
         return response
 
+    def _add_secret(self, secret_id, secret_string, tags=[], version_id=None, version_stages=None):
+
+        if version_stages is None:
+            version_stages = ['AWSCURRENT']
+
+        if not version_id:
+            version_id = str(uuid.uuid4())
+
+        secret_version = {
+            'secret_string': secret_string,
+            'createdate': int(time.time()),
+            'version_id': version_id,
+            'version_stages': version_stages,
+        }
+
+        if secret_id in self.secrets:
+            # remove all old AWSPREVIOUS stages
+            for secret_version_to_look_at in self.secrets[secret_id]['versions'].values():
+                if 'AWSPREVIOUS' in secret_version_to_look_at['version_stages']:
+                    secret_version_to_look_at['version_stages'].remove('AWSPREVIOUS')
+
+            # set old AWSCURRENT secret to AWSPREVIOUS
+            previous_current_version_id = self.secrets[secret_id]['default_version_id']
+            self.secrets[secret_id]['versions'][previous_current_version_id]['version_stages'] = ['AWSPREVIOUS']
+
+            self.secrets[secret_id]['versions'][version_id] = secret_version
+            self.secrets[secret_id]['default_version_id'] = version_id
+        else:
+            self.secrets[secret_id] = {
+                'versions': {
+                    version_id: secret_version
+                },
+                'default_version_id': version_id,
+            }
+
+        secret = self.secrets[secret_id]
+        secret['secret_id'] = secret_id
+        secret['name'] = secret_id
+        secret['rotation_enabled'] = False
+        secret['rotation_lambda_arn'] = ''
+        secret['auto_rotate_after_days'] = 0
+        secret['tags'] = tags
+
+        return version_id
+
+    def put_secret_value(self, secret_id, secret_string, version_stages):
+
+        version_id = self._add_secret(secret_id, secret_string, version_stages=version_stages)
+
+        response = json.dumps({
+            'ARN': secret_arn(self.region, secret_id),
+            'Name': secret_id,
+            'VersionId': version_id,
+            
'VersionStages': version_stages }) return response @@ -162,17 +223,24 @@ class SecretsManagerBackend(BaseBackend): secret = self.secrets[secret_id] - secret['version_id'] = client_request_token or '' + old_secret_version = secret['versions'][secret['default_version_id']] + new_version_id = client_request_token or str(uuid.uuid4()) + + self._add_secret(secret_id, old_secret_version['secret_string'], secret['tags'], version_id=new_version_id, version_stages=['AWSCURRENT']) + secret['rotation_lambda_arn'] = rotation_lambda_arn or '' if rotation_rules: secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0) if secret['auto_rotate_after_days'] > 0: secret['rotation_enabled'] = True + if 'AWSCURRENT' in old_secret_version['version_stages']: + old_secret_version['version_stages'].remove('AWSCURRENT') + response = json.dumps({ "ARN": secret_arn(self.region, secret['secret_id']), "Name": secret['name'], - "VersionId": secret['version_id'] + "VersionId": new_version_id }) return response @@ -206,28 +274,54 @@ class SecretsManagerBackend(BaseBackend): return response + def list_secret_version_ids(self, secret_id): + secret = self.secrets[secret_id] + + version_list = [] + for version_id, version in secret['versions'].items(): + version_list.append({ + 'CreatedDate': int(time.time()), + 'LastAccessedDate': int(time.time()), + 'VersionId': version_id, + 'VersionStages': version['version_stages'], + }) + + response = json.dumps({ + 'ARN': secret['secret_id'], + 'Name': secret['name'], + 'NextToken': '', + 'Versions': version_list, + }) + + return response + def list_secrets(self, max_results, next_token): # TODO implement pagination and limits - secret_list = [{ - "ARN": secret_arn(self.region, secret['secret_id']), - "DeletedDate": secret.get('deleted_date', None), - "Description": "", - "KmsKeyId": "", - "LastAccessedDate": None, - "LastChangedDate": None, - "LastRotatedDate": None, - "Name": secret['name'], - "RotationEnabled": secret['rotation_enabled'], - "RotationLambdaARN": secret['rotation_lambda_arn'], - "RotationRules": { - "AutomaticallyAfterDays": secret['auto_rotate_after_days'] - }, - "SecretVersionsToStages": { - secret['version_id']: ["AWSCURRENT"] - }, - "Tags": secret['tags'] - } for secret in self.secrets.values()] + secret_list = [] + for secret in self.secrets.values(): + + versions_to_stages = {} + for version_id, version in secret['versions'].items(): + versions_to_stages[version_id] = version['version_stages'] + + secret_list.append({ + "ARN": secret_arn(self.region, secret['secret_id']), + "DeletedDate": secret.get('deleted_date', None), + "Description": "", + "KmsKeyId": "", + "LastAccessedDate": None, + "LastChangedDate": None, + "LastRotatedDate": None, + "Name": secret['name'], + "RotationEnabled": secret['rotation_enabled'], + "RotationLambdaARN": secret['rotation_lambda_arn'], + "RotationRules": { + "AutomaticallyAfterDays": secret['auto_rotate_after_days'] + }, + "SecretVersionsToStages": versions_to_stages, + "Tags": secret['tags'] + }) return secret_list, None diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py index 0eb02e39b..fe51d8c1b 100644 --- a/moto/secretsmanager/responses.py +++ b/moto/secretsmanager/responses.py @@ -67,6 +67,22 @@ class SecretsManagerResponse(BaseResponse): rotation_rules=rotation_rules ) + def put_secret_value(self): + secret_id = self._get_param('SecretId', if_none='') + secret_string = self._get_param('SecretString', if_none='') + version_stages = self._get_param('VersionStages', 
if_none=['AWSCURRENT']) + return secretsmanager_backends[self.region].put_secret_value( + secret_id=secret_id, + secret_string=secret_string, + version_stages=version_stages, + ) + + def list_secret_version_ids(self): + secret_id = self._get_param('SecretId', if_none='') + return secretsmanager_backends[self.region].list_secret_version_ids( + secret_id=secret_id + ) + def list_secrets(self): max_results = self._get_int_param("MaxResults") next_token = self._get_param("NextToken") diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 81ce93cc3..6735924eb 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -4,13 +4,15 @@ import boto3 from moto import mock_secretsmanager from botocore.exceptions import ClientError -import sure # noqa import string +import unittest import pytz from datetime import datetime -import unittest from nose.tools import assert_raises +DEFAULT_SECRET_NAME = 'test-secret' + + @mock_secretsmanager def test_get_secret_value(): conn = boto3.client('secretsmanager', region_name='us-west-2') @@ -389,34 +391,32 @@ def test_restore_secret_that_does_not_exist(): @mock_secretsmanager def test_rotate_secret(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') - rotated_secret = conn.rotate_secret(SecretId=secret_name) + rotated_secret = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME) assert rotated_secret assert rotated_secret['ARN'] != '' # Test arn not empty - assert rotated_secret['Name'] == secret_name + assert rotated_secret['Name'] == DEFAULT_SECRET_NAME assert rotated_secret['VersionId'] != '' @mock_secretsmanager def test_rotate_secret_enable_rotation(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') - initial_description = conn.describe_secret(SecretId=secret_name) + initial_description = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) assert initial_description assert initial_description['RotationEnabled'] is False assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0 - conn.rotate_secret(SecretId=secret_name, + conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, RotationRules={'AutomaticallyAfterDays': 42}) - rotated_description = conn.describe_secret(SecretId=secret_name) + rotated_description = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) assert rotated_description assert rotated_description['RotationEnabled'] is True assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42 @@ -460,9 +460,8 @@ def test_rotate_secret_client_request_token_too_short(): @mock_secretsmanager def test_rotate_secret_client_request_token_too_long(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') client_request_token = ( @@ -470,19 +469,18 @@ def test_rotate_secret_client_request_token_too_long(): 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' ) with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, ClientRequestToken=client_request_token) @mock_secretsmanager def 
test_rotate_secret_rotation_lambda_arn_too_long(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, RotationLambdaARN=rotation_lambda_arn) @mock_secretsmanager @@ -494,12 +492,78 @@ def test_rotate_secret_rotation_period_zero(): @mock_secretsmanager def test_rotate_secret_rotation_period_too_long(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') rotation_rules = {'AutomaticallyAfterDays': 1001} with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, RotationRules=rotation_rules) + +@mock_secretsmanager +def test_put_secret_value_puts_new_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='foosecret', + VersionStages=['AWSCURRENT']) + version_id = put_secret_value_dict['VersionId'] + + get_secret_value_dict = conn.get_secret_value(SecretId=DEFAULT_SECRET_NAME, + VersionId=version_id, + VersionStage='AWSCURRENT') + + assert get_secret_value_dict + assert get_secret_value_dict['SecretString'] == 'foosecret' + +@mock_secretsmanager +def test_put_secret_value_can_get_first_version_if_put_twice(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='first_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='second_secret', + VersionStages=['AWSCURRENT']) + + first_secret_value_dict = conn.get_secret_value(SecretId=DEFAULT_SECRET_NAME, + VersionId=first_version_id) + first_secret_value = first_secret_value_dict['SecretString'] + + assert first_secret_value == 'first_secret' + + +@mock_secretsmanager +def test_put_secret_value_versions_differ_if_same_secret_put_twice(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + second_version_id = put_secret_value_dict['VersionId'] + + assert first_version_id != second_version_id + + +@mock_secretsmanager +def test_can_list_secret_version_ids(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + second_version_id = put_secret_value_dict['VersionId'] + + versions_list = conn.list_secret_version_ids(SecretId=DEFAULT_SECRET_NAME) + + 
returned_version_ids = [v['VersionId'] for v in versions_list['Versions']]
+
+    assert sorted([first_version_id, second_version_id]) == sorted(returned_version_ids)
+
diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py
index d0f495f57..23d823239 100644
--- a/tests/test_secretsmanager/test_server.py
+++ b/tests/test_secretsmanager/test_server.py
@@ -10,6 +10,8 @@ from moto import mock_secretsmanager
 Test the different server responses for secretsmanager
 '''
 
+DEFAULT_SECRET_NAME = 'test-secret'
+
 
 @mock_secretsmanager
 def test_get_secret_value():
@@ -18,19 +20,20 @@ def test_get_secret_value():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foo-secret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"},
                     )
     get_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
-                              "VersionStage": "AWSCURRENT"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.GetSecretValue"},
-                    )
+                      data={"SecretId": DEFAULT_SECRET_NAME,
+                            "VersionStage": "AWSCURRENT"},
+                      headers={
+                          "X-Amz-Target": "secretsmanager.GetSecretValue"},
+                      )
 
     json_data = json.loads(get_secret.data.decode("utf-8"))
+
     assert json_data['SecretString'] == 'foo-secret'
 
 @mock_secretsmanager
@@ -55,7 +58,7 @@ def test_get_secret_that_does_not_match():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foo-secret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"},
                     )
@@ -165,7 +168,7 @@ def test_describe_secret_that_does_not_match():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -188,7 +191,7 @@ def test_rotate_secret():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -197,7 +200,7 @@ def test_rotate_secret():
     client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2"
     rotate_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
+                        data={"SecretId": DEFAULT_SECRET_NAME,
                               "ClientRequestToken": client_request_token},
                         headers={
                             "X-Amz-Target": "secretsmanager.RotateSecret"
@@ -207,7 +210,7 @@ def test_rotate_secret():
     json_data = json.loads(rotate_secret.data.decode("utf-8"))
     assert json_data   # Returned dict is not empty
     assert json_data['ARN'] != ''
-    assert json_data['Name'] == 'test-secret'
+    assert json_data['Name'] == DEFAULT_SECRET_NAME
     assert json_data['VersionId'] == client_request_token
 
 # @mock_secretsmanager
@@ -289,7 +292,7 @@ def test_rotate_secret_that_does_not_match():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -313,7 +316,7 @@ def test_rotate_secret_client_request_token_too_short():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -322,7 +325,7 @@ def test_rotate_secret_client_request_token_too_short():
     client_request_token = 
"ED9F8B6C-85B7-B7E4-38F2A3BEB13C" rotate_secret = test_client.post('/', - data={"SecretId": "test-secret", + data={"SecretId": DEFAULT_SECRET_NAME, "ClientRequestToken": client_request_token}, headers={ "X-Amz-Target": "secretsmanager.RotateSecret" @@ -339,7 +342,7 @@ def test_rotate_secret_client_request_token_too_long(): test_client = backend.test_client() create_secret = test_client.post('/', - data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret" @@ -351,7 +354,7 @@ def test_rotate_secret_client_request_token_too_long(): 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' ) rotate_secret = test_client.post('/', - data={"SecretId": "test-secret", + data={"SecretId": DEFAULT_SECRET_NAME, "ClientRequestToken": client_request_token}, headers={ "X-Amz-Target": "secretsmanager.RotateSecret" @@ -368,7 +371,7 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): test_client = backend.test_client() create_secret = test_client.post('/', - data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret" @@ -377,7 +380,7 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters rotate_secret = test_client.post('/', - data={"SecretId": "test-secret", + data={"SecretId": DEFAULT_SECRET_NAME, "RotationLambdaARN": rotation_lambda_arn}, headers={ "X-Amz-Target": "secretsmanager.RotateSecret" @@ -389,7 +392,165 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): assert json_data['__type'] == 'InvalidParameterException' -# + + + +@mock_secretsmanager +def test_put_secret_value_puts_new_secret(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "foosecret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + + put_second_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "foosecret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8")) + + version_id = second_secret_json_data['VersionId'] + + secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "VersionId": version_id, + "VersionStage": 'AWSCURRENT'}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + + second_secret_json_data = json.loads(secret_value_json.data.decode("utf-8")) + + assert second_secret_json_data + assert second_secret_json_data['SecretString'] == 'foosecret' + + +@mock_secretsmanager +def test_put_secret_value_can_get_first_version_if_put_twice(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + first_secret_string = 'first_secret' + second_secret_string = 'second_secret' + + put_first_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": first_secret_string, + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + + first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8")) + + first_secret_version_id = first_secret_json_data['VersionId'] + + test_client.post('/', + data={ + "SecretId": 
DEFAULT_SECRET_NAME,
+                          "SecretString": second_secret_string,
+                          "VersionStages": ["AWSCURRENT"]},
+                      headers={
+                          "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                      )
+
+    get_first_secret_value_json = test_client.post('/',
+                      data={
+                          "SecretId": DEFAULT_SECRET_NAME,
+                          "VersionId": first_secret_version_id,
+                          "VersionStage": 'AWSCURRENT'},
+                      headers={
+                          "X-Amz-Target": "secretsmanager.GetSecretValue"},
+                      )
+
+    get_first_secret_json_data = json.loads(get_first_secret_value_json.data.decode("utf-8"))
+
+    assert get_first_secret_json_data
+    assert get_first_secret_json_data['SecretString'] == first_secret_string
+
+
+@mock_secretsmanager
+def test_put_secret_value_versions_differ_if_same_secret_put_twice():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    put_first_secret_value_json = test_client.post('/',
+                      data={
+                          "SecretId": DEFAULT_SECRET_NAME,
+                          "SecretString": "secret",
+                          "VersionStages": ["AWSCURRENT"]},
+                      headers={
+                          "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                      )
+    first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8"))
+    first_secret_version_id = first_secret_json_data['VersionId']
+
+    put_second_secret_value_json = test_client.post('/',
+                      data={
+                          "SecretId": DEFAULT_SECRET_NAME,
+                          "SecretString": "secret",
+                          "VersionStages": ["AWSCURRENT"]},
+                      headers={
+                          "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                      )
+    second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8"))
+    second_secret_version_id = second_secret_json_data['VersionId']
+
+    assert first_secret_version_id != second_secret_version_id
+
+
+@mock_secretsmanager
+def test_can_list_secret_version_ids():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    put_first_secret_value_json = test_client.post('/',
+                      data={
+                          "SecretId": DEFAULT_SECRET_NAME,
+                          "SecretString": "secret",
+                          "VersionStages": ["AWSCURRENT"]},
+                      headers={
+                          "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                      )
+    first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8"))
+    first_secret_version_id = first_secret_json_data['VersionId']
+    put_second_secret_value_json = test_client.post('/',
+                      data={
+                          "SecretId": DEFAULT_SECRET_NAME,
+                          "SecretString": "secret",
+                          "VersionStages": ["AWSCURRENT"]},
+                      headers={
+                          "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                      )
+    second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8"))
+    second_secret_version_id = second_secret_json_data['VersionId']
+
+    list_secret_versions_json = test_client.post('/',
+                      data={
+                          "SecretId": DEFAULT_SECRET_NAME, },
+                      headers={
+                          "X-Amz-Target": "secretsmanager.ListSecretVersionIds"},
+                      )
+
+    versions_list = json.loads(list_secret_versions_json.data.decode("utf-8"))
+
+    returned_version_ids = [v['VersionId'] for v in versions_list['Versions']]
+
+    assert sorted([first_secret_version_id, second_secret_version_id]) == sorted(returned_version_ids)
+
+#
 # The following tests should work, but fail on the embedded dict in
 # RotationRules. The error message suggests a problem deeper in the code, which
 # needs further investigation.
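
Reviewer's note on the behaviour this patch introduces: each put_secret_value() call creates
a new secret version staged as AWSCURRENT, and _add_secret() restages the previously current
version as AWSPREVIOUS. A minimal sketch of that flow against the mocked API follows (the
secret name and strings are illustrative; note that, as in the tests above, put_secret_value()
on a secret that does not yet exist simply creates it in this implementation):

    import boto3
    from moto import mock_secretsmanager

    @mock_secretsmanager
    def demo_version_stages():
        conn = boto3.client('secretsmanager', region_name='us-west-2')
        # First put creates the secret; its version is staged as AWSCURRENT.
        v1 = conn.put_secret_value(SecretId='demo-secret',
                                   SecretString='one',
                                   VersionStages=['AWSCURRENT'])['VersionId']
        # Second put takes over AWSCURRENT; v1 is restaged as AWSPREVIOUS.
        v2 = conn.put_secret_value(SecretId='demo-secret',
                                   SecretString='two',
                                   VersionStages=['AWSCURRENT'])['VersionId']
        # The default lookup resolves to the new AWSCURRENT version...
        assert conn.get_secret_value(SecretId='demo-secret')['VersionId'] == v2
        # ...while the old value stays reachable through the AWSPREVIOUS stage.
        previous = conn.get_secret_value(SecretId='demo-secret',
                                         VersionStage='AWSPREVIOUS')
        assert previous['VersionId'] == v1
        assert previous['SecretString'] == 'one'

    demo_version_stages()
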
From 1088c421d2a9a5bd0d953df58f02ffca6dd0a09f Mon Sep 17 00:00:00 2001
From: Ber Zoidberg
Date: Wed, 22 May 2019 02:47:02 -0700
Subject: [PATCH 19/41] #2212 add support for delete on update_with_attribute_updates (#2213)

* add support for delete on update_with_attribute_updates
---
 moto/dynamodb2/models.py                      | 13 +++++++++
 .../test_dynamodb_table_with_range_key.py     | 28 +++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py
index c2e5da2ee..a8920e027 100644
--- a/moto/dynamodb2/models.py
+++ b/moto/dynamodb2/models.py
@@ -294,6 +294,19 @@ class Item(BaseModel):
                 # TODO: implement other data types
                 raise NotImplementedError(
                     'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
+        elif action == 'DELETE':
+            if set(update_action['Value'].keys()) == set(['SS']):
+                existing = self.attrs.get(attribute_name, DynamoType({"SS": {}}))
+                new_set = set(existing.value).difference(set(new_value))
+                self.attrs[attribute_name] = DynamoType({
+                    "SS": list(new_set)
+                })
+            else:
+                raise NotImplementedError(
+                    'DELETE not supported for %s' % ', '.join(update_action['Value'].keys()))
+        else:
+            raise NotImplementedError(
+                '%s action not supported for update_with_attribute_updates' % action)
 
 
 class StreamRecord(BaseModel):
diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py
index 77e06ecb1..e64d7d196 100644
--- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py
+++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py
@@ -1344,6 +1344,34 @@ def test_update_item_add_value_string_set():
         'subject': '123',
     })
 
+@mock_dynamodb2
+def test_update_item_delete_value_string_set():
+    table = _create_table_with_range_key()
+
+    table.put_item(Item={
+        'forum_name': 'the-key',
+        'subject': '123',
+        'string_set': set(['str1', 'str2']),
+    })
+
+    item_key = {'forum_name': 'the-key', 'subject': '123'}
+    table.update_item(
+        Key=item_key,
+        AttributeUpdates={
+            'string_set': {
+                'Action': u'DELETE',
+                'Value': set(['str2']),
+            },
+        },
+    )
+
+    returned_item = dict((k, str(v) if isinstance(v, Decimal) else v)
+                         for k, v in table.get_item(Key=item_key)['Item'].items())
+    dict(returned_item).should.equal({
+        'string_set': set(['str1']),
+        'forum_name': 'the-key',
+        'subject': '123',
+    })
 
 @mock_dynamodb2
 def test_update_item_add_value_does_not_exist_is_created():

From a6d9cadac3361c8822344011e114a4aa54c4f309 Mon Sep 17 00:00:00 2001
From: Terry Cain
Date: Wed, 22 May 2019 22:01:28 +0100
Subject: [PATCH 20/41] Change docker library version check (#2214)

---
 moto/awslambda/models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py
index f291f3beb..8dfa4724a 100644
--- a/moto/awslambda/models.py
+++ b/moto/awslambda/models.py
@@ -45,7 +45,7 @@ except ImportError:
 _stderr_regex = re.compile(r'START|END|REPORT RequestId: .*')
 _orig_adapter_send = requests.adapters.HTTPAdapter.send
-docker_3 = docker.__version__.startswith("3")
+docker_3 = docker.__version__[0] >= '3'
 
 
 def zip2tar(zip_bytes):

From 2a5f7e15a7bc748c1148dae922cb1a9f8672dcbe Mon Sep 17 00:00:00 2001
From: Dan Chan
Date: Thu, 23 May 2019 04:01:47 -0400
Subject: [PATCH 21/41] Updating redshift.describe_cluster_snapshots to return multiple snapshots for cluster_identifier (#2216)

---
 moto/redshift/models.py              |  5 +++-
 tests/test_redshift/test_redshift.py | 35 ++++++++++++++++++++--------
 2 files changed, 29 insertions(+), 11 deletions(-)

diff --git 
a/moto/redshift/models.py b/moto/redshift/models.py index 6a55f479a..64e5c5e35 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -640,9 +640,12 @@ class RedshiftBackend(BaseBackend): def describe_cluster_snapshots(self, cluster_identifier=None, snapshot_identifier=None): if cluster_identifier: + cluster_snapshots = [] for snapshot in self.snapshots.values(): if snapshot.cluster.cluster_identifier == cluster_identifier: - return [snapshot] + cluster_snapshots.append(snapshot) + if cluster_snapshots: + return cluster_snapshots raise ClusterNotFoundError(cluster_identifier) if snapshot_identifier: diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index e37440c60..9301aaa72 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -699,7 +699,8 @@ def test_create_cluster_snapshot(): def test_describe_cluster_snapshots(): client = boto3.client('redshift', region_name='us-east-1') cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' + snapshot_identifier_1 = 'my_snapshot_1' + snapshot_identifier_2 = 'my_snapshot_2' client.create_cluster( DBName='test-db', @@ -711,19 +712,33 @@ def test_describe_cluster_snapshots(): ) client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, + SnapshotIdentifier=snapshot_identifier_1, + ClusterIdentifier=cluster_identifier, + ) + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier_2, ClusterIdentifier=cluster_identifier, ) + resp_snap_1 = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier_1) + snapshot_1 = resp_snap_1['Snapshots'][0] + snapshot_1['SnapshotIdentifier'].should.equal(snapshot_identifier_1) + snapshot_1['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot_1['NumberOfNodes'].should.equal(1) + snapshot_1['NodeType'].should.equal('ds2.xlarge') + snapshot_1['MasterUsername'].should.equal('username') + + resp_snap_2 = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier_2) + snapshot_2 = resp_snap_2['Snapshots'][0] + snapshot_2['SnapshotIdentifier'].should.equal(snapshot_identifier_2) + snapshot_2['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot_2['NumberOfNodes'].should.equal(1) + snapshot_2['NodeType'].should.equal('ds2.xlarge') + snapshot_2['MasterUsername'].should.equal('username') + resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) - resp_snap = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier) - resp_clust['Snapshots'][0].should.equal(resp_snap['Snapshots'][0]) - snapshot = resp_snap['Snapshots'][0] - snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) - snapshot['ClusterIdentifier'].should.equal(cluster_identifier) - snapshot['NumberOfNodes'].should.equal(1) - snapshot['NodeType'].should.equal('ds2.xlarge') - snapshot['MasterUsername'].should.equal('username') + resp_clust['Snapshots'][0].should.equal(resp_snap_1['Snapshots'][0]) + resp_clust['Snapshots'][1].should.equal(resp_snap_2['Snapshots'][0]) @mock_redshift From cb72d1d00e7dadf7e1089a73a8372c57a4259afa Mon Sep 17 00:00:00 2001 From: Jon Beilke Date: Sat, 25 May 2019 04:14:23 -0500 Subject: [PATCH 22/41] Feature cloudwatch log retention (#2199) * add proper retentionInDays to describe_log_groups response and add support for delete_retention_policy() and put_retention_policy() to log groups * fix for inline comment formatting * include check for retentionInDays to verify no retention by default in 
test_log_group_create --- moto/logs/models.py | 23 +++++++++++++++++++++-- moto/logs/responses.py | 11 +++++++++++ tests/test_logs/test_logs.py | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 68 insertions(+), 2 deletions(-) diff --git a/moto/logs/models.py b/moto/logs/models.py index e105d4d14..a44b76812 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -137,6 +137,7 @@ class LogGroup: self.creationTime = unix_time_millis() self.tags = tags self.streams = dict() # {name: LogStream} + self.retentionInDays = None # AWS defaults to Never Expire for log group retention def create_log_stream(self, log_stream_name): if log_stream_name in self.streams: @@ -201,14 +202,20 @@ class LogGroup: return events_page, next_token, searched_streams def to_describe_dict(self): - return { + log_group = { "arn": self.arn, "creationTime": self.creationTime, "logGroupName": self.name, "metricFilterCount": 0, - "retentionInDays": 30, "storedBytes": sum(s.storedBytes for s in self.streams.values()), } + # AWS only returns retentionInDays if a value is set for the log group (ie. not Never Expire) + if self.retentionInDays: + log_group["retentionInDays"] = self.retentionInDays + return log_group + + def set_retention_policy(self, retention_in_days): + self.retentionInDays = retention_in_days class LogsBackend(BaseBackend): @@ -289,5 +296,17 @@ class LogsBackend(BaseBackend): log_group = self.groups[log_group_name] return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) + def put_retention_policy(self, log_group_name, retention_in_days): + if log_group_name not in self.groups: + raise ResourceNotFoundException() + log_group = self.groups[log_group_name] + return log_group.set_retention_policy(retention_in_days) + + def delete_retention_policy(self, log_group_name): + if log_group_name not in self.groups: + raise ResourceNotFoundException() + log_group = self.groups[log_group_name] + return log_group.set_retention_policy(None) + logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()} diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 4bec86cb2..39f24a260 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -123,3 +123,14 @@ class LogsResponse(BaseResponse): "nextToken": next_token, "searchedLogStreams": searched_streams }) + + def put_retention_policy(self): + log_group_name = self._get_param('logGroupName') + retention_in_days = self._get_param('retentionInDays') + self.logs_backend.put_retention_policy(log_group_name, retention_in_days) + return '' + + def delete_retention_policy(self): + log_group_name = self._get_param('logGroupName') + self.logs_backend.delete_retention_policy(log_group_name) + return '' diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index e3d46fd87..7048061f0 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -17,6 +17,8 @@ def test_log_group_create(): response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) assert len(response['logGroups']) == 1 + # AWS defaults to Never Expire for log group retention + assert response['logGroups'][0].get('retentionInDays') == None response = conn.delete_log_group(logGroupName=log_group_name) @@ -126,3 +128,37 @@ def test_filter_logs_interleaved(): resulting_event['timestamp'].should.equal(original_message['timestamp']) resulting_event['message'].should.equal(original_message['message']) +@mock_logs +def 
test_put_retention_policy(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == 7 + + response = conn.delete_log_group(logGroupName=log_group_name) + +@mock_logs +def test_delete_retention_policy(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == 7 + + response = conn.delete_retention_policy(logGroupName=log_group_name) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == None + + response = conn.delete_log_group(logGroupName=log_group_name) + From a61124f77426c6ee5145d71a4bc6bbcf5ab096e1 Mon Sep 17 00:00:00 2001 From: Koudai Aono Date: Sat, 25 May 2019 18:16:33 +0900 Subject: [PATCH 23/41] support to create dynamodb resource by cloudformation (#2219) * support to create dynamodb resource by cloudformation --- moto/cloudformation/parsing.py | 4 +- moto/dynamodb2/models.py | 19 +++ .../test_cloudformation_stack_integration.py | 132 ++++++++++++++++++ 3 files changed, 153 insertions(+), 2 deletions(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index f2b74d185..3bf994bed 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -12,7 +12,7 @@ from moto.batch import models as batch_models from moto.cloudwatch import models as cloudwatch_models from moto.cognitoidentity import models as cognitoidentity_models from moto.datapipeline import models as datapipeline_models -from moto.dynamodb import models as dynamodb_models +from moto.dynamodb2 import models as dynamodb2_models from moto.ec2 import models as ec2_models from moto.ecs import models as ecs_models from moto.elb import models as elb_models @@ -37,7 +37,7 @@ MODEL_MAP = { "AWS::Batch::JobDefinition": batch_models.JobDefinition, "AWS::Batch::JobQueue": batch_models.JobQueue, "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment, - "AWS::DynamoDB::Table": dynamodb_models.Table, + "AWS::DynamoDB::Table": dynamodb2_models.Table, "AWS::Kinesis::Stream": kinesis_models.Stream, "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, "AWS::Lambda::Function": lambda_models.LambdaFunction, diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index a8920e027..6bcde41b2 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -417,6 +417,25 @@ class Table(BaseModel): } self.set_stream_specification(streams) + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + params = {} + + if 'KeySchema' in properties: + params['schema'] = properties['KeySchema'] + if 'AttributeDefinitions' in properties: + params['attr'] = properties['AttributeDefinitions'] + if 'GlobalSecondaryIndexes' in properties: + params['global_indexes'] = 
properties['GlobalSecondaryIndexes'] + if 'ProvisionedThroughput' in properties: + params['throughput'] = properties['ProvisionedThroughput'] + if 'LocalSecondaryIndexes' in properties: + params['indexes'] = properties['LocalSecondaryIndexes'] + + table = dynamodb_backends[region_name].create_table(name=properties['TableName'], **params) + return table + def _generate_arn(self, name): return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 449fde4ce..87fbdb0e6 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import json import base64 +from decimal import Decimal + import boto import boto.cloudformation import boto.datapipeline @@ -22,6 +24,7 @@ from moto import ( mock_cloudformation, mock_cloudformation_deprecated, mock_datapipeline_deprecated, + mock_dynamodb2, mock_ec2, mock_ec2_deprecated, mock_elb, @@ -39,6 +42,7 @@ from moto import ( mock_sqs, mock_sqs_deprecated, mock_elbv2) +from moto.dynamodb2.models import Table from .fixtures import ( ec2_classic_eip, @@ -2433,3 +2437,131 @@ def test_stack_elbv2_resources_integration(): dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) + + +@mock_dynamodb2 +@mock_cloudformation +def test_stack_dynamodb_resources_integration(): + dynamodb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "myDynamoDBTable": { + "Type": "AWS::DynamoDB::Table", + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "Album", + "AttributeType": "S" + }, + { + "AttributeName": "Artist", + "AttributeType": "S" + }, + { + "AttributeName": "Sales", + "AttributeType": "N" + }, + { + "AttributeName": "NumberOfSongs", + "AttributeType": "N" + } + ], + "KeySchema": [ + { + "AttributeName": "Album", + "KeyType": "HASH" + }, + { + "AttributeName": "Artist", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + }, + "TableName": "myTableName", + "GlobalSecondaryIndexes": [{ + "IndexName": "myGSI", + "KeySchema": [ + { + "AttributeName": "Sales", + "KeyType": "HASH" + }, + { + "AttributeName": "Artist", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Album","NumberOfSongs"], + "ProjectionType": "INCLUDE" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + } + }, + { + "IndexName": "myGSI2", + "KeySchema": [ + { + "AttributeName": "NumberOfSongs", + "KeyType": "HASH" + }, + { + "AttributeName": "Sales", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Album","Artist"], + "ProjectionType": "INCLUDE" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + } + }], + "LocalSecondaryIndexes":[{ + "IndexName": "myLSI", + "KeySchema": [ + { + "AttributeName": "Album", + "KeyType": "HASH" + }, + { + "AttributeName": "Sales", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Artist","NumberOfSongs"], + "ProjectionType": "INCLUDE" + } + }] + } + } + } + } + + dynamodb_template_json = json.dumps(dynamodb_template) + + cfn_conn = boto3.client('cloudformation', 'us-east-1') + cfn_conn.create_stack( + StackName='dynamodb_stack', + 
TemplateBody=dynamodb_template_json, + ) + + dynamodb_conn = boto3.resource('dynamodb', region_name='us-east-1') + table = dynamodb_conn.Table('myTableName') + table.name.should.equal('myTableName') + + table.put_item(Item={"Album": "myAlbum", "Artist": "myArtist", "Sales": 10, "NumberOfSongs": 5}) + + response = table.get_item(Key={"Album": "myAlbum", "Artist": "myArtist"}) + + response['Item']['Album'].should.equal('myAlbum') + response['Item']['Sales'].should.equal(Decimal('10')) + response['Item']['NumberOfSongs'].should.equal(Decimal('5')) + response['Item']['Album'].should.equal('myAlbum') From a3f6d2c110e5bc1117191ca364a0d1f4b28cbb4b Mon Sep 17 00:00:00 2001 From: Ashley Gould Date: Sat, 25 May 2019 02:20:19 -0700 Subject: [PATCH 24/41] [Resolves #2196] - endpoints for querying organizations SC policies (#2197) adding support for organizations service control policies * [Resolves #2196] - endpoints for querying organizations SC policies I have added the following mock endpoints to the Organizations service: - create_policy - list_policies - describe_policy - attach_policy - list_policies_for_target - list_targets_for_policy --- IMPLEMENTATION_COVERAGE.md | 14 +- moto/organizations/models.py | 147 ++++++++++ moto/organizations/responses.py | 30 ++ moto/organizations/utils.py | 17 ++ .../organizations_test_utils.py | 54 ++-- .../test_organizations_boto3.py | 272 ++++++++++++++++++ 6 files changed, 508 insertions(+), 26 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 3cd4d1fec..5190ab09b 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -3098,14 +3098,14 @@ - [ ] update_server - [ ] update_server_engine_attributes -## organizations - 30% implemented +## organizations - 47% implemented - [ ] accept_handshake -- [ ] attach_policy +- [X] attach_policy - [ ] cancel_handshake - [X] create_account - [X] create_organization - [X] create_organizational_unit -- [ ] create_policy +- [X] create_policy - [ ] decline_handshake - [ ] delete_organization - [ ] delete_organizational_unit @@ -3115,7 +3115,7 @@ - [ ] describe_handshake - [X] describe_organization - [X] describe_organizational_unit -- [ ] describe_policy +- [X] describe_policy - [ ] detach_policy - [ ] disable_aws_service_access - [ ] disable_policy_type @@ -3133,10 +3133,10 @@ - [ ] list_handshakes_for_organization - [X] list_organizational_units_for_parent - [X] list_parents -- [ ] list_policies -- [ ] list_policies_for_target +- [X] list_policies +- [X] list_policies_for_target - [X] list_roots -- [ ] list_targets_for_policy +- [X] list_targets_for_policy - [X] move_account - [ ] remove_account_from_organization - [ ] update_organizational_unit diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 9d5fe3886..91004b9ba 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -47,6 +47,7 @@ class FakeOrganization(BaseModel): class FakeAccount(BaseModel): def __init__(self, organization, **kwargs): + self.type = 'ACCOUNT' self.organization_id = organization.id self.master_account_id = organization.master_account_id self.create_account_status_id = utils.make_random_create_account_status_id() @@ -57,6 +58,7 @@ class FakeAccount(BaseModel): self.status = 'ACTIVE' self.joined_method = 'CREATED' self.parent_id = organization.root_id + self.attached_policies = [] @property def arn(self): @@ -103,6 +105,7 @@ class FakeOrganizationalUnit(BaseModel): self.name = kwargs.get('Name') self.parent_id = kwargs.get('ParentId') 
self._arn_format = utils.OU_ARN_FORMAT + self.attached_policies = [] @property def arn(self): @@ -134,6 +137,7 @@ class FakeRoot(FakeOrganizationalUnit): 'Status': 'ENABLED' }] self._arn_format = utils.ROOT_ARN_FORMAT + self.attached_policies = [] def describe(self): return { @@ -144,12 +148,52 @@ class FakeRoot(FakeOrganizationalUnit): } +class FakeServiceControlPolicy(BaseModel): + + def __init__(self, organization, **kwargs): + self.type = 'POLICY' + self.content = kwargs.get('Content') + self.description = kwargs.get('Description') + self.name = kwargs.get('Name') + self.type = kwargs.get('Type') + self.id = utils.make_random_service_control_policy_id() + self.aws_managed = False + self.organization_id = organization.id + self.master_account_id = organization.master_account_id + self._arn_format = utils.SCP_ARN_FORMAT + self.attachments = [] + + @property + def arn(self): + return self._arn_format.format( + self.master_account_id, + self.organization_id, + self.id + ) + + def describe(self): + return { + 'Policy': { + 'PolicySummary': { + 'Id': self.id, + 'Arn': self.arn, + 'Name': self.name, + 'Description': self.description, + 'Type': self.type, + 'AwsManaged': self.aws_managed, + }, + 'Content': self.content + } + } + + class OrganizationsBackend(BaseBackend): def __init__(self): self.org = None self.accounts = [] self.ou = [] + self.policies = [] def create_organization(self, **kwargs): self.org = FakeOrganization(kwargs['FeatureSet']) @@ -292,5 +336,108 @@ class OrganizationsBackend(BaseBackend): ] ) + def create_policy(self, **kwargs): + new_policy = FakeServiceControlPolicy(self.org, **kwargs) + self.policies.append(new_policy) + return new_policy.describe() + + def describe_policy(self, **kwargs): + if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']): + policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None) + if policy is None: + raise RESTError( + 'PolicyNotFoundException', + "You specified a policy that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' + ) + return policy.describe() + + def attach_policy(self, **kwargs): + policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None) + if (re.compile(utils.ROOT_ID_REGEX).match(kwargs['TargetId']) or + re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId'])): + ou = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None) + if ou is not None: + if ou not in ou.attached_policies: + ou.attached_policies.append(policy) + policy.attachments.append(ou) + else: + raise RESTError( + 'OrganizationalUnitNotFoundException', + "You specified an organizational unit that doesn't exist." + ) + elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']): + account = next((a for a in self.accounts if a.id == kwargs['TargetId']), None) + if account is not None: + if account not in account.attached_policies: + account.attached_policies.append(policy) + policy.attachments.append(account) + else: + raise RESTError( + 'AccountNotFoundException', + "You specified an account that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' 
+ ) + + def list_policies(self, **kwargs): + return dict(Policies=[ + p.describe()['Policy']['PolicySummary'] for p in self.policies + ]) + + def list_policies_for_target(self, **kwargs): + if re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId']): + obj = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None) + if obj is None: + raise RESTError( + 'OrganizationalUnitNotFoundException', + "You specified an organizational unit that doesn't exist." + ) + elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']): + obj = next((a for a in self.accounts if a.id == kwargs['TargetId']), None) + if obj is None: + raise RESTError( + 'AccountNotFoundException', + "You specified an account that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' + ) + return dict(Policies=[ + p.describe()['Policy']['PolicySummary'] for p in obj.attached_policies + ]) + + def list_targets_for_policy(self, **kwargs): + if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']): + policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None) + if policy is None: + raise RESTError( + 'PolicyNotFoundException', + "You specified a policy that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' + ) + objects = [ + { + 'TargetId': obj.id, + 'Arn': obj.arn, + 'Name': obj.name, + 'Type': obj.type, + } for obj in policy.attachments + ] + return dict(Targets=objects) + organizations_backend = OrganizationsBackend() diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py index 966c3fbf3..814f30bad 100644 --- a/moto/organizations/responses.py +++ b/moto/organizations/responses.py @@ -85,3 +85,33 @@ class OrganizationsResponse(BaseResponse): return json.dumps( self.organizations_backend.list_children(**self.request_params) ) + + def create_policy(self): + return json.dumps( + self.organizations_backend.create_policy(**self.request_params) + ) + + def describe_policy(self): + return json.dumps( + self.organizations_backend.describe_policy(**self.request_params) + ) + + def attach_policy(self): + return json.dumps( + self.organizations_backend.attach_policy(**self.request_params) + ) + + def list_policies(self): + return json.dumps( + self.organizations_backend.list_policies(**self.request_params) + ) + + def list_policies_for_target(self): + return json.dumps( + self.organizations_backend.list_policies_for_target(**self.request_params) + ) + + def list_targets_for_policy(self): + return json.dumps( + self.organizations_backend.list_targets_for_policy(**self.request_params) + ) diff --git a/moto/organizations/utils.py b/moto/organizations/utils.py index 007afa6ed..bde3660d2 100644 --- a/moto/organizations/utils.py +++ b/moto/organizations/utils.py @@ -10,6 +10,7 @@ MASTER_ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{0}' ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{2}' ROOT_ARN_FORMAT = 'arn:aws:organizations::{0}:root/{1}/{2}' OU_ARN_FORMAT = 'arn:aws:organizations::{0}:ou/{1}/{2}' +SCP_ARN_FORMAT = 'arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}' CHARSET = string.ascii_lowercase + string.digits ORG_ID_SIZE = 10 @@ -17,6 +18,15 @@ ROOT_ID_SIZE = 4 ACCOUNT_ID_SIZE = 12 OU_ID_SUFFIX_SIZE = 8 CREATE_ACCOUNT_STATUS_ID_SIZE = 8 +SCP_ID_SIZE = 8 + +EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" +ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % ORG_ID_SIZE +ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % ROOT_ID_SIZE 
+OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE) +ACCOUNT_ID_REGEX = r'[0-9]{%s}' % ACCOUNT_ID_SIZE +CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % CREATE_ACCOUNT_STATUS_ID_SIZE +SCP_ID_REGEX = r'p-[a-z0-9]{%s}' % SCP_ID_SIZE def make_random_org_id(): @@ -57,3 +67,10 @@ def make_random_create_account_status_id(): # "car-" followed by from 8 to 32 lower-case letters or digits. # e.g. 'car-35gxzwrp' return 'car-' + ''.join(random.choice(CHARSET) for x in range(CREATE_ACCOUNT_STATUS_ID_SIZE)) + + +def make_random_service_control_policy_id(): + # The regex pattern for a policy ID string requires "p-" followed by + # from 8 to 128 lower-case letters or digits. + # e.g. 'p-k2av4a8a' + return 'p-' + ''.join(random.choice(CHARSET) for x in range(SCP_ID_SIZE)) diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py index 6548b1830..36933d41a 100644 --- a/tests/test_organizations/organizations_test_utils.py +++ b/tests/test_organizations/organizations_test_utils.py @@ -5,38 +5,36 @@ import sure # noqa import datetime from moto.organizations import utils -EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" -ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE -ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE -OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE) -ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE -CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE - def test_make_random_org_id(): org_id = utils.make_random_org_id() - org_id.should.match(ORG_ID_REGEX) + org_id.should.match(utils.ORG_ID_REGEX) def test_make_random_root_id(): root_id = utils.make_random_root_id() - root_id.should.match(ROOT_ID_REGEX) + root_id.should.match(utils.ROOT_ID_REGEX) def test_make_random_ou_id(): root_id = utils.make_random_root_id() ou_id = utils.make_random_ou_id(root_id) - ou_id.should.match(OU_ID_REGEX) + ou_id.should.match(utils.OU_ID_REGEX) def test_make_random_account_id(): account_id = utils.make_random_account_id() - account_id.should.match(ACCOUNT_ID_REGEX) + account_id.should.match(utils.ACCOUNT_ID_REGEX) def test_make_random_create_account_status_id(): create_account_status_id = utils.make_random_create_account_status_id() - create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) + create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) + + +def test_make_random_service_control_policy_id(): + service_control_policy_id = utils.make_random_service_control_policy_id() + service_control_policy_id.should.match(utils.SCP_ID_REGEX) def validate_organization(response): @@ -50,7 +48,7 @@ def validate_organization(response): 'MasterAccountEmail', 'MasterAccountId', ]) - org['Id'].should.match(ORG_ID_REGEX) + org['Id'].should.match(utils.ORG_ID_REGEX) org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( org['MasterAccountId'], @@ -72,7 +70,7 @@ def validate_roots(org, response): response.should.have.key('Roots').should.be.a(list) response['Roots'].should_not.be.empty root = response['Roots'][0] - root.should.have.key('Id').should.match(ROOT_ID_REGEX) + root.should.have.key('Id').should.match(utils.ROOT_ID_REGEX) root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( org['MasterAccountId'], org['Id'], @@ -87,7 +85,7 @@ def validate_roots(org, response): def 
validate_organizational_unit(org, response): response.should.have.key('OrganizationalUnit').should.be.a(dict) ou = response['OrganizationalUnit'] - ou.should.have.key('Id').should.match(OU_ID_REGEX) + ou.should.have.key('Id').should.match(utils.OU_ID_REGEX) ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( org['MasterAccountId'], org['Id'], @@ -106,13 +104,13 @@ def validate_account(org, account): 'Name', 'Status', ]) - account['Id'].should.match(ACCOUNT_ID_REGEX) + account['Id'].should.match(utils.ACCOUNT_ID_REGEX) account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( org['MasterAccountId'], org['Id'], account['Id'], )) - account['Email'].should.match(EMAIL_REGEX) + account['Email'].should.match(utils.EMAIL_REGEX) account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) account['Name'].should.be.a(six.string_types) @@ -128,9 +126,27 @@ def validate_create_account_status(create_status): 'RequestedTimestamp', 'State', ]) - create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) - create_status['AccountId'].should.match(ACCOUNT_ID_REGEX) + create_status['Id'].should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) + create_status['AccountId'].should.match(utils.ACCOUNT_ID_REGEX) create_status['AccountName'].should.be.a(six.string_types) create_status['State'].should.equal('SUCCEEDED') create_status['RequestedTimestamp'].should.be.a(datetime.datetime) create_status['CompletedTimestamp'].should.be.a(datetime.datetime) + +def validate_policy_summary(org, summary): + summary.should.be.a(dict) + summary.should.have.key('Id').should.match(utils.SCP_ID_REGEX) + summary.should.have.key('Arn').should.equal(utils.SCP_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + summary['Id'], + )) + summary.should.have.key('Name').should.be.a(six.string_types) + summary.should.have.key('Description').should.be.a(six.string_types) + summary.should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') + summary.should.have.key('AwsManaged').should.be.a(bool) + +def validate_service_control_policy(org, response): + response.should.have.key('PolicySummary').should.be.a(dict) + response.should.have.key('Content').should.be.a(six.string_types) + validate_policy_summary(org, response['PolicySummary']) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index dfac5feeb..05f831e62 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -1,6 +1,8 @@ from __future__ import unicode_literals import boto3 +import json +import six import sure # noqa from botocore.exceptions import ClientError from nose.tools import assert_raises @@ -13,6 +15,8 @@ from .organizations_test_utils import ( validate_organizational_unit, validate_account, validate_create_account_status, + validate_service_control_policy, + validate_policy_summary, ) @@ -320,3 +324,271 @@ def test_list_children_exception(): ex.operation_name.should.equal('ListChildren') ex.response['Error']['Code'].should.equal('400') ex.response['Error']['Message'].should.contain('InvalidInputException') + + +# Service Control Policies +policy_doc01 = dict( + Version='2012-10-17', + Statement=[dict( + Sid='MockPolicyStatement', + Effect='Allow', + Action='s3:*', + Resource='*', + )] +) + +@mock_organizations +def test_create_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = 
client.create_organization(FeatureSet='ALL')['Organization'] + policy = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy'] + validate_service_control_policy(org, policy) + policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy') + policy['PolicySummary']['Description'].should.equal('A dummy service control policy') + policy['Content'].should.equal(json.dumps(policy_doc01)) + + +@mock_organizations +def test_describe_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + policy = client.describe_policy(PolicyId=policy_id)['Policy'] + validate_service_control_policy(org, policy) + policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy') + policy['PolicySummary']['Description'].should.equal('A dummy service control policy') + policy['Content'].should.equal(json.dumps(policy_doc01)) + + +@mock_organizations +def test_describe_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = 'p-47fhe9s3' + with assert_raises(ClientError) as e: + response = client.describe_policy(PolicyId=policy_id) + ex = e.exception + ex.operation_name.should.equal('DescribePolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('PolicyNotFoundException') + with assert_raises(ClientError) as e: + response = client.describe_policy(PolicyId='meaninglessstring') + ex = e.exception + ex.operation_name.should.equal('DescribePolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_attach_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_organizations +def test_attach_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + root_id='r-dj873' + ou_id='ou-gi99-i7r8eh2i2' + account_id='126644886543' + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + 
Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId='meaninglessstring') + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_list_polices(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + for i in range(0,4): + client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy' + str(i), + Type='SERVICE_CONTROL_POLICY' + ) + response = client.list_policies(Filter='SERVICE_CONTROL_POLICY') + for policy in response['Policies']: + validate_policy_summary(org, policy) + + +@mock_organizations +def test_list_policies_for_target(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + response = client.list_policies_for_target( + TargetId=ou_id, + Filter='SERVICE_CONTROL_POLICY', + ) + for policy in response['Policies']: + validate_policy_summary(org, policy) + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response = client.list_policies_for_target( + TargetId=account_id, + Filter='SERVICE_CONTROL_POLICY', + ) + for policy in response['Policies']: + validate_policy_summary(org, policy) + + +@mock_organizations +def test_list_policies_for_target_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + ou_id='ou-gi99-i7r8eh2i2' + account_id='126644886543' + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId=ou_id, + Filter='SERVICE_CONTROL_POLICY', + ) + ex 
= e.exception
+    ex.operation_name.should.equal('ListPoliciesForTarget')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException')
+    with assert_raises(ClientError) as e:
+        response = client.list_policies_for_target(
+            TargetId=account_id,
+            Filter='SERVICE_CONTROL_POLICY',
+        )
+    ex = e.exception
+    ex.operation_name.should.equal('ListPoliciesForTarget')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('AccountNotFoundException')
+    with assert_raises(ClientError) as e:
+        response = client.list_policies_for_target(
+            TargetId='meaninglessstring',
+            Filter='SERVICE_CONTROL_POLICY',
+        )
+    ex = e.exception
+    ex.operation_name.should.equal('ListPoliciesForTarget')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('InvalidInputException')
+
+
+@mock_organizations
+def test_list_targets_for_policy():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    ou_id = client.create_organizational_unit(
+        ParentId=root_id,
+        Name='ou01',
+    )['OrganizationalUnit']['Id']
+    account_id = client.create_account(
+        AccountName=mockname,
+        Email=mockemail,
+    )['CreateAccountStatus']['AccountId']
+    policy_id = client.create_policy(
+        Content=json.dumps(policy_doc01),
+        Description='A dummy service control policy',
+        Name='MockServiceControlPolicy',
+        Type='SERVICE_CONTROL_POLICY'
+    )['Policy']['PolicySummary']['Id']
+    client.attach_policy(PolicyId=policy_id, TargetId=root_id)
+    client.attach_policy(PolicyId=policy_id, TargetId=ou_id)
+    client.attach_policy(PolicyId=policy_id, TargetId=account_id)
+    response = client.list_targets_for_policy(PolicyId=policy_id)
+    for target in response['Targets']:
+        target.should.be.a(dict)
+        target.should.have.key('Name').should.be.a(six.string_types)
+        target.should.have.key('Arn').should.be.a(six.string_types)
+        target.should.have.key('TargetId').should.be.a(six.string_types)
+        target.should.have.key('Type').should.be.within(
+            ['ROOT', 'ORGANIZATIONAL_UNIT', 'ACCOUNT']
+        )
+
+
+@mock_organizations
+def test_list_targets_for_policy_exception():
+    client = boto3.client('organizations', region_name='us-east-1')
+    client.create_organization(FeatureSet='ALL')['Organization']
+    policy_id = 'p-47fhe9s3'
+    with assert_raises(ClientError) as e:
+        response = client.list_targets_for_policy(PolicyId=policy_id)
+    ex = e.exception
+    ex.operation_name.should.equal('ListTargetsForPolicy')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('PolicyNotFoundException')
+    with assert_raises(ClientError) as e:
+        response = client.list_targets_for_policy(PolicyId='meaninglessstring')
+    ex = e.exception
+    ex.operation_name.should.equal('ListTargetsForPolicy')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('InvalidInputException')
+

From 9b12ce6809dc8015a09f4e6b51fb856a830c4295 Mon Sep 17 00:00:00 2001
From: Hans
Date: Sat, 25 May 2019 17:21:57 +0800
Subject: [PATCH 25/41] Fix #1842 Create cross region VPC peering connection
 in both regions (#2195)

Add a class-level store in ec2's models/VPCPeeringConnectionBackend for
saving VPC peering connections, so that any backend instance can record
the peering connection info in both regions when a cross-region VPC
peering connection is created.
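The store follows a simple class-level weak-reference registry pattern.
A rough sketch of the idea (illustrative names only, not the exact moto
code; it assumes each backend instance knows its own region_name):

    import weakref
    from collections import defaultdict

    class PeeringBackend(object):
        # one registry shared by every backend instance
        _refs = defaultdict(set)

        def __init__(self, region_name):
            self.region_name = region_name
            self.connections = {}
            # register this instance without keeping it alive forever
            PeeringBackend._refs[PeeringBackend].add(weakref.ref(self))

        @classmethod
        def instances(cls):
            for ref in cls._refs[cls]:
                inst = ref()
                if inst is not None:
                    yield inst

        def create_peering(self, pcx_id, pcx, peer_region):
            self.connections[pcx_id] = pcx
            # mirror the new connection into the peer region's backend
            for backend in PeeringBackend.instances():
                if backend.region_name == peer_region:
                    backend.connections[pcx_id] = pcx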
Update vpc_peering_connections in ec2/responses with new versions of the
templates:
DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE,
ACCEPT_VPC_PEERING_CONNECTION_RESPONSE

The previous code only created the VPC peering connection in the
requester's region; it did not create the matching connection in the
peer region when a cross-region VPC peering connection was created.

Tested against a real AWS environment first; the unit test cases were
then written to match the real AWS responses.

Add 5 test cases:
VPC cross region delete case
VPC cross region accept case
VPC cross region accept wrong region case
VPC cross region reject case
VPC cross region reject wrong region case

Related: #1842, #1830
---
 moto/ec2/exceptions.py                        |  22 +++
 moto/ec2/models.py                            |  38 ++++-
 moto/ec2/responses/vpc_peering_connections.py |  64 ++++---
 tests/test_ec2/test_vpc_peering.py            | 158 +++++++++++++++++-
 4 files changed, 246 insertions(+), 36 deletions(-)

diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py
index f747c9cd5..ec2c8542d 100644
--- a/moto/ec2/exceptions.py
+++ b/moto/ec2/exceptions.py
@@ -420,3 +420,25 @@ class OperationNotPermitted(EC2ClientError):
             "The vpc CIDR block with association ID {} may not be disassociated. "
             "It is the primary IPv4 CIDR block of the VPC".format(association_id)
         )
+
+
+# accept exception
+class OperationNotPermitted2(EC2ClientError):
+    def __init__(self, client_region, pcx_id, acceptor_region):
+        super(OperationNotPermitted2, self).__init__(
+            "OperationNotPermitted",
+            "Incorrect region ({0}) specified for this request."
+            "VPC peering connection {1} must be accepted in region {2}".format(client_region, pcx_id, acceptor_region)
+        )
+
+
+# reject exception
+class OperationNotPermitted3(EC2ClientError):
+    def __init__(self, client_region, pcx_id, acceptor_region):
+        super(OperationNotPermitted3, self).__init__(
+            "OperationNotPermitted",
+            "Incorrect region ({0}) specified for this request."
+ "VPC peering connection {1} must be accepted or rejected in region {2}".format(client_region, + pcx_id, + acceptor_region) + ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index fa07841b2..685558c35 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -70,6 +70,8 @@ from .exceptions import ( MissingParameterError, MotoNotImplementedError, OperationNotPermitted, + OperationNotPermitted2, + OperationNotPermitted3, ResourceAlreadyAssociatedError, RulesPerSecurityGroupLimitExceededError, TagLimitExceeded) @@ -2120,16 +2122,16 @@ class VPC(TaggedEC2Resource): class VPCBackend(object): - __refs__ = defaultdict(list) + vpc_refs = defaultdict(set) def __init__(self): self.vpcs = {} - self.__refs__[self.__class__].append(weakref.ref(self)) + self.vpc_refs[self.__class__].add(weakref.ref(self)) super(VPCBackend, self).__init__() @classmethod - def get_instances(cls): - for inst_ref in cls.__refs__[cls]: + def get_vpc_refs(cls): + for inst_ref in cls.vpc_refs[cls]: inst = inst_ref() if inst is not None: yield inst @@ -2159,7 +2161,7 @@ class VPCBackend(object): # get vpc by vpc id and aws region def get_cross_vpc(self, vpc_id, peer_region): - for vpcs in self.get_instances(): + for vpcs in self.get_vpc_refs(): if vpcs.region_name == peer_region: match_vpc = vpcs.get_vpc(vpc_id) return match_vpc @@ -2280,15 +2282,31 @@ class VPCPeeringConnection(TaggedEC2Resource): class VPCPeeringConnectionBackend(object): + # for cross region vpc reference + vpc_pcx_refs = defaultdict(set) + def __init__(self): self.vpc_pcxs = {} + self.vpc_pcx_refs[self.__class__].add(weakref.ref(self)) super(VPCPeeringConnectionBackend, self).__init__() + @classmethod + def get_vpc_pcx_refs(cls): + for inst_ref in cls.vpc_pcx_refs[cls]: + inst = inst_ref() + if inst is not None: + yield inst + def create_vpc_peering_connection(self, vpc, peer_vpc): vpc_pcx_id = random_vpc_peering_connection_id() vpc_pcx = VPCPeeringConnection(vpc_pcx_id, vpc, peer_vpc) vpc_pcx._status.pending() self.vpc_pcxs[vpc_pcx_id] = vpc_pcx + # insert cross region peering info + if vpc.ec2_backend.region_name != peer_vpc.ec2_backend.region_name: + for vpc_pcx_cx in peer_vpc.ec2_backend.get_vpc_pcx_refs(): + if vpc_pcx_cx.region_name == peer_vpc.ec2_backend.region_name: + vpc_pcx_cx.vpc_pcxs[vpc_pcx_id] = vpc_pcx return vpc_pcx def get_all_vpc_peering_connections(self): @@ -2306,6 +2324,11 @@ class VPCPeeringConnectionBackend(object): def accept_vpc_peering_connection(self, vpc_pcx_id): vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id) + # if cross region need accepter from another region + pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name + pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name + if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region: + raise OperationNotPermitted2(self.region_name, vpc_pcx.id, pcx_acp_region) if vpc_pcx._status.code != 'pending-acceptance': raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id) vpc_pcx._status.accept() @@ -2313,6 +2336,11 @@ class VPCPeeringConnectionBackend(object): def reject_vpc_peering_connection(self, vpc_pcx_id): vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id) + # if cross region need accepter from another region + pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name + pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name + if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region: + raise OperationNotPermitted3(self.region_name, vpc_pcx.id, pcx_acp_region) if vpc_pcx._status.code != 'pending-acceptance': raise 
InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id) vpc_pcx._status.reject() diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index 49d752893..68bae72da 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -74,30 +74,35 @@ CREATE_VPC_PEERING_CONNECTION_RESPONSE = """ """ DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - {% for vpc_pcx in vpc_pcxs %} - - {{ vpc_pcx.id }} - - 777788889999 - {{ vpc_pcx.vpc.id }} - {{ vpc_pcx.vpc.cidr_block }} - - - 123456789012 - {{ vpc_pcx.peer_vpc.id }} - - - {{ vpc_pcx._status.code }} - {{ vpc_pcx._status.message }} - - 2014-02-17T16:00:50.000Z - - - {% endfor %} - + +7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + {% for vpc_pcx in vpc_pcxs %} + + {{ vpc_pcx.id }} + + 777788889999 + {{ vpc_pcx.vpc.id }} + {{ vpc_pcx.vpc.cidr_block }} + + + 123456789012 + {{ vpc_pcx.peer_vpc.id }} + {{ vpc_pcx.peer_vpc.cidr_block }} + + false + true + false + + + + {{ vpc_pcx._status.code }} + {{ vpc_pcx._status.message }} + + + + {% endfor %} + """ @@ -109,19 +114,24 @@ DELETE_VPC_PEERING_CONNECTION_RESPONSE = """ """ ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc_pcx.id }} - 123456789012 + 777788889999 {{ vpc_pcx.vpc.id }} {{ vpc_pcx.vpc.cidr_block }} - 777788889999 + 123456789012 {{ vpc_pcx.peer_vpc.id }} {{ vpc_pcx.peer_vpc.cidr_block }} + + false + false + false + {{ vpc_pcx._status.code }} diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index 082499a72..edfbfb3c2 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -107,14 +107,19 @@ def test_vpc_peering_connections_cross_region(): ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') # create peering - vpc_pcx = ec2_usw1.create_vpc_peering_connection( + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( VpcId=vpc_usw1.id, PeerVpcId=vpc_apn1.id, PeerRegion='ap-northeast-1', ) - vpc_pcx.status['Code'].should.equal('initiating-request') - vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id) - vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id) + vpc_pcx_usw1.status['Code'].should.equal('initiating-request') + vpc_pcx_usw1.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx_usw1.accepter_vpc.id.should.equal(vpc_apn1.id) + # test cross region vpc peering connection exist + vpc_pcx_apn1 = ec2_apn1.VpcPeeringConnection(vpc_pcx_usw1.id) + vpc_pcx_apn1.id.should.equal(vpc_pcx_usw1.id) + vpc_pcx_apn1.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx_apn1.accepter_vpc.id.should.equal(vpc_apn1.id) @mock_ec2 @@ -131,3 +136,148 @@ def test_vpc_peering_connections_cross_region_fail(): PeerVpcId=vpc_apn1.id, PeerRegion='ap-northeast-2') cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_accept(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # accept peering from ap-northeast-1 + ec2_apn1 = 
boto3.client('ec2', region_name='ap-northeast-1')
+    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
+    acp_pcx_apn1 = ec2_apn1.accept_vpc_peering_connection(
+        VpcPeeringConnectionId=vpc_pcx_usw1.id
+    )
+    des_pcx_apn1 = ec2_apn1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    acp_pcx_apn1['VpcPeeringConnection']['Status']['Code'].should.equal('active')
+    des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active')
+    des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active')
+
+
+@mock_ec2
+def test_vpc_peering_connections_cross_region_reject():
+    # create vpc in us-west-1 and ap-northeast-1
+    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
+    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
+    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
+    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
+    # create peering
+    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
+        VpcId=vpc_usw1.id,
+        PeerVpcId=vpc_apn1.id,
+        PeerRegion='ap-northeast-1',
+    )
+    # reject peering from ap-northeast-1
+    ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
+    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
+    rej_pcx_apn1 = ec2_apn1.reject_vpc_peering_connection(
+        VpcPeeringConnectionId=vpc_pcx_usw1.id
+    )
+    des_pcx_apn1 = ec2_apn1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    rej_pcx_apn1['Return'].should.equal(True)
+    des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected')
+    des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected')
+
+
+@mock_ec2
+def test_vpc_peering_connections_cross_region_delete():
+    # create vpc in us-west-1 and ap-northeast-1
+    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
+    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
+    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
+    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
+    # create peering
+    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
+        VpcId=vpc_usw1.id,
+        PeerVpcId=vpc_apn1.id,
+        PeerRegion='ap-northeast-1',
+    )
+    # delete peering from ap-northeast-1
+    ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
+    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
+    del_pcx_apn1 = ec2_apn1.delete_vpc_peering_connection(
+        VpcPeeringConnectionId=vpc_pcx_usw1.id
+    )
+    des_pcx_apn1 = ec2_apn1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    del_pcx_apn1['Return'].should.equal(True)
+    des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted')
+    des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted')
+
+
+@mock_ec2
+def test_vpc_peering_connections_cross_region_accept_wrong_region():
+    # create vpc in us-west-1 and ap-northeast-1
+    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
+    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
+    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
+    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
+    # create peering
+    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + + # accept wrong peering from us-west-1 which will raise error + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + with assert_raises(ClientError) as cm: + ec2_usw1.accept_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted') + exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \ + 'peering connection {1} must be ' \ + 'accepted in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1') + cm.exception.response['Error']['Message'].should.equal(exp_msg) + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_reject_wrong_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # reject wrong peering from us-west-1 which will raise error + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + with assert_raises(ClientError) as cm: + ec2_usw1.reject_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted') + exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \ + 'peering connection {1} must be accepted or ' \ + 'rejected in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1') + cm.exception.response['Error']['Message'].should.equal(exp_msg) From 6838d7964f7e2416f311cb0bf00a8eb9aa1756bb Mon Sep 17 00:00:00 2001 From: Brian Engen Date: Sat, 25 May 2019 04:24:46 -0500 Subject: [PATCH 26/41] handles empty string in SNS next token (#2177) --- moto/sns/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 41e83aba4..c764cb25f 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -255,7 +255,7 @@ class SNSBackend(BaseBackend): return candidate_topic def _get_values_nexttoken(self, values_map, next_token=None): - if next_token is None: + if next_token is None or not next_token: next_token = 0 next_token = int(next_token) values = list(values_map.values())[ From 4a99dcddb26a012a1a6a8b71fd242bc82edd1b33 Mon Sep 17 00:00:00 2001 From: Jeffery Smith Date: Sat, 25 May 2019 05:34:59 -0400 Subject: [PATCH 27/41] Issue #2141 Adding owner-id to the filter for Snapshot (#2142) * Adding owner-id to the filter for Snapshot --- moto/ec2/models.py | 2 ++ tests/test_ec2/test_elastic_block_store.py | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 685558c35..d57b9f01b 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1881,6 +1881,8 @@ class Snapshot(TaggedEC2Resource): return str(self.encrypted).lower() elif filter_name == 'status': return self.status + elif filter_name == 'owner-id': + return self.owner_id else: return super(Snapshot, self).get_filter_value( filter_name, 'DescribeSnapshots') diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 8f4a00b13..728be5d1f 100644 --- 
a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -394,6 +394,11 @@ def test_snapshot_filters(): set([snap.id for snap in snapshots_by_encrypted] ).should.equal({snapshot3.id}) + snapshots_by_owner_id = conn.get_all_snapshots( + filters={'owner-id': '123456789012'}) + set([snap.id for snap in snapshots_by_owner_id] + ).should.equal({snapshot1.id, snapshot2.id, snapshot3.id}) + @mock_ec2_deprecated def test_snapshot_attribute(): From 238d1c7c398d0879e0409ea49b940a4a77b8b01c Mon Sep 17 00:00:00 2001 From: Don Kuntz Date: Sat, 25 May 2019 04:58:41 -0500 Subject: [PATCH 28/41] Add glue.delete_table endpoint, for allowing tables to be deleted (#2112) * Add glue.delete_table endpoint, for allowing tables to be deleted * remove extra whitespace --- moto/glue/models.py | 8 ++++++++ moto/glue/responses.py | 6 ++++++ tests/test_glue/test_datacatalog.py | 21 +++++++++++++++++++++ 3 files changed, 35 insertions(+) diff --git a/moto/glue/models.py b/moto/glue/models.py index bcf2ec4bf..407c3a020 100644 --- a/moto/glue/models.py +++ b/moto/glue/models.py @@ -56,6 +56,14 @@ class GlueBackend(BaseBackend): database = self.get_database(database_name) return [table for table_name, table in database.tables.items()] + def delete_table(self, database_name, table_name): + database = self.get_database(database_name) + try: + del database.tables[table_name] + except KeyError: + raise TableNotFoundException(table_name) + return {} + class FakeDatabase(BaseModel): diff --git a/moto/glue/responses.py b/moto/glue/responses.py index 84cc6f901..8f09022ca 100644 --- a/moto/glue/responses.py +++ b/moto/glue/responses.py @@ -84,6 +84,12 @@ class GlueResponse(BaseResponse): ] }) + def delete_table(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('Name') + resp = self.glue_backend.delete_table(database_name, table_name) + return json.dumps(resp) + def get_partitions(self): database_name = self.parameters.get('DatabaseName') table_name = self.parameters.get('TableName') diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index a457d5127..e4891f307 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -209,6 +209,27 @@ def test_get_table_when_database_not_exits(): exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') +@mock_glue +def test_delete_table(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + result = client.delete_table(DatabaseName=database_name, Name=table_name) + result['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # confirm table is deleted + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Table myspecialtable not found') + + @mock_glue def test_get_partitions_empty(): client = boto3.client('glue', region_name='us-east-1') From d8ff67197b3dc139e0ef42a2cfc71fac1fe93ea2 Mon Sep 17 00:00:00 2001 From: Mark Challoner Date: Sat, 25 May 2019 11:10:34 +0100 Subject: [PATCH 29/41] Add resource-groups. 
(#1953) --- IMPLEMENTATION_COVERAGE.md | 16 +- moto/__init__.py | 1 + moto/backends.py | 2 + moto/resourcegroups/__init__.py | 6 + moto/resourcegroups/exceptions.py | 13 + moto/resourcegroups/models.py | 338 ++++++++++++++++++ moto/resourcegroups/responses.py | 162 +++++++++ moto/resourcegroups/urls.py | 14 + tests/test_resourcegroups/__init__.py | 0 .../test_resourcegroups.py | 165 +++++++++ 10 files changed, 709 insertions(+), 8 deletions(-) create mode 100644 moto/resourcegroups/__init__.py create mode 100644 moto/resourcegroups/exceptions.py create mode 100644 moto/resourcegroups/models.py create mode 100644 moto/resourcegroups/responses.py create mode 100644 moto/resourcegroups/urls.py create mode 100644 tests/test_resourcegroups/__init__.py create mode 100644 tests/test_resourcegroups/test_resourcegroups.py diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 5190ab09b..504037469 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -3415,19 +3415,19 @@ - [ ] start_stream_processor - [ ] stop_stream_processor -## resource-groups - 0% implemented -- [ ] create_group -- [ ] delete_group -- [ ] get_group -- [ ] get_group_query +## resource-groups - 62% implemented +- [X] create_group +- [X] delete_group +- [X] get_group +- [X] get_group_query - [ ] get_tags - [ ] list_group_resources -- [ ] list_groups +- [X] list_groups - [ ] search_resources - [ ] tag - [ ] untag -- [ ] update_group -- [ ] update_group_query +- [X] update_group +- [X] update_group_query ## resourcegroupstaggingapi - 60% implemented - [X] get_resources diff --git a/moto/__init__.py b/moto/__init__.py index 5eeac8471..8c51bab27 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -36,6 +36,7 @@ from .polly import mock_polly # flake8: noqa from .rds import mock_rds, mock_rds_deprecated # flake8: noqa from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa +from .resourcegroups import mock_resourcegroups # flake8: noqa from .s3 import mock_s3, mock_s3_deprecated # flake8: noqa from .ses import mock_ses, mock_ses_deprecated # flake8: noqa from .secretsmanager import mock_secretsmanager # flake8: noqa diff --git a/moto/backends.py b/moto/backends.py index 90cc803a7..6ea85093d 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -32,6 +32,7 @@ from moto.organizations import organizations_backends from moto.polly import polly_backends from moto.rds2 import rds2_backends from moto.redshift import redshift_backends +from moto.resourcegroups import resourcegroups_backends from moto.route53 import route53_backends from moto.s3 import s3_backends from moto.ses import ses_backends @@ -81,6 +82,7 @@ BACKENDS = { 'organizations': organizations_backends, 'polly': polly_backends, 'redshift': redshift_backends, + 'resource-groups': resourcegroups_backends, 'rds': rds2_backends, 's3': s3_backends, 's3bucket_path': s3_backends, diff --git a/moto/resourcegroups/__init__.py b/moto/resourcegroups/__init__.py new file mode 100644 index 000000000..74b0eb598 --- /dev/null +++ b/moto/resourcegroups/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import resourcegroups_backends +from ..core.models import base_decorator + +resourcegroups_backend = resourcegroups_backends['us-east-1'] +mock_resourcegroups = base_decorator(resourcegroups_backends) diff --git a/moto/resourcegroups/exceptions.py b/moto/resourcegroups/exceptions.py new file mode 100644 index 000000000..a8e542979 --- 
/dev/null
+++ b/moto/resourcegroups/exceptions.py
@@ -0,0 +1,13 @@
+from __future__ import unicode_literals
+import json
+
+from werkzeug.exceptions import HTTPException
+
+
+class BadRequestException(HTTPException):
+    code = 400
+
+    def __init__(self, message, **kwargs):
+        super(BadRequestException, self).__init__(
+            description=json.dumps({"Message": message, "Code": "BadRequestException"}), **kwargs
+        )
diff --git a/moto/resourcegroups/models.py b/moto/resourcegroups/models.py
new file mode 100644
index 000000000..6734bd48a
--- /dev/null
+++ b/moto/resourcegroups/models.py
@@ -0,0 +1,338 @@
+from __future__ import unicode_literals
+from builtins import str
+
+import boto3
+import json
+import re
+
+from moto.core import BaseBackend, BaseModel
+from .exceptions import BadRequestException
+
+
+class FakeResourceGroup(BaseModel):
+    def __init__(self, name, resource_query, description=None, tags=None):
+        self.errors = []
+        description = description or ""
+        tags = tags or {}
+        if self._validate_description(value=description):
+            self._description = description
+        if self._validate_name(value=name):
+            self._name = name
+        if self._validate_resource_query(value=resource_query):
+            self._resource_query = resource_query
+        if self._validate_tags(value=tags):
+            self._tags = tags
+        self._raise_errors()
+        self.arn = "arn:aws:resource-groups:us-west-1:123456789012:{name}".format(name=name)
+
+    @staticmethod
+    def _format_error(key, value, constraint):
+        return "Value '{value}' at '{key}' failed to satisfy constraint: {constraint}".format(
+            constraint=constraint,
+            key=key,
+            value=value,
+        )
+
+    def _raise_errors(self):
+        if self.errors:
+            errors_len = len(self.errors)
+            plural = "s" if len(self.errors) > 1 else ""
+            errors = "; ".join(self.errors)
+            raise BadRequestException("{errors_len} validation error{plural} detected: {errors}".format(
+                errors_len=errors_len, plural=plural, errors=errors,
+            ))
+
+    def _validate_description(self, value):
+        errors = []
+        if len(value) > 511:
+            errors.append(self._format_error(
+                key="description",
+                value=value,
+                constraint="Member must have length less than or equal to 512",
+            ))
+        if not re.match(r"^[\sa-zA-Z0-9_.-]*$", value):
+            errors.append(self._format_error(
+                key="description",
+                value=value,
+                constraint=r"Member must satisfy regular expression pattern: [\sa-zA-Z0-9_\.-]*",
+            ))
+        if errors:
+            self.errors += errors
+            return False
+        return True
+
+    def _validate_name(self, value):
+        errors = []
+        if len(value) > 128:
+            errors.append(self._format_error(
+                key="name",
+                value=value,
+                constraint="Member must have length less than or equal to 128",
+            ))
+        # Note \ is a character to match not an escape.
+ if not re.match(r"^[a-zA-Z0-9_\\.-]+$", value): + errors.append(self._format_error( + key="name", + value=value, + constraint=r"Member must satisfy regular expression pattern: [a-zA-Z0-9_\.-]+", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_resource_query(self, value): + errors = [] + if value["Type"] not in {"CLOUDFORMATION_STACK_1_0", "TAG_FILTERS_1_0"}: + errors.append(self._format_error( + key="resourceQuery.type", + value=value, + constraint="Member must satisfy enum value set: [CLOUDFORMATION_STACK_1_0, TAG_FILTERS_1_0]", + )) + if len(value["Query"]) > 2048: + errors.append(self._format_error( + key="resourceQuery.query", + value=value, + constraint="Member must have length less than or equal to 2048", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_tags(self, value): + errors = [] + # AWS only outputs one error for all keys and one for all values. + error_keys = None + error_values = None + regex = re.compile(r"^([\\p{L}\\p{Z}\\p{N}_.:/=+\-@]*)$") + for tag_key, tag_value in value.items(): + # Validation for len(tag_key) >= 1 is done by botocore. + if len(tag_key) > 128 or re.match(regex, tag_key): + error_keys = self._format_error( + key="tags", + value=value, + constraint=( + "Map value must satisfy constraint: [" + "Member must have length less than or equal to 128, " + "Member must have length greater than or equal to 1, " + r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$" + "]" + ), + ) + # Validation for len(tag_value) >= 0 is nonsensical. + if len(tag_value) > 256 or re.match(regex, tag_key): + error_values = self._format_error( + key="tags", + value=value, + constraint=( + "Map value must satisfy constraint: [" + "Member must have length less than or equal to 256, " + "Member must have length greater than or equal to 0, " + r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$" + "]" + ), + ) + if error_keys: + errors.append(error_keys) + if error_values: + errors.append(error_values) + if errors: + self.errors += errors + return False + return True + + @property + def description(self): + return self._description + + @description.setter + def description(self, value): + if not self._validate_description(value=value): + self._raise_errors() + self._description = value + + @property + def name(self): + return self._name + + @name.setter + def name(self, value): + if not self._validate_name(value=value): + self._raise_errors() + self._name = value + + @property + def resource_query(self): + return self._resource_query + + @resource_query.setter + def resource_query(self, value): + if not self._validate_resource_query(value=value): + self._raise_errors() + self._resource_query = value + + @property + def tags(self): + return self._tags + + @tags.setter + def tags(self, value): + if not self._validate_tags(value=value): + self._raise_errors() + self._tags = value + + +class ResourceGroups(): + def __init__(self): + self.by_name = {} + self.by_arn = {} + + def __contains__(self, item): + return item in self.by_name + + def append(self, resource_group): + self.by_name[resource_group.name] = resource_group + self.by_arn[resource_group.arn] = resource_group + + def delete(self, name): + group = self.by_name[name] + del self.by_name[name] + del self.by_arn[group.arn] + return group + + +class ResourceGroupsBackend(BaseBackend): + def __init__(self, region_name=None): + super(ResourceGroupsBackend, self).__init__() + self.region_name = 
region_name + self.groups = ResourceGroups() + + @staticmethod + def _validate_resource_query(resource_query): + type = resource_query["Type"] + query = json.loads(resource_query["Query"]) + query_keys = set(query.keys()) + invalid_json_exception = BadRequestException("Invalid query: Invalid query format: check JSON syntax") + if not isinstance(query["ResourceTypeFilters"], list): + raise invalid_json_exception + if type == "CLOUDFORMATION_STACK_1_0": + if query_keys != {"ResourceTypeFilters", "StackIdentifier"}: + raise invalid_json_exception + stack_identifier = query["StackIdentifier"] + if not isinstance(stack_identifier, str): + raise invalid_json_exception + if not re.match( + r"^arn:aws:cloudformation:[a-z]{2}-[a-z]+-[0-9]+:[0-9]+:stack/[-0-9A-z]+/[-0-9a-f]+$", + stack_identifier, + ): + raise BadRequestException( + "Invalid query: Verify that the specified ARN is formatted correctly." + ) + # Once checking other resources is implemented. + # if stack_identifier not in self.cloudformation_backend.stacks: + # raise BadRequestException("Invalid query: The specified CloudFormation stack doesn't exist.") + if type == "TAG_FILTERS_1_0": + if query_keys != {"ResourceTypeFilters", "TagFilters"}: + raise invalid_json_exception + tag_filters = query["TagFilters"] + if not isinstance(tag_filters, list): + raise invalid_json_exception + if not tag_filters or len(tag_filters) > 50: + raise BadRequestException( + "Invalid query: The TagFilters list must contain between 1 and 50 elements" + ) + for tag_filter in tag_filters: + if not isinstance(tag_filter, dict): + raise invalid_json_exception + if set(tag_filter.keys()) != {"Key", "Values"}: + raise invalid_json_exception + key = tag_filter["Key"] + if not isinstance(key, str): + raise invalid_json_exception + if not key: + raise BadRequestException( + "Invalid query: The TagFilter element cannot have empty or null Key field" + ) + if len(key) > 128: + raise BadRequestException("Invalid query: The maximum length for a tag Key is 128") + values = tag_filter["Values"] + if not isinstance(values, list): + raise invalid_json_exception + if len(values) > 20: + raise BadRequestException( + "Invalid query: The TagFilter Values list must contain between 0 and 20 elements" + ) + for value in values: + if not isinstance(value, str): + raise invalid_json_exception + if len(value) > 256: + raise BadRequestException( + "Invalid query: The maximum length for a tag Value is 256" + ) + + @staticmethod + def _validate_tags(tags): + for tag in tags: + if tag.lower().startswith('aws:'): + raise BadRequestException("Tag keys must not start with 'aws:'") + + def create_group(self, name, resource_query, description=None, tags=None): + tags = tags or {} + group = FakeResourceGroup( + name=name, + resource_query=resource_query, + description=description, + tags=tags, + ) + if name in self.groups: + raise BadRequestException("Cannot create group: group already exists") + if name.upper().startswith("AWS"): + raise BadRequestException("Group name must not start with 'AWS'") + self._validate_tags(tags) + self._validate_resource_query(resource_query) + self.groups.append(group) + return group + + def delete_group(self, group_name): + return self.groups.delete(name=group_name) + + def get_group(self, group_name): + return self.groups.by_name[group_name] + + def get_tags(self, arn): + return self.groups.by_arn[arn].tags + + # def list_group_resources(self): + # ... 
+ + def list_groups(self, filters=None, max_results=None, next_token=None): + return self.groups.by_name + + # def search_resources(self): + # ... + + def tag(self, arn, tags): + all_tags = self.groups.by_arn[arn].tags + all_tags.update(tags) + self._validate_tags(all_tags) + self.groups.by_arn[arn].tags = all_tags + + def untag(self, arn, keys): + group = self.groups.by_arn[arn] + for key in keys: + del group.tags[key] + + def update_group(self, group_name, description=None): + if description: + self.groups.by_name[group_name].description = description + return self.groups.by_name[group_name] + + def update_group_query(self, group_name, resource_query): + self._validate_resource_query(resource_query) + self.groups.by_name[group_name].resource_query = resource_query + return self.groups.by_name[group_name] + + +available_regions = boto3.session.Session().get_available_regions("resource-groups") +resourcegroups_backends = {region: ResourceGroupsBackend(region_name=region) for region in available_regions} diff --git a/moto/resourcegroups/responses.py b/moto/resourcegroups/responses.py new file mode 100644 index 000000000..02ea14c1a --- /dev/null +++ b/moto/resourcegroups/responses.py @@ -0,0 +1,162 @@ +from __future__ import unicode_literals +import json + +try: + from urllib import unquote +except ImportError: + from urllib.parse import unquote + +from moto.core.responses import BaseResponse +from .models import resourcegroups_backends + + +class ResourceGroupsResponse(BaseResponse): + SERVICE_NAME = 'resource-groups' + + @property + def resourcegroups_backend(self): + return resourcegroups_backends[self.region] + + def create_group(self): + name = self._get_param("Name") + description = self._get_param("Description") + resource_query = self._get_param("ResourceQuery") + tags = self._get_param("Tags") + group = self.resourcegroups_backend.create_group( + name=name, + description=description, + resource_query=resource_query, + tags=tags, + ) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + "ResourceQuery": group.resource_query, + "Tags": group.tags + }) + + def delete_group(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.delete_group(group_name=group_name) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + }) + + def get_group(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.get_group(group_name=group_name) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description, + } + }) + + def get_group_query(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.get_group(group_name=group_name) + return json.dumps({ + "GroupQuery": { + "GroupName": group.name, + "ResourceQuery": group.resource_query, + } + }) + + def get_tags(self): + arn = unquote(self._get_param("Arn")) + return json.dumps({ + "Arn": arn, + "Tags": self.resourcegroups_backend.get_tags(arn=arn) + }) + + def list_group_resources(self): + raise NotImplementedError('ResourceGroups.list_group_resources is not yet implemented') + + def list_groups(self): + filters = self._get_param("Filters") + if filters: + raise NotImplementedError( + 'ResourceGroups.list_groups with filter parameter is not yet implemented' + ) + max_results = self._get_int_param("MaxResults", 50) + next_token = self._get_param("NextToken") + groups = 
self.resourcegroups_backend.list_groups( + filters=filters, + max_results=max_results, + next_token=next_token + ) + return json.dumps({ + "GroupIdentifiers": [{ + "GroupName": group.name, + "GroupArn": group.arn, + } for group in groups.values()], + "Groups": [{ + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description, + } for group in groups.values()], + "NextToken": next_token, + }) + + def search_resources(self): + raise NotImplementedError('ResourceGroups.search_resources is not yet implemented') + + def tag(self): + arn = unquote(self._get_param("Arn")) + tags = self._get_param("Tags") + if arn not in self.resourcegroups_backend.groups.by_arn: + raise NotImplementedError( + 'ResourceGroups.tag with non-resource-group Arn parameter is not yet implemented' + ) + self.resourcegroups_backend.tag(arn=arn, tags=tags) + return json.dumps({ + "Arn": arn, + "Tags": tags + }) + + def untag(self): + arn = unquote(self._get_param("Arn")) + keys = self._get_param("Keys") + if arn not in self.resourcegroups_backend.groups.by_arn: + raise NotImplementedError( + 'ResourceGroups.untag with non-resource-group Arn parameter is not yet implemented' + ) + self.resourcegroups_backend.untag(arn=arn, keys=keys) + return json.dumps({ + "Arn": arn, + "Keys": keys + }) + + def update_group(self): + group_name = self._get_param("GroupName") + description = self._get_param("Description", "") + group = self.resourcegroups_backend.update_group(group_name=group_name, description=description) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + }) + + def update_group_query(self): + group_name = self._get_param("GroupName") + resource_query = self._get_param("ResourceQuery") + group = self.resourcegroups_backend.update_group_query( + group_name=group_name, + resource_query=resource_query + ) + return json.dumps({ + "GroupQuery": { + "GroupName": group.name, + "ResourceQuery": resource_query + } + }) diff --git a/moto/resourcegroups/urls.py b/moto/resourcegroups/urls.py new file mode 100644 index 000000000..518dde766 --- /dev/null +++ b/moto/resourcegroups/urls.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from .responses import ResourceGroupsResponse + +url_bases = [ + "https?://resource-groups(-fips)?.(.+).amazonaws.com", +] + +url_paths = { + '{0}/groups$': ResourceGroupsResponse.dispatch, + '{0}/groups/(?P[^/]+)$': ResourceGroupsResponse.dispatch, + '{0}/groups/(?P[^/]+)/query$': ResourceGroupsResponse.dispatch, + '{0}/groups-list$': ResourceGroupsResponse.dispatch, + '{0}/resources/(?P[^/]+)/tags$': ResourceGroupsResponse.dispatch, +} diff --git a/tests/test_resourcegroups/__init__.py b/tests/test_resourcegroups/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_resourcegroups/test_resourcegroups.py b/tests/test_resourcegroups/test_resourcegroups.py new file mode 100644 index 000000000..bb3624413 --- /dev/null +++ b/tests/test_resourcegroups/test_resourcegroups.py @@ -0,0 +1,165 @@ +from __future__ import unicode_literals + +import boto3 +import json +import sure # noqa + +from moto import mock_resourcegroups + + +@mock_resourcegroups +def test_create_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = resource_groups.create_group( + Name="test_resource_group", + Description="description", + ResourceQuery={ + "Type": "TAG_FILTERS_1_0", + "Query": json.dumps( + { + "ResourceTypeFilters": ["AWS::AllSupported"], + 
"TagFilters": [ + {"Key": "resources_tag_key", "Values": ["resources_tag_value"]} + ], + } + ), + }, + Tags={"resource_group_tag_key": "resource_group_tag_value"} + ) + response["Group"]["Name"].should.contain("test_resource_group") + response["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0") + response["Tags"]["resource_group_tag_key"].should.contain("resource_group_tag_value") + + +@mock_resourcegroups +def test_delete_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.delete_group(GroupName="test_resource_group") + response["Group"]["Name"].should.contain("test_resource_group") + + response = resource_groups.list_groups() + response["GroupIdentifiers"].should.have.length_of(0) + response["Groups"].should.have.length_of(0) + + +@mock_resourcegroups +def test_get_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.get_group(GroupName="test_resource_group") + response["Group"]["Description"].should.contain("description") + + return response + + +@mock_resourcegroups +def test_get_group_query(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.get_group_query(GroupName="test_resource_group") + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0") + + +@mock_resourcegroups +def test_get_tags(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_group() + + response = resource_groups.get_tags(Arn=response["Group"]["GroupArn"]) + response["Tags"].should.have.length_of(1) + response["Tags"]["resource_group_tag_key"].should.contain("resource_group_tag_value") + + return response + + +@mock_resourcegroups +def test_list_groups(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.list_groups() + response["GroupIdentifiers"].should.have.length_of(1) + response["Groups"].should.have.length_of(1) + + +@mock_resourcegroups +def test_tag(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_tags() + + response = resource_groups.tag( + Arn=response["Arn"], + Tags={"resource_group_tag_key_2": "resource_group_tag_value_2"} + ) + response["Tags"]["resource_group_tag_key_2"].should.contain("resource_group_tag_value_2") + + response = resource_groups.get_tags(Arn=response["Arn"]) + response["Tags"].should.have.length_of(2) + response["Tags"]["resource_group_tag_key_2"].should.contain("resource_group_tag_value_2") + + +@mock_resourcegroups +def test_untag(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_tags() + + response = resource_groups.untag(Arn=response["Arn"], Keys=["resource_group_tag_key"]) + response["Keys"].should.contain("resource_group_tag_key") + + response = resource_groups.get_tags(Arn=response["Arn"]) + response["Tags"].should.have.length_of(0) + + +@mock_resourcegroups +def test_update_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_get_group() + + response = resource_groups.update_group( + GroupName="test_resource_group", + Description="description_2", + ) + response["Group"]["Description"].should.contain("description_2") + + response = resource_groups.get_group(GroupName="test_resource_group") + 
response["Group"]["Description"].should.contain("description_2") + + +@mock_resourcegroups +def test_update_group_query(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.update_group_query( + GroupName="test_resource_group", + ResourceQuery={ + "Type": "CLOUDFORMATION_STACK_1_0", + "Query": json.dumps( + { + "ResourceTypeFilters": ["AWS::AllSupported"], + "StackIdentifier": ( + "arn:aws:cloudformation:eu-west-1:012345678912:stack/" + "test_stack/c223eca0-e744-11e8-8910-500c41f59083" + ) + } + ), + }, + ) + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("CLOUDFORMATION_STACK_1_0") + + response = resource_groups.get_group_query(GroupName="test_resource_group") + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("CLOUDFORMATION_STACK_1_0") From fb2a76fd66d78fd2dfb5977723ce20aba5b867be Mon Sep 17 00:00:00 2001 From: Daniel Miranda Date: Sat, 25 May 2019 07:17:52 -0300 Subject: [PATCH 30/41] ec2: add support for creation and importing of real SSH keys (#2108) * ec2: add support for creation and importing of real SSH keys * setup: lock PyYAML version to avoid incompatibilities --- moto/ec2/exceptions.py | 8 +++ moto/ec2/models.py | 13 +++- moto/ec2/utils.py | 60 +++++++++++++----- setup.py | 3 +- tests/test_ec2/__init__.py | 0 tests/test_ec2/helpers.py | 15 +++++ tests/test_ec2/test_key_pairs.py | 105 ++++++++++++++++++++++++++----- tests/test_ec2/test_utils.py | 10 ++- 8 files changed, 178 insertions(+), 36 deletions(-) create mode 100644 tests/test_ec2/__init__.py create mode 100644 tests/test_ec2/helpers.py diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index ec2c8542d..0453a42a1 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -58,6 +58,14 @@ class InvalidKeyPairDuplicateError(EC2ClientError): .format(key)) +class InvalidKeyPairFormatError(EC2ClientError): + + def __init__(self): + super(InvalidKeyPairFormatError, self).__init__( + "InvalidKeyPair.Format", + "Key is not in valid OpenSSH public key format") + + class InvalidVPCIdError(EC2ClientError): def __init__(self, vpc_id): diff --git a/moto/ec2/models.py b/moto/ec2/models.py index d57b9f01b..7d0a4ac08 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -20,6 +20,7 @@ from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest from boto.ec2.launchspecification import LaunchSpecification + from moto.compat import OrderedDict from moto.core import BaseBackend from moto.core.models import Model, BaseModel @@ -43,6 +44,7 @@ from .exceptions import ( InvalidInstanceIdError, InvalidInternetGatewayIdError, InvalidKeyPairDuplicateError, + InvalidKeyPairFormatError, InvalidKeyPairNameError, InvalidNetworkAclIdError, InvalidNetworkAttachmentIdError, @@ -120,6 +122,8 @@ from .utils import ( random_customer_gateway_id, is_tag_filter, tag_filter_matches, + rsa_public_key_parse, + rsa_public_key_fingerprint ) INSTANCE_TYPES = json.load( @@ -910,7 +914,14 @@ class KeyPairBackend(object): def import_key_pair(self, key_name, public_key_material): if key_name in self.keypairs: raise InvalidKeyPairDuplicateError(key_name) - keypair = KeyPair(key_name, **random_key_pair()) + + try: + rsa_public_key = rsa_public_key_parse(public_key_material) + except ValueError: + raise InvalidKeyPairFormatError() + + fingerprint = rsa_public_key_fingerprint(rsa_public_key) + keypair = KeyPair(key_name, 
material=public_key_material, fingerprint=fingerprint) self.keypairs[key_name] = keypair return keypair diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index f5c9b8512..a998f18ef 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -1,10 +1,19 @@ from __future__ import unicode_literals +import base64 +import hashlib import fnmatch import random import re import six +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.asymmetric import rsa +import sshpubkeys.exceptions +from sshpubkeys.keys import SSHKey + + EC2_RESOURCE_TO_PREFIX = { 'customer-gateway': 'cgw', 'dhcp-options': 'dopt', @@ -453,23 +462,19 @@ def simple_aws_filter_to_re(filter_string): def random_key_pair(): - def random_hex(): - return chr(random.choice(list(range(48, 58)) + list(range(97, 102)))) + private_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + backend=default_backend()) + private_key_material = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption()) + public_key_fingerprint = rsa_public_key_fingerprint(private_key.public_key()) - def random_fingerprint(): - return ':'.join([random_hex() + random_hex() for i in range(20)]) - - def random_material(): - return ''.join([ - chr(random.choice(list(range(65, 91)) + list(range(48, 58)) + - list(range(97, 102)))) - for i in range(1000) - ]) - material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \ - "-----END RSA PRIVATE KEY-----" return { - 'fingerprint': random_fingerprint(), - 'material': material + 'fingerprint': public_key_fingerprint, + 'material': private_key_material.decode('ascii') } @@ -535,3 +540,28 @@ def generate_instance_identity_document(instance): } return document + + +def rsa_public_key_parse(key_material): + try: + if not isinstance(key_material, six.binary_type): + key_material = key_material.encode("ascii") + + decoded_key = base64.b64decode(key_material).decode("ascii") + public_key = SSHKey(decoded_key) + except (sshpubkeys.exceptions.InvalidKeyException, UnicodeDecodeError): + raise ValueError('bad key') + + if not public_key.rsa: + raise ValueError('bad key') + + return public_key.rsa + + +def rsa_public_key_fingerprint(rsa_public_key): + key_data = rsa_public_key.public_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PublicFormat.SubjectPublicKeyInfo) + fingerprint_hex = hashlib.md5(key_data).hexdigest() + fingerprint = re.sub(r'([a-f0-9]{2})(?!$)', r'\1:', fingerprint_hex) + return fingerprint diff --git a/setup.py b/setup.py index 7ee84cf6e..bcc4db4d9 100755 --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ install_requires = [ "xmltodict", "six>1.9", "werkzeug", - "PyYAML", + "PyYAML==3.13", "pytz", "python-dateutil<3.0.0,>=2.1", "python-jose<4.0.0", @@ -39,6 +39,7 @@ install_requires = [ "responses>=0.9.0", "idna<2.9,>=2.5", "cfn-lint", + "sshpubkeys>=3.1.0,<4.0" ] extras_require = { diff --git a/tests/test_ec2/__init__.py b/tests/test_ec2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_ec2/helpers.py b/tests/test_ec2/helpers.py new file mode 100644 index 000000000..94c9c10cb --- /dev/null +++ b/tests/test_ec2/helpers.py @@ -0,0 +1,15 @@ +import six + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import rsa + + 
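+# Example usage (a sketch, not part of the original patch): this helper pairs
+# with keys produced the same way moto's random_key_pair now builds them --
+#
+#     key = rsa.generate_private_key(
+#         public_exponent=65537, key_size=2048, backend=default_backend())
+#     pem = key.private_bytes(
+#         encoding=serialization.Encoding.PEM,
+#         format=serialization.PrivateFormat.TraditionalOpenSSL,
+#         encryption_algorithm=serialization.NoEncryption()).decode('ascii')
+#     rsa_check_private_key(pem)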
+
+def rsa_check_private_key(private_key_material):
+    assert isinstance(private_key_material, six.string_types)
+
+    private_key = serialization.load_pem_private_key(
+        data=private_key_material.encode('ascii'),
+        backend=default_backend(),
+        password=None)
+    assert isinstance(private_key, rsa.RSAPrivateKey)
diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py
index 0a7fb9f76..dfe6eabdf 100644
--- a/tests/test_ec2/test_key_pairs.py
+++ b/tests/test_ec2/test_key_pairs.py
@@ -4,12 +4,46 @@ import tests.backport_assert_raises
 from nose.tools import assert_raises
 
 import boto
-import six
 import sure # noqa
 
 from boto.exception import EC2ResponseError
 from moto import mock_ec2_deprecated
 
+from .helpers import rsa_check_private_key
+
+
+RSA_PUBLIC_KEY_OPENSSH = b"""\
+ssh-rsa \
+AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H\
+6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37LQx\
+4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBp\
+JzbZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6\
+A3t8mL7r91aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2X\
+qusUO07jKuSxzPumXBeU+JEtx0J1tqZwJlpGt2R+0qN7nKnPl2+hx \
+moto@github.com"""

+RSA_PUBLIC_KEY_RFC4716 = b"""\
+---- BEGIN SSH2 PUBLIC KEY ----
+AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H6cZANO
+Q+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37LQx4YIAcBi4Zd023
+mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBpJzbZlPN45ZCTk9ck0fS
+VHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6A3t8mL7r91aM5q6QOQm219lct
+FM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2XqusUO07jKuSxzPumXBeU+JEtx0J1tqZ
+wJlpGt2R+0qN7nKnPl2+hx
+---- END SSH2 PUBLIC KEY ----
+"""
+
+RSA_PUBLIC_KEY_FINGERPRINT = "6a:49:07:1c:7e:bd:d2:bd:96:25:fe:b5:74:83:ae:fd"
+
+DSA_PUBLIC_KEY_OPENSSH = b"""ssh-dss \
+AAAAB3NzaC1kc3MAAACBAJ0aXctVwbN6VB81gpo8R7DUk8zXRjZvrkg8Y8vEGt63gklpNJNsLXtEUXkl5D4c0nD2FZO1rJNqFoe\
+OQOCoGSfclHvt9w4yPl/lUEtb3Qtj1j80MInETHr19vaSunRk5R+M+8YH+LLcdYdz7MijuGey02mbi0H9K5nUIcuLMArVAAAAFQ\
+D0RDvsObRWBlnaW8645obZBM86jwAAAIBNZwf3B4krIzAwVfkMHLDSdAvs7lOWE7o8SJLzr9t4a9HhYp9SLbMzJ815KWfidEYV2\
++s4ZaPCfcZ1GENFRbE8rixz5eMAjEUXEPMJkblDZTHzMsH96z2cOCQZ0vfOmgznsf18Uf725pqo9OqAioEsTJjX8jtI2qNPEBU0\
+uhMSZQAAAIBBMGhDu5CWPUlS2QG7vzmzw81XasmHE/s2YPDRbolkriwlunpgwZhCscoQP8HFHY+DLUVvUb+GZwBmFt4l1uHl03b\
+ffsm7UIHtCBYERr9Nx0u20ldfhkgB1lhaJb5o0ZJ3pmJ38KChfyHe5EUcqRdEFo89Mp72VI2Z6UHyL175RA== \
+moto@github.com"""
+
 
 @mock_ec2_deprecated
 def test_key_pairs_empty():
@@ -33,14 +67,15 @@ def test_key_pairs_create():
     conn = boto.connect_ec2('the_key', 'the_secret')
 
     with assert_raises(EC2ResponseError) as ex:
-        kp = conn.create_key_pair('foo', dry_run=True)
+        conn.create_key_pair('foo', dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
     ex.exception.message.should.equal(
         'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set')
 
     kp = conn.create_key_pair('foo')
-    assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
+    rsa_check_private_key(kp.material)
+
     kps = conn.get_all_key_pairs()
     assert len(kps) == 1
     assert kps[0].name == 'foo'
@@ -49,13 +84,19 @@ def test_key_pairs_create_two():
     conn = boto.connect_ec2('the_key', 'the_secret')
-    kp = conn.create_key_pair('foo')
-    kp = conn.create_key_pair('bar')
-    assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
+
+    kp1 = conn.create_key_pair('foo')
+    
rsa_check_private_key(kp1.material) + + kp2 = conn.create_key_pair('bar') + rsa_check_private_key(kp2.material) + + assert kp1.material != kp2.material + kps = conn.get_all_key_pairs() kps.should.have.length_of(2) - [i.name for i in kps].should.contain('foo') - [i.name for i in kps].should.contain('bar') + assert {i.name for i in kps} == {'foo', 'bar'} + kps = conn.get_all_key_pairs('foo') kps.should.have.length_of(1) kps[0].name.should.equal('foo') @@ -64,8 +105,7 @@ def test_key_pairs_create_two(): @mock_ec2_deprecated def test_key_pairs_create_exist(): conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + conn.create_key_pair('foo') assert len(conn.get_all_key_pairs()) == 1 with assert_raises(EC2ResponseError) as cm: @@ -105,23 +145,30 @@ def test_key_pairs_import(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(EC2ResponseError) as ex: - kp = conn.import_key_pair('foo', b'content', dry_run=True) + conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal( 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') - kp = conn.import_key_pair('foo', b'content') - assert kp.name == 'foo' + kp1 = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH) + assert kp1.name == 'foo' + assert kp1.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT + + kp2 = conn.import_key_pair('foo2', RSA_PUBLIC_KEY_RFC4716) + assert kp2.name == 'foo2' + assert kp2.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT + kps = conn.get_all_key_pairs() - assert len(kps) == 1 - assert kps[0].name == 'foo' + assert len(kps) == 2 + assert kps[0].name == kp1.name + assert kps[1].name == kp2.name @mock_ec2_deprecated def test_key_pairs_import_exist(): conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.import_key_pair('foo', b'content') + kp = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH) assert kp.name == 'foo' assert len(conn.get_all_key_pairs()) == 1 @@ -132,6 +179,32 @@ def test_key_pairs_import_exist(): cm.exception.request_id.should_not.be.none +@mock_ec2_deprecated +def test_key_pairs_invalid(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', b'') + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', b'garbage') + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', DSA_PUBLIC_KEY_OPENSSH) + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + @mock_ec2_deprecated def test_key_pair_filters(): conn = boto.connect_ec2('the_key', 'the_secret') diff --git a/tests/test_ec2/test_utils.py b/tests/test_ec2/test_utils.py index ef540e193..49192dc79 100644 --- a/tests/test_ec2/test_utils.py +++ b/tests/test_ec2/test_utils.py @@ -1,8 +1,12 @@ from moto.ec2 import 
utils +from .helpers import rsa_check_private_key + def test_random_key_pair(): key_pair = utils.random_key_pair() - assert len(key_pair['fingerprint']) == 59 - assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----') - assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----') + rsa_check_private_key(key_pair['material']) + + # AWS uses MD5 fingerprints, which are 47 characters long, *not* SHA1 + # fingerprints with 59 characters. + assert len(key_pair['fingerprint']) == 47 From 868d0107bf3966e794d0917cfb13bcd46f3a872e Mon Sep 17 00:00:00 2001 From: David Date: Sat, 25 May 2019 03:18:16 -0700 Subject: [PATCH 31/41] Autoscaling instance azs (#2030) * Add instance AZ support in autoscaling * Resolve py36-py27 format string error in test_autoscaling --- moto/autoscaling/models.py | 49 +++++++--- moto/autoscaling/responses.py | 4 +- tests/test_autoscaling/test_autoscaling.py | 48 +++++++--- tests/test_autoscaling/test_elbv2.py | 2 +- tests/test_autoscaling/utils.py | 14 ++- tests/test_ec2/test_elastic_block_store.py | 28 +++--- tests/test_ec2/test_instances.py | 4 +- tests/test_ec2/test_regions.py | 12 ++- tests/test_elb/test_elb.py | 2 +- tests/test_redshift/test_redshift.py | 105 ++++++++++----------- 10 files changed, 157 insertions(+), 111 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 27e81a87c..24811be73 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -1,4 +1,7 @@ from __future__ import unicode_literals + +import random + from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel @@ -159,13 +162,7 @@ class FakeAutoScalingGroup(BaseModel): self.autoscaling_backend = autoscaling_backend self.name = name - if not availability_zones and not vpc_zone_identifier: - raise AutoscalingClientError( - "ValidationError", - "At least one Availability Zone or VPC Subnet is required." - ) - self.availability_zones = availability_zones - self.vpc_zone_identifier = vpc_zone_identifier + self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier) self.max_size = max_size self.min_size = min_size @@ -188,6 +185,35 @@ class FakeAutoScalingGroup(BaseModel): self.tags = tags if tags else [] self.set_desired_capacity(desired_capacity) + def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False): + # for updates, if only AZs are provided, they must not clash with + # the AZs of existing VPCs + if update and availability_zones and not vpc_zone_identifier: + vpc_zone_identifier = self.vpc_zone_identifier + + if vpc_zone_identifier: + # extract azs for vpcs + subnet_ids = vpc_zone_identifier.split(',') + subnets = self.autoscaling_backend.ec2_backend.get_all_subnets(subnet_ids=subnet_ids) + vpc_zones = [subnet.availability_zone for subnet in subnets] + + if availability_zones and set(availability_zones) != set(vpc_zones): + raise AutoscalingClientError( + "ValidationError", + "The availability zones of the specified subnets and the Auto Scaling group do not match", + ) + availability_zones = vpc_zones + elif not availability_zones: + if not update: + raise AutoscalingClientError( + "ValidationError", + "At least one Availability Zone or VPC Subnet is required." 
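+                    # e.g. (illustrative, not part of the original change):
+                    # creating with vpc_zone_identifier="subnet-a,subnet-b"
+                    # whose subnets sit in us-east-1a/us-east-1b derives
+                    # availability_zones as those two AZs, while passing AZs
+                    # that disagree with the subnets' AZs raises the mismatch
+                    # ValidationError above instead.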
+ ) + return + + self.availability_zones = availability_zones + self.vpc_zone_identifier = vpc_zone_identifier + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -246,8 +272,8 @@ class FakeAutoScalingGroup(BaseModel): health_check_period, health_check_type, placement_group, termination_policies, new_instances_protected_from_scale_in=None): - if availability_zones: - self.availability_zones = availability_zones + self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier, update=True) + if max_size is not None: self.max_size = max_size if min_size is not None: @@ -257,8 +283,6 @@ class FakeAutoScalingGroup(BaseModel): self.launch_config = self.autoscaling_backend.launch_configurations[ launch_config_name] self.launch_config_name = launch_config_name - if vpc_zone_identifier is not None: - self.vpc_zone_identifier = vpc_zone_identifier if health_check_period is not None: self.health_check_period = health_check_period if health_check_type is not None: @@ -319,7 +343,8 @@ class FakeAutoScalingGroup(BaseModel): self.launch_config.user_data, self.launch_config.security_groups, instance_type=self.launch_config.instance_type, - tags={'instance': propagated_tags} + tags={'instance': propagated_tags}, + placement=random.choice(self.availability_zones), ) for instance in reservation.instances: instance.autoscaling_group = self diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 3b2752d46..985c6f852 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -499,7 +499,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {{ instance_state.health_status }} - us-east-1e + {{ instance_state.instance.placement }} {{ instance_state.instance.id }} {{ group.launch_config_name }} {{ instance_state.lifecycle_state }} @@ -585,7 +585,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """ {{ instance_state.health_status }} {{ instance_state.instance.autoscaling_group.name }} - us-east-1e + {{ instance_state.instance.placement }} {{ instance_state.instance.id }} {{ instance_state.instance.autoscaling_group.launch_config_name }} {{ instance_state.lifecycle_state }} diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index d01f2dcb3..750605c07 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -32,7 +32,7 @@ def test_create_autoscaling_group(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a', 'us-east-1b'], default_cooldown=60, desired_capacity=2, health_check_period=100, @@ -42,7 +42,10 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["test_lb"], placement_group="test_placement", - vpc_zone_identifier=mocked_networking['subnet1'], + vpc_zone_identifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + ), termination_policies=["OldestInstance", "NewestInstance"], tags=[Tag( resource_id='tester_group', @@ -57,12 +60,15 @@ def test_create_autoscaling_group(): group = conn.get_all_groups()[0] group.name.should.equal('tester_group') set(group.availability_zones).should.equal( - set(['us-east-1c', 'us-east-1b'])) + set(['us-east-1a', 'us-east-1b'])) group.desired_capacity.should.equal(2) group.max_size.should.equal(2) group.min_size.should.equal(2) group.instances.should.have.length_of(2) - 
group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) + group.vpc_zone_identifier.should.equal("{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + )) group.launch_config_name.should.equal('tester') group.default_cooldown.should.equal(60) group.health_check_period.should.equal(100) @@ -109,7 +115,7 @@ def test_create_autoscaling_groups_defaults(): group.launch_config_name.should.equal('tester') # Defaults - list(group.availability_zones).should.equal([]) + list(group.availability_zones).should.equal(['us-east-1a']) # subnet1 group.desired_capacity.should.equal(2) group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.default_cooldown.should.equal(300) @@ -217,7 +223,6 @@ def test_autoscaling_update(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], desired_capacity=2, max_size=2, min_size=2, @@ -227,13 +232,16 @@ def test_autoscaling_update(): conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] + group.availability_zones.should.equal(['us-east-1a']) group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) - group.vpc_zone_identifier = 'subnet-5678efgh' + group.availability_zones = ['us-east-1b'] + group.vpc_zone_identifier = mocked_networking['subnet2'] group.update() group = conn.get_all_groups()[0] - group.vpc_zone_identifier.should.equal('subnet-5678efgh') + group.availability_zones.should.equal(['us-east-1b']) + group.vpc_zone_identifier.should.equal(mocked_networking['subnet2']) @mock_autoscaling_deprecated @@ -249,7 +257,7 @@ def test_autoscaling_tags_update(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -309,7 +317,7 @@ def test_autoscaling_group_delete(): @mock_autoscaling_deprecated def test_autoscaling_group_describe_instances(): mocked_networking = setup_networking_deprecated() - conn = boto.connect_autoscale() + conn = boto.ec2.autoscale.connect_to_region('us-east-1') config = LaunchConfiguration( name='tester', image_id='ami-abcd1234', @@ -332,7 +340,7 @@ def test_autoscaling_group_describe_instances(): instances[0].health_status.should.equal('Healthy') autoscale_instance_ids = [instance.instance_id for instance in instances] - ec2_conn = boto.connect_ec2() + ec2_conn = boto.ec2.connect_to_region('us-east-1') reservations = ec2_conn.get_all_instances() instances = reservations[0].instances instances.should.have.length_of(2) @@ -355,7 +363,7 @@ def test_set_desired_capacity_up(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -391,7 +399,7 @@ def test_set_desired_capacity_down(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -427,7 +435,7 @@ def test_set_desired_capacity_the_same(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -739,8 +747,12 @@ def test_describe_autoscaling_groups_boto3(): response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) group = response['AutoScalingGroups'][0] group['AutoScalingGroupName'].should.equal('test_asg') + 
group['AvailabilityZones'].should.equal(['us-east-1a']) + group['VPCZoneIdentifier'].should.equal(mocked_networking['subnet1']) group['NewInstancesProtectedFromScaleIn'].should.equal(True) - group['Instances'][0]['ProtectedFromScaleIn'].should.equal(True) + for instance in group['Instances']: + instance['AvailabilityZone'].should.equal('us-east-1a') + instance['ProtectedFromScaleIn'].should.equal(True) @mock_autoscaling @@ -771,6 +783,7 @@ def test_describe_autoscaling_instances_boto3(): response = client.describe_auto_scaling_instances(InstanceIds=instance_ids) for instance in response['AutoScalingInstances']: instance['AutoScalingGroupName'].should.equal('test_asg') + instance['AvailabilityZone'].should.equal('us-east-1a') instance['ProtectedFromScaleIn'].should.equal(True) @@ -794,6 +807,10 @@ def test_update_autoscaling_group_boto3(): _ = client.update_auto_scaling_group( AutoScalingGroupName='test_asg', MinSize=1, + VPCZoneIdentifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + ), NewInstancesProtectedFromScaleIn=False, ) @@ -802,6 +819,7 @@ def test_update_autoscaling_group_boto3(): ) group = response['AutoScalingGroups'][0] group['MinSize'].should.equal(1) + set(group['AvailabilityZones']).should.equal({'us-east-1a', 'us-east-1b'}) group['NewInstancesProtectedFromScaleIn'].should.equal(False) diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py index 3a50484c1..a142fd133 100644 --- a/tests/test_autoscaling/test_elbv2.py +++ b/tests/test_autoscaling/test_elbv2.py @@ -106,7 +106,7 @@ def test_detach_all_target_groups(): MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, TargetGroupARNs=[target_group_arn], - VPCZoneIdentifier=mocked_networking['vpc']) + VPCZoneIdentifier=mocked_networking['subnet1']) response = client.describe_load_balancer_target_groups( AutoScalingGroupName='test_asg') diff --git a/tests/test_autoscaling/utils.py b/tests/test_autoscaling/utils.py index b167ba5f5..ebbffbed3 100644 --- a/tests/test_autoscaling/utils.py +++ b/tests/test_autoscaling/utils.py @@ -1,5 +1,6 @@ import boto import boto3 +from boto import vpc as boto_vpc from moto import mock_ec2, mock_ec2_deprecated @@ -19,9 +20,14 @@ def setup_networking(): @mock_ec2_deprecated def setup_networking_deprecated(): - conn = boto.connect_vpc() + conn = boto_vpc.connect_to_region('us-east-1') vpc = conn.create_vpc("10.11.0.0/16") - subnet1 = conn.create_subnet(vpc.id, "10.11.1.0/24") - subnet2 = conn.create_subnet(vpc.id, "10.11.2.0/24") + subnet1 = conn.create_subnet( + vpc.id, + "10.11.1.0/24", + availability_zone='us-east-1a') + subnet2 = conn.create_subnet( + vpc.id, + "10.11.2.0/24", + availability_zone='us-east-1b') return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} - diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 728be5d1f..ab5b31ba0 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -16,7 +16,7 @@ from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated def test_create_and_delete_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") all_volumes = conn.get_all_volumes() @@ -52,7 +52,7 @@ def test_create_and_delete_volume(): @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = 
boto.ec2.connect_to_region("us-east-1") with assert_raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') @@ -63,7 +63,7 @@ def test_create_encrypted_volume_dryrun(): @mock_ec2_deprecated def test_create_encrypted_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) with assert_raises(EC2ResponseError) as ex: @@ -79,7 +79,7 @@ def test_create_encrypted_volume(): @mock_ec2_deprecated def test_filter_volume_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(80, "us-east-1a") volume2 = conn.create_volume(36, "us-east-1b") volume3 = conn.create_volume(20, "us-east-1c") @@ -99,7 +99,7 @@ def test_filter_volume_by_id(): @mock_ec2_deprecated def test_volume_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] @@ -196,7 +196,7 @@ def test_volume_filters(): @mock_ec2_deprecated def test_volume_attach_and_detach(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] volume = conn.create_volume(80, "us-east-1a") @@ -252,7 +252,7 @@ def test_volume_attach_and_detach(): @mock_ec2_deprecated def test_create_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") with assert_raises(EC2ResponseError) as ex: @@ -291,7 +291,7 @@ def test_create_snapshot(): @mock_ec2_deprecated def test_create_encrypted_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) snapshot = volume.create_snapshot('a test snapshot') snapshot.update() @@ -306,7 +306,7 @@ def test_create_encrypted_snapshot(): @mock_ec2_deprecated def test_filter_snapshot_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(36, "us-east-1a") snap1 = volume1.create_snapshot('a test snapshot 1') volume2 = conn.create_volume(42, 'us-east-1a') @@ -333,7 +333,7 @@ def test_filter_snapshot_by_id(): @mock_ec2_deprecated def test_snapshot_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(20, "us-east-1a", encrypted=False) volume2 = conn.create_volume(25, "us-east-1a", encrypted=True) @@ -404,7 +404,7 @@ def test_snapshot_filters(): def test_snapshot_attribute(): import copy - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot() @@ -507,7 +507,7 @@ def test_snapshot_attribute(): @mock_ec2_deprecated def test_create_volume_from_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot('a test snapshot') @@ -529,7 +529,7 @@ def test_create_volume_from_snapshot(): @mock_ec2_deprecated def test_create_volume_from_encrypted_snapshot(): - conn = 
boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) snapshot = volume.create_snapshot('a test snapshot') @@ -574,7 +574,7 @@ def test_modify_attribute_blockDeviceMapping(): @mock_ec2_deprecated def test_volume_tag_escaping(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") vol = conn.create_volume(10, 'us-east-1a') snapshot = conn.create_snapshot(vol.id, 'Desc') diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index c0f0eea4d..f14f85721 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -42,7 +42,7 @@ def test_add_servers(): @freeze_time("2014-01-01 05:00:00") @mock_ec2_deprecated def test_instance_launch_and_terminate(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") with assert_raises(EC2ResponseError) as ex: reservation = conn.run_instances('ami-1234abcd', dry_run=True) @@ -820,7 +820,7 @@ def test_run_instance_with_instance_type(): @mock_ec2_deprecated def test_run_instance_with_default_placement(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py index 1e87b253c..f94c78eaf 100644 --- a/tests/test_ec2/test_regions.py +++ b/tests/test_ec2/test_regions.py @@ -68,8 +68,10 @@ def test_create_autoscaling_group(): image_id='ami-abcd1234', instance_type='m1.small', ) - us_conn.create_launch_configuration(config) + x = us_conn.create_launch_configuration(config) + us_subnet_id = list(ec2_backends['us-east-1'].subnets['us-east-1c'].keys())[0] + ap_subnet_id = list(ec2_backends['ap-northeast-1'].subnets['ap-northeast-1a'].keys())[0] group = boto.ec2.autoscale.AutoScalingGroup( name='us_tester_group', availability_zones=['us-east-1c'], @@ -82,7 +84,7 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["us_test_lb"], placement_group="us_test_placement", - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=us_subnet_id, termination_policies=["OldestInstance", "NewestInstance"], ) us_conn.create_auto_scaling_group(group) @@ -107,7 +109,7 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["ap_test_lb"], placement_group="ap_test_placement", - vpc_zone_identifier='subnet-5678efgh', + vpc_zone_identifier=ap_subnet_id, termination_policies=["OldestInstance", "NewestInstance"], ) ap_conn.create_auto_scaling_group(group) @@ -121,7 +123,7 @@ def test_create_autoscaling_group(): us_group.desired_capacity.should.equal(2) us_group.max_size.should.equal(2) us_group.min_size.should.equal(2) - us_group.vpc_zone_identifier.should.equal('subnet-1234abcd') + us_group.vpc_zone_identifier.should.equal(us_subnet_id) us_group.launch_config_name.should.equal('us_tester') us_group.default_cooldown.should.equal(60) us_group.health_check_period.should.equal(100) @@ -137,7 +139,7 @@ def test_create_autoscaling_group(): ap_group.desired_capacity.should.equal(2) ap_group.max_size.should.equal(2) ap_group.min_size.should.equal(2) - ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh') + ap_group.vpc_zone_identifier.should.equal(ap_subnet_id) ap_group.launch_config_name.should.equal('ap_tester') ap_group.default_cooldown.should.equal(60) 
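     # Illustrative check (a sketch, not part of the original test): each
     # region keeps its own backend, so the ap-northeast-1 connection sees
     # only the group created above in that region.
     [g.name for g in ap_conn.get_all_groups()].should.equal(['ap_tester_group'])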
ap_group.health_check_period.should.equal(100) diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index a67508430..447896f15 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -21,7 +21,7 @@ from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated @mock_ec2_deprecated def test_create_load_balancer(): conn = boto.connect_elb() - ec2 = boto.connect_ec2('the_key', 'the_secret') + ec2 = boto.ec2.connect_to_region("us-east-1") security_group = ec2.create_security_group('sg-abc987', 'description') diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 9301aaa72..541614788 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -177,30 +177,29 @@ def test_default_cluster_attributes(): cluster['NumberOfNodes'].should.equal(1) -@mock_redshift_deprecated -@mock_ec2_deprecated +@mock_redshift +@mock_ec2 def test_create_cluster_in_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() - redshift_conn.create_cluster_subnet_group( - "my_subnet_group", - "This is my subnet group", - subnet_ids=[subnet.id], + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_subnet_group( + ClusterSubnetGroupName="my_subnet_group", + Description="This is my subnet group", + SubnetIds=[subnet.id], ) - redshift_conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_subnet_group_name='my_subnet_group', + client.create_cluster( + ClusterIdentifier="my_cluster", + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSubnetGroupName='my_subnet_group', ) - cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] + cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") + cluster = cluster_response['Clusters'][0] cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') @@ -476,28 +475,26 @@ def test_modify_cluster(): cluster['NumberOfNodes'].should.equal(1) -@mock_redshift_deprecated -@mock_ec2_deprecated +@mock_redshift +@mock_ec2 def test_create_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.1.0/24") + client = boto3.client('redshift', region_name='us-east-1') - redshift_conn = boto.connect_redshift() - - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet1.id, subnet2.id], + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet1.id, subnet2.id], ) - subnets_response = redshift_conn.describe_cluster_subnet_groups( - "my_subnet") - my_subnet = 
subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] + subnets_response = client.describe_cluster_subnet_groups( + ClusterSubnetGroupName="my_subnet_group") + my_subnet = subnets_response['ClusterSubnetGroups'][0] - my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") + my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet_group") my_subnet['Description'].should.equal("This is my subnet group") subnet_ids = [subnet['SubnetIdentifier'] for subnet in my_subnet['Subnets']] @@ -522,35 +519,33 @@ def test_describe_non_existent_subnet_group(): "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) -@mock_redshift_deprecated -@mock_ec2_deprecated +@mock_redshift +@mock_ec2 def test_delete_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + client = boto3.client('redshift', region_name='us-east-1') - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet.id], + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id], ) - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets_response = client.describe_cluster_subnet_groups() + subnets = subnets_response['ClusterSubnetGroups'] subnets.should.have.length_of(1) - redshift_conn.delete_cluster_subnet_group("my_subnet") + client.delete_cluster_subnet_group(ClusterSubnetGroupName="my_subnet_group") - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets_response = client.describe_cluster_subnet_groups() + subnets = subnets_response['ClusterSubnetGroups'] subnets.should.have.length_of(0) # Delete invalid id - redshift_conn.delete_cluster_subnet_group.when.called_with( - "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + client.delete_cluster_subnet_group.when.called_with( + ClusterSubnetGroupName="not-a-subnet-group").should.throw(ClientError) @mock_redshift_deprecated From 1b9153416519294607fa68a124c10696af60f16e Mon Sep 17 00:00:00 2001 From: William Richard Date: Sat, 25 May 2019 06:18:39 -0400 Subject: [PATCH 32/41] Add support for redirect actions on ELBv2 listeners (#2029) --- moto/elbv2/exceptions.py | 2 +- moto/elbv2/models.py | 58 ++++++++++---- moto/elbv2/responses.py | 42 +++++++++- tests/test_elbv2/test_elbv2.py | 140 +++++++++++++++++++++++++++++++++ 4 files changed, 223 insertions(+), 19 deletions(-) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index 0bf9649d7..11dcbcb21 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -131,7 +131,7 @@ class InvalidActionTypeError(ELBClientError): def __init__(self, invalid_name, index): super(InvalidActionTypeError, self).__init__( "ValidationError", - "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward]" % 
(invalid_name, index) + "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward, redirect]" % (invalid_name, index) ) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 3925fa95d..8d98f187d 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -204,8 +204,20 @@ class FakeListener(BaseModel): # transform default actions to confirm with the rest of the code and XML templates if "DefaultActions" in properties: default_actions = [] - for action in properties['DefaultActions']: - default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']}) + for i, action in enumerate(properties['DefaultActions']): + action_type = action['Type'] + if action_type == 'forward': + default_actions.append({'type': action_type, 'target_group_arn': action['TargetGroupArn']}) + elif action_type == 'redirect': + redirect_action = {'type': action_type, } + for redirect_config_key, redirect_config_value in action['RedirectConfig'].items(): + # need to match the output of _get_list_prefix + if redirect_config_key == 'StatusCode': + redirect_config_key = 'status_code' + redirect_action['redirect_config._' + redirect_config_key.lower()] = redirect_config_value + default_actions.append(redirect_action) + else: + raise InvalidActionTypeError(action_type, i + 1) else: default_actions = None @@ -417,11 +429,15 @@ class ELBv2Backend(BaseBackend): for i, action in enumerate(actions): index = i + 1 action_type = action['type'] - if action_type not in ['forward']: + if action_type == 'forward': + action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: raise InvalidActionTypeError(action_type, index) - action_target_group_arn = action['target_group_arn'] - if action_target_group_arn not in target_group_arns: - raise ActionTargetGroupNotFoundError(action_target_group_arn) # TODO: check for error 'TooManyRegistrationsForTargetId' # TODO: check for error 'TooManyRules' @@ -483,10 +499,18 @@ class ELBv2Backend(BaseBackend): arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self)) listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions) balancer.listeners[listener.arn] = listener - for action in default_actions: - if action['target_group_arn'] in self.target_groups.keys(): - target_group = self.target_groups[action['target_group_arn']] - target_group.load_balancer_arns.append(load_balancer_arn) + for i, action in enumerate(default_actions): + action_type = action['type'] + if action_type == 'forward': + if action['target_group_arn'] in self.target_groups.keys(): + target_group = self.target_groups[action['target_group_arn']] + target_group.load_balancer_arns.append(load_balancer_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: + raise InvalidActionTypeError(action_type, i + 1) + return listener def describe_load_balancers(self, arns, names): @@ -649,11 +673,15 @@ class ELBv2Backend(BaseBackend): for i, action in enumerate(actions): index = i + 1 action_type = action['type'] - if action_type not in ['forward']: + if action_type == 'forward': + action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) 
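+            # Illustrative shape (not part of the original change): after
+            # _get_list_prefix flattens the querystring, a redirect action
+            # reaches this validation looking like
+            #     {'type': 'redirect',
+            #      'redirect_config._protocol': 'HTTPS',
+            #      'redirect_config._port': '443',
+            #      'redirect_config._status_code': 'HTTP_301'}
+            # so only forward actions carry a target_group_arn to check.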
+ elif action_type == 'redirect': + # nothing to do + pass + else: raise InvalidActionTypeError(action_type, index) - action_target_group_arn = action['target_group_arn'] - if action_target_group_arn not in target_group_arns: - raise ActionTargetGroupNotFoundError(action_target_group_arn) # TODO: check for error 'TooManyRegistrationsForTargetId' # TODO: check for error 'TooManyRules' @@ -873,7 +901,7 @@ class ELBv2Backend(BaseBackend): # Its already validated in responses.py listener.ssl_policy = ssl_policy - if default_actions is not None: + if default_actions is not None and default_actions != []: # Is currently not validated listener.default_actions = default_actions diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 1814f1273..3ca53240b 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -704,7 +704,11 @@ CREATE_RULE_TEMPLATE = """ + {% if action["type"] == "forward" %} {{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + {{ action["redirect_config"] }} + {% endif %} {% endfor %} @@ -772,7 +776,15 @@ CREATE_LISTENER_TEMPLATE = """{{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -877,7 +889,15 @@ DESCRIBE_RULES_TEMPLATE = """ + {% if action["type"] == "forward" %} {{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -970,7 +990,15 @@ DESCRIBE_LISTENERS_TEMPLATE = """{{ action["target_group_arn"] }}m + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -1399,7 +1427,15 @@ MODIFY_LISTENER_TEMPLATE = """{{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index b58345fdb..2010e384a 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1586,3 +1586,143 @@ def test_create_target_groups_through_cloudformation(): assert len( [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')] ) == 2 + + +@mock_elbv2 +@mock_ec2 +def test_redirect_action_listener_rule(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = 
conn.create_listener(LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[ + {'Type': 'redirect', + 'RedirectConfig': { + 'Protocol': 'HTTPS', + 'Port': '443', + 'StatusCode': 'HTTP_301' + }}]) + + listener = response.get('Listeners')[0] + expected_default_actions = [{ + 'Type': 'redirect', + 'RedirectConfig': { + 'Protocol': 'HTTPS', + 'Port': '443', + 'StatusCode': 'HTTP_301' + } + }] + listener.get('DefaultActions').should.equal(expected_default_actions) + listener_arn = listener.get('ListenerArn') + + describe_rules_response = conn.describe_rules(ListenerArn=listener_arn) + describe_rules_response['Rules'][0]['Actions'].should.equal(expected_default_actions) + + describe_listener_response = conn.describe_listeners(ListenerArns=[listener_arn, ]) + describe_listener_actions = describe_listener_response['Listeners'][0]['DefaultActions'] + describe_listener_actions.should.equal(expected_default_actions) + + modify_listener_response = conn.modify_listener(ListenerArn=listener_arn, Port=81) + modify_listener_actions = modify_listener_response['Listeners'][0]['DefaultActions'] + modify_listener_actions.should.equal(expected_default_actions) + + +@mock_elbv2 +@mock_cloudformation +def test_redirect_action_listener_rule_cloudformation(): + cnf_conn = boto3.client('cloudformation', region_name='us-east-1') + elbv2_client = boto3.client('elbv2', region_name='us-east-1') + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + }, + }, + "subnet1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "subnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "testLb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "my-lb", + "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], + "Type": "application", + "SecurityGroups": [], + } + }, + "testListener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "LoadBalancerArn": {"Ref": "testLb"}, + "Port": 80, + "Protocol": "HTTP", + "DefaultActions": [{ + "Type": "redirect", + "RedirectConfig": { + "Port": "443", + "Protocol": "HTTPS", + "StatusCode": "HTTP_301", + } + }] + } + + } + } + } + template_json = json.dumps(template) + cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_load_balancers_response = elbv2_client.describe_load_balancers(Names=['my-lb',]) + describe_load_balancers_response['LoadBalancers'].should.have.length_of(1) + load_balancer_arn = describe_load_balancers_response['LoadBalancers'][0]['LoadBalancerArn'] + + describe_listeners_response = elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn) + + describe_listeners_response['Listeners'].should.have.length_of(1) + describe_listeners_response['Listeners'][0]['DefaultActions'].should.equal([{ + 'Type': 'redirect', + 'RedirectConfig': { + 'Port': '443', 'Protocol': 'HTTPS', 'StatusCode': 'HTTP_301', + } + },]) From d9524109659b6075eba26ba586ff4b5a0aab1e53 Mon Sep 17 00:00:00 2001 From: Gregory Bataille Date: Sat, 25 May 2019 12:19:00 +0200 Subject: [PATCH 33/41] 1987 support transfer acceleration (#2018) * chore(): remove executable flag on moto/s3/response.py * chore(): ignore 
.eggs temp file * feat(#1987): get bucket acceleration support * feat(#1987): put bucket acceleration support * feat(#1987): suspend undefined bucket is a no-op * feat(#1987): validate accelerate_configuration status * feat(#1987): bucket containing dots do not support acceleration * doc(#1987): update implementation coverage --- .gitignore | 1 + IMPLEMENTATION_COVERAGE.md | 4 +- moto/s3/models.py | 26 +++++++++++-- moto/s3/responses.py | 31 +++++++++++++++ tests/test_s3/test_s3.py | 77 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 133 insertions(+), 6 deletions(-) mode change 100755 => 100644 moto/s3/responses.py diff --git a/.gitignore b/.gitignore index efb489651..0a24fe476 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,4 @@ venv/ .python-version .vscode/ tests/file.tmp +.eggs/ diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 504037469..506e4e284 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -3540,7 +3540,7 @@ - [ ] delete_object - [ ] delete_object_tagging - [ ] delete_objects -- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration - [ ] get_bucket_cors @@ -3574,7 +3574,7 @@ - [ ] list_objects - [ ] list_objects_v2 - [ ] list_parts -- [ ] put_bucket_accelerate_configuration +- [X] put_bucket_accelerate_configuration - [ ] put_bucket_acl - [ ] put_bucket_analytics_configuration - [X] put_bucket_cors diff --git a/moto/s3/models.py b/moto/s3/models.py index 9e4a6a766..59a7af580 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -17,8 +17,11 @@ import six from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from .exceptions import BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, \ - EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, DuplicateTagKeys +from .exceptions import ( + BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, InvalidRequest, + EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, + InvalidTargetBucketForLogging, DuplicateTagKeys, CrossLocationLoggingProhibitted +) from .utils import clean_key_name, _VersionedKeyStore MAX_BUCKET_NAME_LENGTH = 63 @@ -463,6 +466,7 @@ class FakeBucket(BaseModel): self.cors = [] self.logging = {} self.notification_configuration = None + self.accelerate_configuration = None @property def location(self): @@ -557,7 +561,6 @@ class FakeBucket(BaseModel): self.rules = [] def set_cors(self, rules): - from moto.s3.exceptions import InvalidRequest, MalformedXML self.cors = [] if len(rules) > 100: @@ -607,7 +610,6 @@ class FakeBucket(BaseModel): self.logging = {} return - from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted # Target bucket must exist in the same account (assuming all moto buckets are in the same account): if not bucket_backend.buckets.get(logging_config["TargetBucket"]): raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.") @@ -655,6 +657,13 @@ class FakeBucket(BaseModel): if region != self.region_name: raise InvalidNotificationDestination() + def set_accelerate_configuration(self, accelerate_config): + if self.accelerate_configuration is None and accelerate_config == 'Suspended': + # Cannot "suspend" a not active acceleration. 
Leaves it undefined + return + + self.accelerate_configuration = accelerate_config + def set_website_configuration(self, website_configuration): self.website_configuration = website_configuration @@ -857,6 +866,15 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.set_notification_configuration(notification_config) + def put_bucket_accelerate_configuration(self, bucket_name, accelerate_configuration): + if accelerate_configuration not in ['Enabled', 'Suspended']: + raise MalformedXML() + + bucket = self.get_bucket(bucket_name) + if bucket.name.find('.') != -1: + raise InvalidRequest('PutBucketAccelerateConfiguration') + bucket.set_accelerate_configuration(accelerate_configuration) + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) diff --git a/moto/s3/responses.py b/moto/s3/responses.py old mode 100755 new mode 100644 index 856178941..42d064828 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -257,6 +257,13 @@ class ResponseObject(_TemplateEnvironmentMixin): return 200, {}, "" template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) return template.render(bucket=bucket) + elif "accelerate" in querystring: + bucket = self.backend.get_bucket(bucket_name) + if bucket.accelerate_configuration is None: + template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET) + return 200, {}, template.render() + template = self.response_template(S3_BUCKET_ACCELERATE) + return template.render(bucket=bucket) elif 'versions' in querystring: delimiter = querystring.get('delimiter', [None])[0] @@ -442,6 +449,15 @@ class ResponseObject(_TemplateEnvironmentMixin): raise MalformedXML() except Exception as e: raise e + elif "accelerate" in querystring: + try: + accelerate_status = self._accelerate_config_from_xml(body) + self.backend.put_bucket_accelerate_configuration(bucket_name, accelerate_status) + return "" + except KeyError: + raise MalformedXML() + except Exception as e: + raise e else: if body: @@ -1034,6 +1050,11 @@ class ResponseObject(_TemplateEnvironmentMixin): return parsed_xml["NotificationConfiguration"] + def _accelerate_config_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + config = parsed_xml['AccelerateConfiguration'] + return config['Status'] + def _key_response_delete(self, bucket_name, query, key_name, headers): if query.get('uploadId'): upload_id = query['uploadId'][0] @@ -1686,3 +1707,13 @@ S3_GET_BUCKET_NOTIFICATION_CONFIG = """ {% endfor %} """ + +S3_BUCKET_ACCELERATE = """ + + {{ bucket.accelerate_configuration }} + +""" + +S3_BUCKET_ACCELERATE_NOT_SET = """ + +""" diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 6af23849c..1e9d25327 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2820,3 +2820,80 @@ def test_boto3_bucket_name_too_short(): with assert_raises(ClientError) as exc: s3.create_bucket(Bucket='x'*2) exc.exception.response['Error']['Code'].should.equal('InvalidBucketName') + +@mock_s3 +def test_accelerated_none_when_unspecified(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.shouldnt.have.key('Status') + +@mock_s3 +def test_can_enable_bucket_acceleration(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + 
AccelerateConfiguration={'Status': 'Enabled'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.should.have.key('Status') + resp['Status'].should.equal('Enabled') + +@mock_s3 +def test_can_suspend_bucket_acceleration(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Suspended'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.should.have.key('Status') + resp['Status'].should.equal('Suspended') + +@mock_s3 +def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Suspended'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.shouldnt.have.key('Status') + +@mock_s3 +def test_accelerate_configuration_status_validation(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as exc: + s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'bad_status'}, + ) + exc.exception.response['Error']['Code'].should.equal('MalformedXML') + +@mock_s3 +def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): + bucket_name = 'some.bucket.with.dots' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as exc: + s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + exc.exception.response['Error']['Code'].should.equal('InvalidRequest') From 4cce4defac738bb0e6fd7d475c07377211b4d6e4 Mon Sep 17 00:00:00 2001 From: Jordan Sanders Date: Sat, 25 May 2019 05:19:26 -0500 Subject: [PATCH 34/41] Support CustomAmiId in EMR (#2037) --- moto/emr/models.py | 4 +++- moto/emr/responses.py | 15 ++++++++++++ tests/test_emr/test_emr_boto3.py | 41 ++++++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 1 deletion(-) diff --git a/moto/emr/models.py b/moto/emr/models.py index 6b7147e3f..4b591acb1 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -97,7 +97,8 @@ class FakeCluster(BaseModel): visible_to_all_users='false', release_label=None, requested_ami_version=None, - running_ami_version=None): + running_ami_version=None, + custom_ami_id=None): self.id = cluster_id or random_cluster_id() emr_backend.clusters[self.id] = self self.emr_backend = emr_backend @@ -162,6 +163,7 @@ class FakeCluster(BaseModel): self.release_label = release_label self.requested_ami_version = requested_ami_version self.running_ami_version = running_ami_version + self.custom_ami_id = custom_ami_id self.role = job_flow_role or 'EMRJobflowDefault' self.service_role = service_role diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 933e0177b..c807b5f54 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -267,6 +267,18 @@ class 
ElasticMapReduceResponse(BaseResponse): else: kwargs['running_ami_version'] = '1.0.0' + custom_ami_id = self._get_param('CustomAmiId') + if custom_ami_id: + kwargs['custom_ami_id'] = custom_ami_id + if release_label and release_label < 'emr-5.7.0': + message = 'Custom AMI is not allowed' + raise EmrError(error_type='ValidationException', + message=message, template='error_json') + elif ami_version: + message = 'Custom AMI is not supported in this version of EMR' + raise EmrError(error_type='ValidationException', + message=message, template='error_json') + cluster = self.backend.run_job_flow(**kwargs) applications = self._get_list_prefix('Applications.member') @@ -375,6 +387,9 @@ DESCRIBE_CLUSTER_TEMPLATE = """ Date: Sat, 25 May 2019 19:32:52 +0200 Subject: [PATCH 36/41] Fix small issue in README example (#1804) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 56f73e28e..70faee2c8 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ def test_my_model_save(): body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8") - assert body == b'is awesome' + assert body == 'is awesome' ``` With the decorator wrapping the test, all the calls to s3 are automatically mocked out. The mock keeps the state of the buckets and keys. From 6fb85ac43075b2e29d832edecc44935da61699ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bendeg=C3=BAz=20=C3=81cs?= <30595431+acsbendi@users.noreply.github.com> Date: Sat, 25 May 2019 19:34:29 +0200 Subject: [PATCH 37/41] Updated implementation coverage for cloudformation (#2139) * Updated implementation coverage for cloudformation * Updated cloudformation coverage with recently implemented endpoints --- IMPLEMENTATION_COVERAGE.md | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index b198ac865..e03eaabe1 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -470,7 +470,7 @@ - [ ] upgrade_applied_schema - [ ] upgrade_published_schema -## cloudformation - 56% implemented +## cloudformation - 65% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set @@ -481,19 +481,24 @@ - [X] delete_stack - [X] delete_stack_instances - [X] delete_stack_set +- [ ] deploy - [ ] describe_account_limits - [X] describe_change_set -- [ ] describe_stack_events +- [ ] describe_stack_drift_detection_status +- [X] describe_stack_events - [X] describe_stack_instance -- [ ] describe_stack_resource -- [ ] describe_stack_resources +- [X] describe_stack_resource +- [ ] describe_stack_resource_drifts +- [X] describe_stack_resources - [X] describe_stack_set - [X] describe_stack_set_operation - [X] describe_stacks +- [ ] detect_stack_drift +- [ ] detect_stack_resource_drift - [ ] estimate_template_cost - [X] execute_change_set - [ ] get_stack_policy -- [ ] get_template +- [X] get_template - [ ] get_template_summary - [X] list_change_sets - [X] list_exports @@ -504,6 +509,7 @@ - [X] list_stack_set_operations - [X] list_stack_sets - [X] list_stacks +- [ ] package - [ ] set_stack_policy - [ ] signal_resource - [X] stop_stack_set_operation @@ -511,7 +517,8 @@ - [X] update_stack_instances - [X] update_stack_set - [ ] update_termination_protection -- [ ] validate_template +- [X] validate_template +- [ ] wait ## cloudfront - 0% implemented - [ ] create_cloud_front_origin_access_identity From c739c5331e60147c446d039dc68b9590c51c4af8 Mon Sep 17 00:00:00 2001 From: Garrett Date: Sat, 25 May 2019 
13:34:47 -0400 Subject: [PATCH 38/41] Handle UnicodeEncodeError when parsing querystring (#2170) --- moto/core/responses.py | 11 +++++++++-- tests/test_core/test_responses.py | 10 +++++++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index 8fb247f75..9da36b865 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -152,11 +152,18 @@ class BaseResponse(_TemplateEnvironmentMixin): for key, value in flat.items(): querystring[key] = [value] elif self.body: - querystring.update(parse_qs(raw_body, keep_blank_values=True)) + try: + querystring.update(parse_qs(raw_body, keep_blank_values=True)) + except UnicodeEncodeError: + pass # ignore encoding errors, as the body may not contain a legitimate querystring if not querystring: querystring.update(headers) - querystring = _decode_dict(querystring) + try: + querystring = _decode_dict(querystring) + except UnicodeDecodeError: + pass # ignore decoding errors, as the body may not contain a legitimate querystring + self.uri = full_url self.path = urlparse(full_url).path self.querystring = querystring diff --git a/tests/test_core/test_responses.py b/tests/test_core/test_responses.py index c3cc27aef..d0f672ab8 100644 --- a/tests/test_core/test_responses.py +++ b/tests/test_core/test_responses.py @@ -2,7 +2,9 @@ from __future__ import unicode_literals import sure # noqa -from moto.core.responses import AWSServiceSpec +from botocore.awsrequest import AWSPreparedRequest + +from moto.core.responses import AWSServiceSpec, BaseResponse from moto.core.responses import flatten_json_request_body @@ -79,3 +81,9 @@ def test_flatten_json_request_body(): i += 1 key = keyfmt.format(idx + 1, i) props.should.equal(body['Configurations'][idx]['Properties']) + + +def test_parse_qs_unicode_decode_error(): + body = b'{"key": "%D0"}, "C": "#0 = :0"}' + request = AWSPreparedRequest('GET', 'http://request', {'foo': 'bar'}, body, False) + BaseResponse().setup_class(request, request.url, request.headers) From f408709ef9cec376dabc9d2ba9d7964a27d5a0f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bendeg=C3=BAz=20=C3=81cs?= <30595431+acsbendi@users.noreply.github.com> Date: Sat, 25 May 2019 19:35:07 +0200 Subject: [PATCH 39/41] VPC IPv4 validation (#2026) * Implemented throwing invalid subnet range error and fixed breaking tests. * Implemented throwing invalid CIDR block parameter error for vpcs and subnets. * Implemented throwing invalid destination CIDR block error. * IPv6 addresses not accepted, strict checking disabled. * Implemented throwing invalid subnet conflict error and fixed breaking tests. * Implemented throwing invalid VPC range error and fixed breaking tests. * Fixed accidentally removed ). * Fixed test case trying to create two subnets with the same CIDR range. 
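
Note for readers who want the validation pattern on its own: a minimal standalone sketch of what this patch does, using only the standard library's ipaddress module. The function names below are illustrative, not moto's API; moto wraps the same failures in EC2-style error responses.

import ipaddress

def validate_vpc_cidr(cidr_block):
    # Reject values that are not valid IPv4 CIDR notation at all.
    try:
        network = ipaddress.IPv4Network(str(cidr_block), strict=False)
    except ValueError:
        raise ValueError("not a valid CIDR block: %s" % cidr_block)
    # VPC netmasks must fall between /16 and /28, mirroring the patch's range check.
    if network.prefixlen < 16 or network.prefixlen > 28:
        raise ValueError("CIDR %s is outside the allowed /16../28 range" % cidr_block)
    return network

def subnet_fits_vpc(vpc_cidr, subnet_cidr):
    # A subnet is only valid when its whole range lies inside the VPC's range.
    vpc = ipaddress.IPv4Network(str(vpc_cidr), strict=False)
    subnet = ipaddress.IPv4Network(str(subnet_cidr), strict=False)
    return (vpc.network_address <= subnet.network_address and
            vpc.broadcast_address >= subnet.broadcast_address)

assert subnet_fits_vpc('10.0.0.0/16', '10.0.1.0/24')
assert not subnet_fits_vpc('10.0.0.0/16', '10.1.0.0/24')
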
--- moto/ec2/exceptions.py | 45 +++++++++++++++ moto/ec2/models.py | 32 ++++++++++- .../test_cloudformation_stack_integration.py | 4 +- tests/test_ec2/test_network_acls.py | 2 +- tests/test_ec2/test_route_tables.py | 24 ++++++++ tests/test_ec2/test_security_groups.py | 2 +- tests/test_ec2/test_spot_fleet.py | 2 +- tests/test_ec2/test_spot_instances.py | 2 +- tests/test_ec2/test_subnets.py | 55 ++++++++++++++++++- tests/test_ec2/test_vpcs.py | 24 ++++++++ tests/test_elbv2/test_elbv2.py | 40 +++++++------- tests/test_rds/test_rds.py | 10 ++-- tests/test_rds2/test_rds2.py | 16 +++--- .../test_resourcegroupstaggingapi.py | 2 +- 14 files changed, 215 insertions(+), 45 deletions(-) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index 0453a42a1..d93ced163 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -430,6 +430,51 @@ class OperationNotPermitted(EC2ClientError): ) +class InvalidSubnetRangeError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidSubnetRangeError, self).__init__( + "InvalidSubnet.Range", + "The CIDR '{}' is invalid.".format(cidr_block) + ) + + +class InvalidCIDRBlockParameterError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidCIDRBlockParameterError, self).__init__( + "InvalidParameterValue", + "Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block) + ) + + +class InvalidDestinationCIDRBlockParameterError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidDestinationCIDRBlockParameterError, self).__init__( + "InvalidParameterValue", + "Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block) + ) + + +class InvalidSubnetConflictError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidSubnetConflictError, self).__init__( + "InvalidSubnet.Conflict", + "The CIDR '{}' conflicts with another subnet".format(cidr_block) + ) + + +class InvalidVPCRangeError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidVPCRangeError, self).__init__( + "InvalidVpc.Range", + "The CIDR '{}' is invalid.".format(cidr_block) + ) + + # accept exception class OperationNotPermitted2(EC2ClientError): def __init__(self, client_region, pcx_id, acceptor_region): diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 7d0a4ac08..c2e5970bd 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -36,8 +36,10 @@ from .exceptions import ( InvalidAMIIdError, InvalidAMIAttributeItemValueError, InvalidAssociationIdError, + InvalidCIDRBlockParameterError, InvalidCIDRSubnetError, InvalidCustomerGatewayIdError, + InvalidDestinationCIDRBlockParameterError, InvalidDHCPOptionsIdError, InvalidDomainError, InvalidID, @@ -58,13 +60,16 @@ from .exceptions import ( InvalidSecurityGroupDuplicateError, InvalidSecurityGroupNotFoundError, InvalidSnapshotIdError, + InvalidSubnetConflictError, InvalidSubnetIdError, + InvalidSubnetRangeError, InvalidVolumeIdError, InvalidVolumeAttachmentError, InvalidVpcCidrBlockAssociationIdError, InvalidVPCPeeringConnectionIdError, InvalidVPCPeeringConnectionStateTransitionError, InvalidVPCIdError, + InvalidVPCRangeError, InvalidVpnGatewayIdError, InvalidVpnConnectionIdError, MalformedAMIIdError, @@ -2151,6 +2156,12 @@ class VPCBackend(object): def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False): vpc_id = random_vpc_id() + try: + vpc_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False) + except ValueError: + 
raise InvalidCIDRBlockParameterError(cidr_block) + if vpc_cidr_block.prefixlen < 16 or vpc_cidr_block.prefixlen > 28: + raise InvalidVPCRangeError(cidr_block) vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block) self.vpcs[vpc_id] = vpc @@ -2367,7 +2378,7 @@ class Subnet(TaggedEC2Resource): self.id = subnet_id self.vpc_id = vpc_id self.cidr_block = cidr_block - self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block)) + self.cidr = ipaddress.IPv4Network(six.text_type(self.cidr_block), strict=False) self._availability_zone = availability_zone self.default_for_az = default_for_az self.map_public_ip_on_launch = map_public_ip_on_launch @@ -2499,7 +2510,19 @@ class SubnetBackend(object): def create_subnet(self, vpc_id, cidr_block, availability_zone): subnet_id = random_subnet_id() - self.get_vpc(vpc_id) # Validate VPC exists + vpc = self.get_vpc(vpc_id) # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's + vpc_cidr_block = ipaddress.IPv4Network(six.text_type(vpc.cidr_block), strict=False) + try: + subnet_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False) + except ValueError: + raise InvalidCIDRBlockParameterError(cidr_block) + if not (vpc_cidr_block.network_address <= subnet_cidr_block.network_address and + vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address): + raise InvalidSubnetRangeError(cidr_block) + + for subnet in self.get_all_subnets(filters={'vpc-id': vpc_id}): + if subnet.cidr.overlaps(subnet_cidr_block): + raise InvalidSubnetConflictError(cidr_block) # if this is the first subnet for an availability zone, # consider it the default @@ -2759,6 +2782,11 @@ class RouteBackend(object): elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id: gateway = self.get_internet_gateway(gateway_id) + try: + ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False) + except ValueError: + raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) + route = Route(route_table, destination_cidr_block, local=local, gateway=gateway, instance=self.get_instance( diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 87fbdb0e6..42ddd2351 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2089,7 +2089,7 @@ def test_stack_kms(): def test_stack_spot_fleet(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] @@ -2173,7 +2173,7 @@ def test_stack_spot_fleet(): def test_stack_spot_fleet_should_figure_out_default_price(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index 9c92c949e..5b7f107dd 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -28,7 +28,7 @@ def test_new_subnet_associates_with_default_network_acl(): conn = 
boto.connect_vpc('the_key', 'the secret') vpc = conn.get_all_vpcs()[0] - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + subnet = conn.create_subnet(vpc.id, "172.31.48.0/20") all_network_acls = conn.get_all_network_acls() all_network_acls.should.have.length_of(1) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index b27484468..de33b3f7a 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -6,6 +6,7 @@ from nose.tools import assert_raises import boto import boto3 from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError import sure # noqa from moto import mock_ec2, mock_ec2_deprecated @@ -528,3 +529,26 @@ def test_network_acl_tagging(): if na.id == route_table.id) test_route_table.tags.should.have.length_of(1) test_route_table.tags["a key"].should.equal("some value") + + +@mock_ec2 +def test_create_route_with_invalid_destination_cidr_block_parameter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + route_table = ec2.create_route_table(VpcId=vpc.id) + route_table.reload() + + internet_gateway = ec2.create_internet_gateway() + vpc.attach_internet_gateway(InternetGatewayId=internet_gateway.id) + internet_gateway.reload() + + destination_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + route = route_table.create_route(DestinationCidrBlock=destination_cidr_block, GatewayId=internet_gateway.id) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateRoute " + "operation: Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(destination_cidr_block)) \ No newline at end of file diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index d843087a6..c09b1e8f4 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -501,7 +501,7 @@ def test_sec_group_rule_limit_vpc(): ec2_conn = boto.connect_ec2() vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc('10.0.0.0/8') + vpc = vpc_conn.create_vpc('10.0.0.0/16') sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id) other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id) diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index 190f3b1f1..6221d633f 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -7,7 +7,7 @@ from moto import mock_ec2 def get_subnet_id(conn): - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 05f8ee88f..ab08d392c 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -17,7 +17,7 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds @mock_ec2 def test_request_spot_instances(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] 
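
The fixture churn in the test files above and below follows directly from the new range check: a VPC created with a /8 block used to be accepted silently but is now rejected, so fixtures move to /16. A quick sketch of the behaviour this patch introduces, written against moto's mock in the same style as the surrounding tests (the function name is illustrative):

import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2

@mock_ec2
def check_vpc_range_validation():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    ec2.create_vpc(CidrBlock='10.0.0.0/16')  # accepted: /16 is within the /16../28 window
    try:
        ec2.create_vpc(CidrBlock='10.0.0.0/8')  # netmask too wide after this patch
        raise AssertionError('expected InvalidVpc.Range')
    except ClientError as e:
        assert e.response['Error']['Code'] == 'InvalidVpc.Range'

check_vpc_range_validation()
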
diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 99e6d45d8..38571b285 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -7,7 +7,7 @@ import boto3 import boto import boto.vpc from boto.exception import EC2ResponseError -from botocore.exceptions import ParamValidationError +from botocore.exceptions import ParamValidationError, ClientError import json import sure # noqa @@ -84,7 +84,7 @@ def test_default_subnet(): default_vpc.is_default.should.be.ok subnet = ec2.create_subnet( - VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') + VpcId=default_vpc.id, CidrBlock='172.31.48.0/20', AvailabilityZone='us-west-1a') subnet.reload() subnet.map_public_ip_on_launch.shouldnt.be.ok @@ -126,7 +126,7 @@ def test_modify_subnet_attribute(): vpc = list(ec2.vpcs.all())[0] subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + VpcId=vpc.id, CidrBlock="172.31.48.0/20", AvailabilityZone='us-west-1a') # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action subnet.reload() @@ -289,3 +289,52 @@ def test_subnet_tags_through_cloudformation(): subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] subnet.tags["foo"].should.equal("bar") subnet.tags["blah"].should.equal("baz") + + +@mock_ec2 +def test_create_subnet_with_invalid_cidr_range(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '10.1.0.0/20' + with assert_raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " + "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block)) + + +@mock_ec2 +def test_create_subnet_with_invalid_cidr_block_parameter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateSubnet " + "operation: Value ({}) for parameter cidrBlock is invalid. 
This is not a valid CIDR block.".format(subnet_cidr_block)) + + +@mock_ec2 +def test_create_subnets_with_overlapping_cidr_blocks(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '10.0.0.0/24' + with assert_raises(ClientError) as ex: + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidSubnet.Conflict) when calling the CreateSubnet " + "operation: The CIDR '{}' conflicts with another subnet".format(subnet_cidr_block)) diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 318491b44..ad17deb3c 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -539,3 +539,27 @@ def test_ipv6_cidr_block_association_filters(): filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state', 'Values': ['associated']}])) filtered_vpcs.should.be.length_of(2) # 2 of 4 VPCs + + +@mock_ec2 +def test_create_vpc_with_invalid_cidr_block_parameter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateVpc " + "operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(vpc_cidr_block)) + + +@mock_ec2 +def test_create_vpc_with_invalid_cidr_range(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc_cidr_block = '10.1.0.0/29' + with assert_raises(ClientError) as ex: + vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidVpc.Range) when calling the CreateVpc " + "operation: The CIDR '{}' is invalid.".format(vpc_cidr_block)) diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 2010e384a..03273ad3a 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -27,7 +27,7 @@ def test_create_load_balancer(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -69,7 +69,7 @@ def test_describe_load_balancers(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') conn.create_load_balancer( @@ -112,7 +112,7 @@ def test_add_remove_tags(): vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') subnet1 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, @@ -234,7 +234,7 @@ def test_create_elb_in_multiple_region(): InstanceTenancy='default') subnet1 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone=region + 'a') subnet2 = ec2.create_subnet( VpcId=vpc.id, @@ -275,7 +275,7 @@ def test_create_target_group_and_listeners(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -434,7 +434,7 @@ 
def test_create_target_group_without_non_required_parameters(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -538,7 +538,7 @@ def test_describe_paginated_balancers(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') for i in range(51): @@ -573,7 +573,7 @@ def test_delete_load_balancer(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -606,7 +606,7 @@ def test_register_targets(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') conn.create_load_balancer( @@ -682,7 +682,7 @@ def test_target_group_attributes(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -773,7 +773,7 @@ def test_handle_listener_rules(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -1078,7 +1078,7 @@ def test_describe_invalid_target_group(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -1124,7 +1124,7 @@ def test_describe_target_groups_no_arguments(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -1188,7 +1188,7 @@ def test_set_ip_address_type(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = client.create_load_balancer( @@ -1238,7 +1238,7 @@ def test_set_security_groups(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = client.create_load_balancer( @@ -1275,11 +1275,11 @@ def test_set_subnets(): vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') subnet1 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.64/26', AvailabilityZone='us-east-1b') subnet3 = ec2.create_subnet( VpcId=vpc.id, @@ -1332,7 +1332,7 @@ def test_set_subnets(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = client.create_load_balancer( @@ -1421,7 +1421,7 @@ def test_modify_listener_http_to_https(): AvailabilityZone='eu-central-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='eu-central-1b') response = client.create_load_balancer( @@ 
-1603,7 +1603,7 @@ def test_redirect_action_listener_rule(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.128/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 5bf733dc6..af330e672 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -174,8 +174,8 @@ def test_add_security_group_to_database(): def test_add_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24") + subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.2.0/24") subnet_ids = [subnet1.id, subnet2.id] conn = boto.rds.connect_to_region("us-west-2") @@ -191,7 +191,7 @@ def test_add_database_subnet_group(): def test_describe_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") conn = boto.rds.connect_to_region("us-west-2") conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) @@ -209,7 +209,7 @@ def test_describe_database_subnet_group(): def test_delete_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") conn = boto.rds.connect_to_region("us-west-2") conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) @@ -227,7 +227,7 @@ def test_delete_database_subnet_group(): def test_create_database_in_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") conn = boto.rds.connect_to_region("us-west-2") conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index cf9805444..a25b53196 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -1045,9 +1045,9 @@ def test_create_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet1 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] subnet2 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.2.0/24')['Subnet'] subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] conn = boto3.client('rds', region_name='us-west-2') @@ -1069,7 +1069,7 @@ def test_create_database_in_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', @@ -1094,7 +1094,7 @@ def test_describe_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = 
vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", @@ -1123,7 +1123,7 @@ def test_delete_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -1149,7 +1149,7 @@ def test_list_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -1176,7 +1176,7 @@ def test_add_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -1207,7 +1207,7 @@ def test_remove_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 759063329..8015472bf 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -239,7 +239,7 @@ def test_get_resources_elbv2(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') conn.create_load_balancer( From 7271fb9391016961633c00c36fcca76e26a7afc8 Mon Sep 17 00:00:00 2001 From: Alexander Mohr Date: Sat, 25 May 2019 12:19:33 -0700 Subject: [PATCH 40/41] Fix S3 backend operations with VersionId (#2055) * fix s3 issues * fix merge conflict * fix and add test cases --- moto/s3/models.py | 7 +++---- moto/s3/responses.py | 21 ++++++++++++++++----- tests/test_s3/test_s3.py | 23 +++++++++++++++++++++++ 3 files changed, 42 insertions(+), 9 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 59a7af580..7488114e3 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -912,12 +912,11 @@ class S3Backend(BaseBackend): return multipart.set_part(part_id, value) def copy_part(self, dest_bucket_name, multipart_id, part_id, - src_bucket_name, src_key_name, start_byte, end_byte): - src_key_name = clean_key_name(src_key_name) - src_bucket = self.get_bucket(src_bucket_name) + src_bucket_name, src_key_name, src_version_id, start_byte, end_byte): dest_bucket = self.get_bucket(dest_bucket_name) multipart = 
dest_bucket.multiparts[multipart_id] - src_value = src_bucket.keys[src_key_name].value + + src_value = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id).value if start_byte is not None: src_value = src_value[start_byte:end_byte + 1] return multipart.set_part(part_id, src_value) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 42d064828..e03666666 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -707,6 +707,8 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'x-amz-copy-source' in request.headers: src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/") src_bucket, src_key = src.split("/", 1) + + src_key, src_version_id = src_key.split("?versionId=") if "?versionId=" in src_key else (src_key, None) src_range = request.headers.get( 'x-amz-copy-source-range', '').split("bytes=")[-1] @@ -716,9 +718,13 @@ class ResponseObject(_TemplateEnvironmentMixin): except ValueError: start_byte, end_byte = None, None - key = self.backend.copy_part( - bucket_name, upload_id, part_number, src_bucket, - src_key, start_byte, end_byte) + if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + key = self.backend.copy_part( + bucket_name, upload_id, part_number, src_bucket, + src_key, src_version_id, start_byte, end_byte) + else: + return 404, response_headers, "" + template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE) response = template.render(part=key) else: @@ -757,8 +763,13 @@ class ResponseObject(_TemplateEnvironmentMixin): lstrip("/").split("/", 1) src_version_id = parse_qs(src_key_parsed.query).get( 'versionId', [None])[0] - self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, - storage=storage_class, acl=acl, src_version_id=src_version_id) + + if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, + storage=storage_class, acl=acl, src_version_id=src_version_id) + else: + return 404, response_headers, "" + new_key = self.backend.get_key(bucket_name, key_name) mdirective = request.headers.get('x-amz-metadata-directive') if mdirective is not None and mdirective == 'REPLACE': diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 1e9d25327..f26964ab7 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1529,6 +1529,28 @@ def test_boto3_copy_object_with_versioning(): # Version should be different to previous version obj2_version_new.should_not.equal(obj2_version) + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test2', 'VersionId': obj2_version}, Bucket='blah', Key='test3') + obj3_version_new = client.get_object(Bucket='blah', Key='test3')['VersionId'] + obj3_version_new.should_not.equal(obj2_version_new) + + # Copy file that doesn't exist + with assert_raises(ClientError) as e: + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test4', 'VersionId': obj2_version}, Bucket='blah', Key='test5') + e.exception.response['Error']['Code'].should.equal('404') + + response = client.create_multipart_upload(Bucket='blah', Key='test4') + upload_id = response['UploadId'] + response = client.upload_part_copy(Bucket='blah', Key='test4', CopySource={'Bucket': 'blah', 'Key': 'test3', 'VersionId': obj3_version_new}, + UploadId=upload_id, PartNumber=1) + etag = response["CopyPartResult"]["ETag"] + client.complete_multipart_upload( + Bucket='blah', Key='test4', UploadId=upload_id, + MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': 1}]}) + + response = 
client.get_object(Bucket='blah', Key='test4') + data = response["Body"].read() + data.should.equal(b'test2') + @mock_s3 def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): @@ -2762,6 +2784,7 @@ def test_boto3_multiple_delete_markers(): latest['Key'].should.equal('key-with-versions-and-unicode-ó') oldest['Key'].should.equal('key-with-versions-and-unicode-ó') + @mock_s3 def test_get_stream_gzipped(): payload = b"this is some stuff here" From 21917c4b936113e09133f21a988ff60d4d8e832d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bendeg=C3=BAz=20=C3=81cs?= <30595431+acsbendi@users.noreply.github.com> Date: Sun, 26 May 2019 03:02:14 +0200 Subject: [PATCH 41/41] Bug fix for default network ACL entries (#2056) * Fixed a bug where default network ACL entries could not be deleted. * Implemented throwing error when a network entry with the same rule number and egress value already exists. * Fixed syntax errors. * Added socket.timeout to possibly raised exceptions in wait_for for Python 3. --- moto/ec2/exceptions.py | 9 ++++++++ moto/ec2/models.py | 13 +++++++---- tests/test_ec2/test_network_acls.py | 36 +++++++++++++++++++++++++++++ wait_for.py | 3 ++- 4 files changed, 55 insertions(+), 6 deletions(-) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index d93ced163..1357d49e2 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -430,6 +430,15 @@ class OperationNotPermitted(EC2ClientError): ) +class NetworkAclEntryAlreadyExistsError(EC2ClientError): + + def __init__(self, rule_number): + super(NetworkAclEntryAlreadyExistsError, self).__init__( + "NetworkAclEntryAlreadyExists", + "The network acl entry identified by {} already exists.".format(rule_number) + ) + + class InvalidSubnetRangeError(EC2ClientError): def __init__(self, cidr_block): diff --git a/moto/ec2/models.py b/moto/ec2/models.py index c2e5970bd..b894853d2 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -76,6 +76,7 @@ from .exceptions import ( MalformedDHCPOptionsIdError, MissingParameterError, MotoNotImplementedError, + NetworkAclEntryAlreadyExistsError, OperationNotPermitted, OperationNotPermitted2, OperationNotPermitted3, @@ -3664,10 +3665,10 @@ class NetworkAclBackend(object): def add_default_entries(self, network_acl_id): default_acl_entries = [ - {'rule_number': 100, 'rule_action': 'allow', 'egress': 'true'}, - {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'true'}, - {'rule_number': 100, 'rule_action': 'allow', 'egress': 'false'}, - {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'false'} + {'rule_number': "100", 'rule_action': 'allow', 'egress': 'true'}, + {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'true'}, + {'rule_number': "100", 'rule_action': 'allow', 'egress': 'false'}, + {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'false'} ] for entry in default_acl_entries: self.create_network_acl_entry(network_acl_id=network_acl_id, rule_number=entry['rule_number'], protocol='-1', @@ -3698,12 +3699,14 @@ class NetworkAclBackend(object): icmp_code, icmp_type, port_range_from, port_range_to): + network_acl = self.get_network_acl(network_acl_id) + if any(entry.egress == egress and entry.rule_number == rule_number for entry in network_acl.network_acl_entries): + raise NetworkAclEntryAlreadyExistsError(rule_number) network_acl_entry = NetworkAclEntry(self, network_acl_id, rule_number, protocol, rule_action, egress, cidr_block, icmp_code, icmp_type, port_range_from, port_range_to) - network_acl = self.get_network_acl(network_acl_id) 
network_acl.network_acl_entries.append(network_acl_entry) return network_acl_entry diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index 5b7f107dd..d4c330f00 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import boto import boto3 import sure # noqa +from nose.tools import assert_raises +from botocore.exceptions import ClientError from moto import mock_ec2_deprecated, mock_ec2 @@ -214,3 +216,37 @@ def test_default_network_acl_default_entries(): unique_entries.append(entry) unique_entries.should.have.length_of(4) + + +@mock_ec2 +def test_delete_default_network_acl_default_entry(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + default_network_acl.entries.should.have.length_of(4) + first_default_network_acl_entry = default_network_acl.entries[0] + + default_network_acl.delete_entry(Egress=first_default_network_acl_entry['Egress'], + RuleNumber=first_default_network_acl_entry['RuleNumber']) + + default_network_acl.entries.should.have.length_of(3) + + +@mock_ec2 +def test_duplicate_network_acl_entry(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + rule_number = 200 + egress = True + default_network_acl.create_entry(CidrBlock="0.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="allow", RuleNumber=rule_number) + + with assert_raises(ClientError) as ex: + default_network_acl.create_entry(CidrBlock="10.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="deny", RuleNumber=rule_number) + str(ex.exception).should.equal( + "An error occurred (NetworkAclEntryAlreadyExists) when calling the CreateNetworkAclEntry " + "operation: The network acl entry identified by {} already exists.".format(rule_number)) + + diff --git a/wait_for.py b/wait_for.py index d313ea5a9..1f291c16b 100755 --- a/wait_for.py +++ b/wait_for.py @@ -12,8 +12,9 @@ except ImportError: # py3 import urllib.request as urllib from urllib.error import URLError + import socket - EXCEPTIONS = (URLError, ConnectionResetError) + EXCEPTIONS = (URLError, socket.timeout, ConnectionResetError) start_ts = time.time()
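
A closing note on the default-entry fix in the last patch: the change from 100 to "100" suggests the root cause was a type mismatch. Rule numbers arriving through the API are parsed from the query string as strings, while the built-in default entries stored ints, so the equality checks used for deletion (and now for duplicate detection) never matched a default entry. A short illustration of the failure mode, independent of moto:

default_rule_number = 100      # old default entry value (int)
requested_rule_number = "100"  # rule number as parsed from a request

assert default_rule_number != requested_rule_number  # old code: the lookup silently misses
assert "100" == requested_rule_number                # patched defaults store strings and match
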