From 5a7c711a74f3f91c86ac2527af09f4c40f49b6aa Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Fri, 25 Nov 2016 21:07:24 -0800 Subject: [PATCH 001/412] bring dynamodb2 update expression handling closer to spec --- moto/dynamodb2/models.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index a70d6347d..e9980410b 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -3,6 +3,7 @@ from collections import defaultdict import datetime import decimal import json +import re from moto.compat import OrderedDict from moto.core import BaseBackend @@ -110,27 +111,24 @@ class Item(object): } def update(self, update_expression, expression_attribute_names, expression_attribute_values): - ACTION_VALUES = ['SET', 'set', 'REMOVE', 'remove'] - - action = None - for value in update_expression.split(): - if value in ACTION_VALUES: - # An action - action = value - continue - else: + parts = [p for p in re.split(r'\b(SET|REMOVE|ADD|DELETE)\b', update_expression) if p] + for action, valstr in zip(parts[:-1:1], parts[1::1]): + values = valstr.split(',') + for value in values: # A Real value value = value.lstrip(":").rstrip(",") - for k, v in expression_attribute_names.items(): - value = value.replace(k, v) - if action == "REMOVE" or action == 'remove': - self.attrs.pop(value, None) - elif action == 'SET' or action == 'set': - key, value = value.split("=") - if value in expression_attribute_values: - self.attrs[key] = DynamoType(expression_attribute_values[value]) - else: - self.attrs[key] = DynamoType({"S": value}) + for k, v in expression_attribute_names.items(): + value = value.replace(k, v) + if action == "REMOVE" or action == 'remove': + self.attrs.pop(value, None) + elif action == 'SET' or action == 'set': + key, value = value.split("=") + key = key.strip() + value = value.strip() + if value in expression_attribute_values: + self.attrs[key] = 
DynamoType(expression_attribute_values[value]) + else: + self.attrs[key] = DynamoType({"S": value}) def update_with_attribute_updates(self, attribute_updates): for attribute_name, update_action in attribute_updates.items(): From 2c505615631c2958e7b5f4a3c196b5386370a9e0 Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Tue, 29 Nov 2016 14:04:23 -0800 Subject: [PATCH 002/412] fix decoding keys in query condition --- moto/dynamodb2/models.py | 7 ++++--- moto/dynamodb2/responses.py | 9 ++++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index e9980410b..2c6d8d60f 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -119,9 +119,9 @@ class Item(object): value = value.lstrip(":").rstrip(",") for k, v in expression_attribute_names.items(): value = value.replace(k, v) - if action == "REMOVE" or action == 'remove': + if action == "REMOVE": self.attrs.pop(value, None) - elif action == 'SET' or action == 'set': + elif action == 'SET': key, value = value.split("=") key = key.strip() value = value.strip() @@ -129,6 +129,8 @@ class Item(object): self.attrs[key] = DynamoType(expression_attribute_values[value]) else: self.attrs[key] = DynamoType({"S": value}) + else: + raise NotImplementedError('{} update action not yet supported'.format(action)) def update_with_attribute_updates(self, attribute_updates): for attribute_name, update_action in attribute_updates.items(): @@ -323,7 +325,6 @@ class Table(object): def query(self, hash_key, range_comparison, range_objs, limit, exclusive_start_key, scan_index_forward, index_name=None, **filter_kwargs): results = [] - if index_name: all_indexes = (self.global_indexes or []) + (self.indexes or []) indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 081afc2c4..eea2bace5 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -279,19 +279,22 
@@ class DynamoHandler(BaseResponse): else: index = table.schema - key_map = [column for _, column in sorted((k, v) for k, v in self.body['ExpressionAttributeNames'].items())] + reverse_attribute_lookup = {v: k for k, v in self.body['ExpressionAttributeNames'].iteritems()} if " AND " in key_condition_expression: expressions = key_condition_expression.split(" AND ", 1) index_hash_key = [key for key in index if key['KeyType'] == 'HASH'][0] - hash_key_index_in_key_map = key_map.index(index_hash_key['AttributeName']) + hash_key_var = reverse_attribute_lookup.get(index_hash_key['AttributeName'], index_hash_key['AttributeName']) + i, hash_key_expression = ((i, e) for i, e in enumerate(expressions) if re.search(r'[\s(]#n1\b'.format(hash_key_var), e)).next() + hash_key_expression = hash_key_expression.strip('()') + expressions.pop(i) - hash_key_expression = expressions.pop(hash_key_index_in_key_map).strip('()') # TODO implement more than one range expression and OR operators range_key_expression = expressions[0].strip('()') range_key_expression_components = range_key_expression.split() range_comparison = range_key_expression_components[1] + if 'AND' in range_key_expression: range_comparison = 'BETWEEN' range_values = [ From 98a39cf4b53ffeff99cfc31c990b6fafefe54089 Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Tue, 6 Dec 2016 12:14:57 -0800 Subject: [PATCH 003/412] account for keys potentially being substrings of other keys (e.g. 
#c1 and #c10) --- moto/dynamodb2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 2c6d8d60f..5e7888842 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -118,7 +118,7 @@ class Item(object): # A Real value value = value.lstrip(":").rstrip(",") for k, v in expression_attribute_names.items(): - value = value.replace(k, v) + value = re.sub(r'{}\b'.format(k), v, value) if action == "REMOVE": self.attrs.pop(value, None) elif action == 'SET': From 390bef77521a683c3599198fc57a3375d6d79137 Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Tue, 6 Dec 2016 16:57:36 -0800 Subject: [PATCH 004/412] fake change to force push because github was broken --- moto/dynamodb2/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 5e7888842..0e88d594b 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -119,6 +119,7 @@ class Item(object): value = value.lstrip(":").rstrip(",") for k, v in expression_attribute_names.items(): value = re.sub(r'{}\b'.format(k), v, value) + if action == "REMOVE": self.attrs.pop(value, None) elif action == 'SET': From 3c128fdb51d7cdb81f8128fe73a5adca6daf4c6a Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Wed, 7 Dec 2016 11:47:48 -0800 Subject: [PATCH 005/412] correct looping through update actions, value stripping, hash key regex --- moto/dynamodb2/models.py | 4 ++-- moto/dynamodb2/responses.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 0e88d594b..15a3e3ba3 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -112,11 +112,11 @@ class Item(object): def update(self, update_expression, expression_attribute_names, expression_attribute_values): parts = [p for p in re.split(r'\b(SET|REMOVE|ADD|DELETE)\b', update_expression) if p] - for action, valstr in zip(parts[:-1:1], 
parts[1::1]): + for action, valstr in zip(parts[:-1:2], parts[1::2]): values = valstr.split(',') for value in values: # A Real value - value = value.lstrip(":").rstrip(",") + value = value.lstrip(":").rstrip(",").strip() for k, v in expression_attribute_names.items(): value = re.sub(r'{}\b'.format(k), v, value) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index eea2bace5..39cdaae4e 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -286,7 +286,8 @@ class DynamoHandler(BaseResponse): index_hash_key = [key for key in index if key['KeyType'] == 'HASH'][0] hash_key_var = reverse_attribute_lookup.get(index_hash_key['AttributeName'], index_hash_key['AttributeName']) - i, hash_key_expression = ((i, e) for i, e in enumerate(expressions) if re.search(r'[\s(]#n1\b'.format(hash_key_var), e)).next() + hash_key_regex = r'(^|[\s(]){}\b'.format(hash_key_var) + i, hash_key_expression = ((i, e) for i, e in enumerate(expressions) if re.search(hash_key_regex, e)).next() hash_key_expression = hash_key_expression.strip('()') expressions.pop(i) From 0c875fd268d06d6b7fe8084537ae2bd3d653f8e6 Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Wed, 7 Dec 2016 13:31:15 -0800 Subject: [PATCH 006/412] fixes for python 2.6 and 3 --- moto/dynamodb2/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 39cdaae4e..815bd9f57 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -279,7 +279,7 @@ class DynamoHandler(BaseResponse): else: index = table.schema - reverse_attribute_lookup = {v: k for k, v in self.body['ExpressionAttributeNames'].iteritems()} + reverse_attribute_lookup = dict((v, k) for k, v in six.iteritems(self.body['ExpressionAttributeNames'])) if " AND " in key_condition_expression: expressions = key_condition_expression.split(" AND ", 1) From 114de9ba0b8e745a0054408713dafdfb535d0ec3 Mon Sep 17 00:00:00 2001 From: Peter 
Gorniak Date: Wed, 7 Dec 2016 13:55:26 -0800 Subject: [PATCH 007/412] more fixes for 2.6 and 3 --- moto/dynamodb2/responses.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 815bd9f57..636a0f9d3 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -286,8 +286,8 @@ class DynamoHandler(BaseResponse): index_hash_key = [key for key in index if key['KeyType'] == 'HASH'][0] hash_key_var = reverse_attribute_lookup.get(index_hash_key['AttributeName'], index_hash_key['AttributeName']) - hash_key_regex = r'(^|[\s(]){}\b'.format(hash_key_var) - i, hash_key_expression = ((i, e) for i, e in enumerate(expressions) if re.search(hash_key_regex, e)).next() + hash_key_regex = r'(^|[\s(]){0}\b'.format(hash_key_var) + i, hash_key_expression = next((i, e) for i, e in enumerate(expressions) if re.search(hash_key_regex, e)) hash_key_expression = hash_key_expression.strip('()') expressions.pop(i) From d4a31e5e50c1eca8fb6f885c60b56c15a50dce09 Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Thu, 8 Dec 2016 14:34:21 -0800 Subject: [PATCH 008/412] unit tests did not catch this, but this will not work under python 2.6 --- moto/dynamodb2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 15a3e3ba3..4bca83582 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -118,7 +118,7 @@ class Item(object): # A Real value value = value.lstrip(":").rstrip(",").strip() for k, v in expression_attribute_names.items(): - value = re.sub(r'{}\b'.format(k), v, value) + value = re.sub(r'{0}\b'.format(k), v, value) if action == "REMOVE": self.attrs.pop(value, None) From aad1e177870a8a418f38b85c2826b08d26fd48b7 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Thu, 11 May 2017 09:35:24 -0700 Subject: [PATCH 009/412] Shorter sleeps in SQS test One of these tests actually waited the entire 60 seconds 
of the visibility timeout but that value appears to have been copied from a previous test that didn't. Updating all tests with shorter timeouts so folks who copy setup code in the future don't fall into this trap --- tests/test_sqs/test_sqs.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 0df4c2dc9..c7d067bd8 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -126,7 +126,7 @@ def test_delete_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') conn = boto3.client("sqs", region_name='us-east-1') conn.create_queue(QueueName="test-queue", - Attributes={"VisibilityTimeout": "60"}) + Attributes={"VisibilityTimeout": "3"}) queue = sqs.Queue('test-queue') conn.list_queues()['QueueUrls'].should.have.length_of(1) @@ -143,10 +143,10 @@ def test_set_queue_attribute(): sqs = boto3.resource('sqs', region_name='us-east-1') conn = boto3.client("sqs", region_name='us-east-1') conn.create_queue(QueueName="test-queue", - Attributes={"VisibilityTimeout": '60'}) + Attributes={"VisibilityTimeout": '3'}) queue = sqs.Queue("test-queue") - queue.attributes['VisibilityTimeout'].should.equal('60') + queue.attributes['VisibilityTimeout'].should.equal('3') queue.set_attributes(Attributes={"VisibilityTimeout": '45'}) queue = sqs.Queue("test-queue") @@ -176,7 +176,7 @@ def test_send_message(): @mock_sqs_deprecated def test_send_message_with_xml_characters(): conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = conn.create_queue("test-queue", visibility_timeout=3) queue.set_message_class(RawMessage) body_one = '< & >' @@ -192,7 +192,7 @@ def test_send_message_with_xml_characters(): @mock_sqs_deprecated def test_send_message_with_attributes(): conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = 
conn.create_queue("test-queue", visibility_timeout=3) queue.set_message_class(RawMessage) body = 'this is a test message' @@ -217,13 +217,13 @@ def test_send_message_with_attributes(): @mock_sqs_deprecated def test_send_message_with_delay(): conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = conn.create_queue("test-queue", visibility_timeout=3) queue.set_message_class(RawMessage) body_one = 'this is a test message' body_two = 'this is another test message' - queue.write(queue.new_message(body_one), delay_seconds=60) + queue.write(queue.new_message(body_one), delay_seconds=3) queue.write(queue.new_message(body_two)) queue.count().should.equal(1) @@ -238,7 +238,7 @@ def test_send_message_with_delay(): @mock_sqs_deprecated def test_send_large_message_fails(): conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = conn.create_queue("test-queue", visibility_timeout=3) queue.set_message_class(RawMessage) body_one = 'test message' * 200000 @@ -271,7 +271,7 @@ def test_message_becomes_inflight_when_received(): @mock_sqs_deprecated def test_receive_message_with_explicit_visibility_timeout(): conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = conn.create_queue("test-queue", visibility_timeout=3) queue.set_message_class(RawMessage) body_one = 'this is another test message' @@ -360,7 +360,7 @@ def test_read_message_from_queue(): @mock_sqs_deprecated def test_queue_length(): conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = conn.create_queue("test-queue", visibility_timeout=3) queue.set_message_class(RawMessage) queue.write(queue.new_message('this is a test message')) @@ -371,7 +371,7 @@ def test_queue_length(): @mock_sqs_deprecated def test_delete_message(): conn = boto.connect_sqs('the_key', 
'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = conn.create_queue("test-queue", visibility_timeout=3) queue.set_message_class(RawMessage) queue.write(queue.new_message('this is a test message')) @@ -392,7 +392,7 @@ def test_delete_message(): @mock_sqs_deprecated def test_send_batch_operation(): conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = conn.create_queue("test-queue", visibility_timeout=3) # See https://github.com/boto/boto/issues/831 queue.set_message_class(RawMessage) @@ -414,7 +414,7 @@ def test_send_batch_operation(): @mock_sqs_deprecated def test_send_batch_operation_with_message_attributes(): conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = conn.create_queue("test-queue", visibility_timeout=3) queue.set_message_class(RawMessage) message_tuple = ("my_first_message", 'test message 1', 0, { @@ -431,7 +431,7 @@ def test_send_batch_operation_with_message_attributes(): @mock_sqs_deprecated def test_delete_batch_operation(): conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + queue = conn.create_queue("test-queue", visibility_timeout=3) conn.send_message_batch(queue, [ ("my_first_message", 'test message 1', 0), @@ -450,7 +450,7 @@ def test_queue_attributes(): conn = boto.connect_sqs('the_key', 'the_secret') queue_name = 'test-queue' - visibility_timeout = 60 + visibility_timeout = 3 queue = conn.create_queue( queue_name, visibility_timeout=visibility_timeout) From 517416c4d95eb31d40ac3fa81921ac6873f3a72b Mon Sep 17 00:00:00 2001 From: Simon-Pierre Gingras Date: Fri, 19 May 2017 15:59:25 -0700 Subject: [PATCH 010/412] feat(s3) HeadObject: honor If-Modified-Since header --- moto/core/utils.py | 8 +++++++- moto/s3/responses.py | 12 +++++++++++- tests/test_s3/test_s3.py | 28 ++++++++++++++++++++++++++++ 3 files 
changed, 46 insertions(+), 2 deletions(-) diff --git a/moto/core/utils.py b/moto/core/utils.py index 7d4a9d412..9ee0c1814 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -174,11 +174,17 @@ def iso_8601_datetime_without_milliseconds(datetime): return datetime.strftime("%Y-%m-%dT%H:%M:%S") + 'Z' +RFC1123 = '%a, %d %b %Y %H:%M:%S GMT' + + def rfc_1123_datetime(datetime): - RFC1123 = '%a, %d %b %Y %H:%M:%S GMT' return datetime.strftime(RFC1123) +def str_to_rfc_1123_datetime(str): + return datetime.datetime.strptime(str, RFC1123) + + def unix_time(dt=None): dt = dt or datetime.datetime.utcnow() epoch = datetime.datetime.utcfromtimestamp(0) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index fd33c5ead..43e27a815 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals import re import six +from moto.core.utils import str_to_rfc_1123_datetime from six.moves.urllib.parse import parse_qs, urlparse import xmltodict @@ -595,12 +596,21 @@ class ResponseObject(_TemplateEnvironmentMixin): def _key_response_head(self, bucket_name, query, key_name, headers): response_headers = {} version_id = query.get('versionId', [None])[0] + + if_modified_since = headers.get('if-modified-since', None) + if if_modified_since: + if_modified_since = str_to_rfc_1123_datetime(if_modified_since) + key = self.backend.get_key( bucket_name, key_name, version_id=version_id) if key: response_headers.update(key.metadata) response_headers.update(key.response_dict) - return 200, response_headers, "" + + if if_modified_since and key.last_modified < if_modified_since: + return 304, response_headers, 'Not Modified' + else: + return 200, response_headers, "" else: return 404, response_headers, "" diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index de9c6a7de..6af653f9e 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- from __future__ import 
unicode_literals + +import datetime from six.moves.urllib.request import urlopen from six.moves.urllib.error import HTTPError from functools import wraps @@ -10,6 +12,7 @@ import json import boto import boto3 from botocore.client import ClientError +import botocore.exceptions from boto.exception import S3CreateError, S3ResponseError from boto.s3.connection import S3Connection from boto.s3.key import Key @@ -1266,6 +1269,31 @@ def test_boto3_head_object_with_versioning(): old_head_object['ContentLength'].should.equal(len(old_content)) +@mock_s3 +def test_boto3_head_object_if_modified_since(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = 'hello.txt' + + with freeze_time(datetime.datetime.now() - datetime.timedelta(hours=3)): + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.head_object( + Bucket=bucket_name, + Key=key, + IfModifiedSince=datetime.datetime.now() - datetime.timedelta(hours=2) + ) + e = err.exception + e.response['Error'].should.equal({'Code': '304', 'Message': 'Not Modified'}) + + @mock_s3 @reduced_min_part_size def test_boto3_multipart_etag(): From 588e211c7157d04d8c9ab31a8bb3abfcc0a18b6e Mon Sep 17 00:00:00 2001 From: Mike Bjerkness Date: Mon, 22 May 2017 15:49:59 -0500 Subject: [PATCH 011/412] Adding ECR --- moto/__init__.py | 1 + moto/ecr/__init__.py | 7 ++ moto/ecr/models.py | 221 ++++++++++++++++++++++++++++++++++++++++++ moto/ecr/responses.py | 72 ++++++++++++++ moto/ecr/urls.py | 10 ++ 5 files changed, 311 insertions(+) create mode 100644 moto/ecr/__init__.py create mode 100644 moto/ecr/models.py create mode 100644 moto/ecr/responses.py create mode 100644 moto/ecr/urls.py diff --git a/moto/__init__.py b/moto/__init__.py index c93719cb2..d6f84db5e 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -14,6 +14,7 @@ from .datapipeline import mock_datapipeline, 
mock_datapipeline_deprecated # fla from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa from .ec2 import mock_ec2, mock_ec2_deprecated # flake8: noqa +from .ecr import mock_ecr, mock_ecr_deprecated # flake8: noqa from .ecs import mock_ecs, mock_ecs_deprecated # flake8: noqa from .elb import mock_elb, mock_elb_deprecated # flake8: noqa from .emr import mock_emr, mock_emr_deprecated # flake8: noqa diff --git a/moto/ecr/__init__.py b/moto/ecr/__init__.py new file mode 100644 index 000000000..56b2cacbb --- /dev/null +++ b/moto/ecr/__init__.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals +from .models import ecr_backends +from ..core.models import base_decorator, deprecated_base_decorator + +ecr_backend = ecr_backends['us-east-1'] +mock_ecr = base_decorator(ecr_backends) +mock_ecr_deprecated = deprecated_base_decorator(ecr_backends) diff --git a/moto/ecr/models.py b/moto/ecr/models.py new file mode 100644 index 000000000..5f8255007 --- /dev/null +++ b/moto/ecr/models.py @@ -0,0 +1,221 @@ +from __future__ import unicode_literals +# from datetime import datetime +from random import random + +from moto.core import BaseBackend, BaseModel +from moto.ec2 import ec2_backends +from copy import copy +import hashlib + + +class BaseObject(BaseModel): + + def camelCase(self, key): + words = [] + for i, word in enumerate(key.split('_')): + if i > 0: + words.append(word.title()) + else: + words.append(word) + return ''.join(words) + + def gen_response_object(self): + response_object = copy(self.__dict__) + for key, value in response_object.items(): + if '_' in key: + response_object[self.camelCase(key)] = value + del response_object[key] + return response_object + + @property + def response_object(self): + return self.gen_response_object() + + +class Repository(BaseObject): + + def __init__(self, repository_name): + self.arn = 
'arn:aws:ecr:us-east-1:012345678910:repository/{0}'.format( + repository_name) + self.name = repository_name + # self.created = datetime.utcnow() + self.uri = '012345678910.dkr.ecr.us-east-1.amazonaws.com/{0}'.format( + repository_name + ) + self.registry_id = '012345678910' + self.images = [] + + @property + def physical_resource_id(self): + return self.name + + @property + def response_object(self): + response_object = self.gen_response_object() + + response_object['registryId'] = self.registry_id + response_object['repositoryArn'] = self.arn + response_object['repositoryName'] = self.name + response_object['repositoryUri'] = self.uri + # response_object['createdAt'] = self.created + del response_object['arn'], response_object['name'] + return response_object + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + ecr_backend = ecr_backends[region_name] + return ecr_backend.create_repository( + # RepositoryName is optional in CloudFormation, thus create a random + # name if necessary + repository_name=properties.get( + 'RepositoryName', 'ecrrepository{0}'.format(int(random() * 10 ** 6))), + ) + + @classmethod + def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + if original_resource.name != properties['RepositoryName']: + ecr_backend = ecr_backends[region_name] + ecr_backend.delete_cluster(original_resource.arn) + return ecr_backend.create_repository( + # RepositoryName is optional in CloudFormation, thus create a + # random name if necessary + repository_name=properties.get( + 'RepositoryName', 'RepositoryName{0}'.format(int(random() * 10 ** 6))), + ) + else: + # no-op when nothing changed between old and new resources + return original_resource + + +class Image(BaseObject): + + def __init__(self, tag, manifest, repository, 
registry_id="012345678910"): + self.image_tag = tag + self.image_manifest = manifest + self.image_size_in_bytes = 50 * 1024 * 1024 + self.repository = repository + self.registry_id = registry_id + self.image_digest = None + self.image_pushed_at = None + + def _create_digest(self): + image_contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) + self.image_digest = "sha256:%s" % hashlib.sha256(image_contents).hexdigest() + + def get_image_digest(self): + if not self.image_digest: + self._create_digest() + return self.image_digest + + @property + def response_object(self): + response_object = self.gen_response_object() + response_object['imageId'] = {} + response_object['imageId']['imageTag'] = self.image_tag + response_object['imageId']['imageDigest'] = self.get_image_digest() + response_object['imageManifest'] = self.image_manifest + response_object['repositoryName'] = self.repository + response_object['registryId'] = self.registry_id + return response_object + + @property + def response_list_object(self): + response_object = self.gen_response_object() + response_object['imageTag'] = self.image_tag + response_object['imageDigest'] = "i don't know" + return response_object + + @property + def response_describe_object(self): + response_object = self.gen_response_object() + response_object['imageTags'] = [self.image_tag] + response_object['imageDigest'] = self.get_image_digest() + response_object['imageManifest'] = self.image_manifest + response_object['repositoryName'] = self.repository + response_object['registryId'] = self.registry_id + response_object['imageSizeInBytes'] = self.image_size_in_bytes + response_object['imagePushedAt'] = '2017-05-09' + return response_object + + +class ECRBackend(BaseBackend): + + def __init__(self): + self.repositories = {} + + def describe_repositories(self, registry_id=None, repository_names=None): + """ + maxResults and nextToken not implemented + """ + repositories = [] + for repository in self.repositories.values(): + # If 
a registry_id was supplied, ensure this repository matches + if registry_id: + if repository.registry_id != registry_id: + continue + # If a list of repository names was supplied, esure this repository + # is in that list + if repository_names: + if repository.name not in repository_names: + continue + repositories.append(repository.response_object) + return repositories + + def create_repository(self, repository_name): + repository = Repository(repository_name) + self.repositories[repository_name] = repository + return repository + + def delete_repository(self, respository_name, registry_id=None): + if respository_name in self.repositories: + return self.repositories.pop(respository_name) + else: + raise Exception("{0} is not a repository".format(respository_name)) + + def list_images(self, repository_name, registry_id=None): + """ + maxResults and filtering not implemented + """ + images = [] + for repository in self.repositories.values(): + if repository_name: + if repository.name != repository_name: + continue + if registry_id: + if repository.registry_id != registry_id: + continue + + for image in repository.images: + images.append(image) + return images + + def describe_images(self, repository_name, registry_id=None, image_id=None): + + if repository_name in self.repositories: + repository = self.repositories[repository_name] + else: + raise Exception("{0} is not a repository".format(repository_name)) + + response = [] + for image in repository.images: + response.append(image) + return response + + def put_image(self, repository_name, image_manifest, image_tag): + if repository_name in self.repositories: + repository = self.repositories[repository_name] + else: + raise Exception("{0} is not a repository".format(repository_name)) + + image = Image(image_tag, image_manifest, repository_name) + repository.images.append(image) + return image + + +ecr_backends = {} +for region, ec2_backend in ec2_backends.items(): + ecr_backends[region] = ECRBackend() diff --git 
a/moto/ecr/responses.py b/moto/ecr/responses.py new file mode 100644 index 000000000..3a37162a0 --- /dev/null +++ b/moto/ecr/responses.py @@ -0,0 +1,72 @@ +from __future__ import unicode_literals +import json + +from moto.core.responses import BaseResponse +from .models import ecr_backends + + +class ECRResponse(BaseResponse): + + @property + def ecr_backend(self): + return ecr_backends[self.region] + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + def _get_param(self, param): + return self.request_params.get(param, None) + + def create_repository(self): + repository_name = self._get_param('repositoryName') + if repository_name is None: + repository_name = 'default' + repository = self.ecr_backend.create_repository(repository_name) + return json.dumps({ + 'repository': repository.response_object + }) + + def describe_repositories(self): + describe_repositories_name = self._get_param('repositoryNames') + repositories = self.ecr_backend.describe_repositories(describe_repositories_name) + return json.dumps({ + 'repositories': repositories, + 'failures': [] + }) + + def delete_repository(self): + repository_str = self._get_param('repositoryName') + repository = self.ecr_backend.delete_repository(repository_str) + return json.dumps({ + 'repository': repository.response_object + }) + + def put_image(self): + repository_str = self._get_param('repositoryName') + image_manifest = self._get_param('imageManifest') + image_tag = self._get_param('imageTag') + image = self.ecr_backend.put_image(repository_str, image_manifest, image_tag) + + return json.dumps({ + 'image': image.response_object + }) + + def list_images(self): + repository_str = self._get_param('repositoryName') + registry_id = self._get_param('registryId') + images = self.ecr_backend.list_images(repository_str, registry_id) + return json.dumps({ + 'imageIds': [image.response_list_object for image in images], + }) + + def describe_images(self): + 
repository_str = self._get_param('repositoryName') + registry_id = self._get_param('registryId') + images = self.ecr_backend.describe_images(repository_str, registry_id) + return json.dumps({ + 'imageDetails': [image.response_describe_object for image in images], + }) diff --git a/moto/ecr/urls.py b/moto/ecr/urls.py new file mode 100644 index 000000000..86b8a8dbc --- /dev/null +++ b/moto/ecr/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import ECRResponse + +url_bases = [ + "https?://ecr.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': ECRResponse.dispatch, +} From 20b30695403f79e240929a76b9f8604484c069fc Mon Sep 17 00:00:00 2001 From: Mike Bjerkness Date: Mon, 22 May 2017 15:52:17 -0500 Subject: [PATCH 012/412] Add ECR tests --- tests/test_ecr/test_ecr_boto3.py | 241 +++++++++++++++++++++++++++++++ 1 file changed, 241 insertions(+) create mode 100644 tests/test_ecr/test_ecr_boto3.py diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py new file mode 100644 index 000000000..ce5a54b17 --- /dev/null +++ b/tests/test_ecr/test_ecr_boto3.py @@ -0,0 +1,241 @@ +from __future__ import unicode_literals + +# from nose.tools import assert_raises +import hashlib +import json +from random import random + +import sure # noqa + +import boto3 + +from moto import mock_ecr +import datetime + + +def _create_image_digest(contents=None): + if not contents: + contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) + return "sha256:%s" % hashlib.sha256(contents).hexdigest() + + +def _create_image_manifest(): + return { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": + { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 7023, + "digest": _create_image_digest("config") + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 32654, + "digest": _create_image_digest("layer1") + }, + { + 
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 16724, + "digest": _create_image_digest("layer2") + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 73109, + "digest": _create_image_digest("layer3") + } + ] + } + + +@mock_ecr +def test_create_repository(): + client = boto3.client('ecr', region_name='us-east-1') + response = client.create_repository( + repositoryName='test_ecr_repository' + ) + response['repository']['repositoryName'].should.equal('test_ecr_repository') + response['repository']['repositoryArn'].should.equal( + 'arn:aws:ecr:us-east-1:012345678910:repository/test_ecr_repository') + response['repository']['registryId'].should.equal('012345678910') + response['repository']['repositoryUri'].should.equal( + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_ecr_repository') + # response['repository']['createdAt'].should.equal(0) + + +@mock_ecr +def test_describe_repositories(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories() + len(response['repositories']).should.equal(2) + + respository_arns = ['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', + 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] + set([response['repositories'][0]['repositoryArn'], + response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) + + respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] + set([response['repositories'][0]['repositoryUri'], + response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) + + +@mock_ecr +def test_delete_repository(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + 
repositoryName='test_repository' + ) + response = client.delete_repository(repositoryName='test_repository') + response['repository']['repositoryName'].should.equal('test_repository') + response['repository']['repositoryArn'].should.equal( + 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository') + response['repository']['registryId'].should.equal('012345678910') + response['repository']['repositoryUri'].should.equal( + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository') + # response['repository']['createdAt'].should.equal(0) + + response = client.describe_repositories() + len(response['repositories']).should.equal(0) + + +@mock_ecr +def test_put_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + response['image']['repositoryName'].should.equal('test_repository') + response['image']['imageId']['imageTag'].should.equal('latest') + + +@mock_ecr +def test_list_images(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.list_images(repositoryName='test_repository') + type(response['imageIds']).should.be(list) + len(response['imageIds']).should.be(3) + + image_tags = ['latest', 'v1', 'v2'] + set([response['imageIds'][0]['imageTag'], + response['imageIds'][1]['imageTag'], + 
response['imageIds'][2]['imageTag']]).should.equal(set(image_tags)) + + +@mock_ecr +def test_describe_images(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.describe_images(repositoryName='test_repository') + type(response['imageDetails']).should.be(list) + len(response['imageDetails']).should.be(3) + + response['imageDetails'][0]['imageDigest'].should.contain("sha") + response['imageDetails'][1]['imageDigest'].should.contain("sha") + response['imageDetails'][2]['imageDigest'].should.contain("sha") + + response['imageDetails'][0]['registryId'].should.equal("012345678910") + response['imageDetails'][1]['registryId'].should.equal("012345678910") + response['imageDetails'][2]['registryId'].should.equal("012345678910") + + response['imageDetails'][0]['repositoryName'].should.equal("test_repository") + response['imageDetails'][1]['repositoryName'].should.equal("test_repository") + response['imageDetails'][2]['repositoryName'].should.equal("test_repository") + + len(response['imageDetails'][0]['imageTags']).should.be(1) + len(response['imageDetails'][1]['imageTags']).should.be(1) + len(response['imageDetails'][2]['imageTags']).should.be(1) + + image_tags = ['latest', 'v1', 'v2'] + set([response['imageDetails'][0]['imageTags'][0], + response['imageDetails'][1]['imageTags'][0], + response['imageDetails'][2]['imageTags'][0]]).should.equal(set(image_tags)) + + response['imageDetails'][0]['imageSizeInBytes'].should.equal(52428800) + 
response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) + response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) + + # response['imageDetails'][0]['imagePushedAt'].should.equal('2017-05-09') + # response['imageDetails'][1]['imagePushedAt'].should.equal('2017-05-09') + # response['imageDetails'][2]['imagePushedAt'].should.equal('2017-05-09') + + ''' + image_digests = [ + "hi", "mike", "name" + ] + set([response['imageDetails'][0]['imageDigest'], + response['imageDetails'][1]['imageDigest'], + response['imageDetails'][2]['imageDigest']]).should.equal(set(image_digests)) + ''' + + +''' +'imageDetails': [ + { + 'registryId': 'string', + 'repositoryName': 'string', + 'imageDigest': 'string', + 'imageTags': [ + 'string', + ], + 'imageSizeInBytes': 123, + 'imagePushedAt': datetime(2015, 1, 1) + }, +], +''' From 5e88b5d1b49bc853f67969b2b7cc0b9bc346d9f9 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Wed, 26 Apr 2017 23:40:28 -0700 Subject: [PATCH 013/412] MD5 calculation of SQS message attributes This implements the same MD5 hashing pattern as implemented in the Ruby and Java AWS SDKs Doesn't yet handle list types but if you're reading this you might be surprised how easy that is to add. Give it a shot and if you get stuck reach out to me for help. 
--- moto/sqs/models.py | 55 +++++++++++++++++++++++++++++++++++--- moto/sqs/responses.py | 18 +++++-------- tests/test_sqs/test_sqs.py | 38 ++++++++++++++++++++++++-- 3 files changed, 93 insertions(+), 18 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index cedf03199..f8b7d91b1 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -1,7 +1,9 @@ from __future__ import unicode_literals +import base64 import hashlib import re +import struct from xml.sax.saxutils import escape import boto.sqs @@ -17,6 +19,8 @@ from .exceptions import ( DEFAULT_ACCOUNT_ID = 123456789012 DEFAULT_SENDER_ID = "AIDAIT2UOQQY3AUEKVGXU" +TRANSPORT_TYPE_ENCODINGS = {'String': b'\x01', 'Binary': b'\x02', 'Number': b'\x01'} + class Message(BaseModel): @@ -33,10 +37,53 @@ class Message(BaseModel): self.delayed_until = 0 @property - def md5(self): - body_md5 = hashlib.md5() - body_md5.update(self._body.encode('utf-8')) - return body_md5.hexdigest() + def body_md5(self): + md5 = hashlib.md5() + md5.update(self._body.encode('utf-8')) + return md5.hexdigest() + + @property + def attribute_md5(self): + """ + The MD5 of all attributes is calculated by first generating a + utf-8 string from each attribute and MD5-ing the concatenation + of them all. Each attribute is encoded with some bytes that + describe the length of each part and the type of attribute. 
+ + Not yet implemented: + List types (https://github.com/aws/aws-sdk-java/blob/7844c64cf248aed889811bf2e871ad6b276a89ca/aws-java-sdk-sqs/src/main/java/com/amazonaws/services/sqs/MessageMD5ChecksumHandler.java#L58k) + """ + md5 = hashlib.md5() + for name in sorted(self.message_attributes.keys()): + attr = self.message_attributes[name] + data_type = attr['data_type'] + + encoded = ''.encode('utf-8') + # Each part of each attribute is encoded right after it's + # own length is packed into a 4-byte integer + # 'timestamp' -> b'\x00\x00\x00\t' + encoded += struct.pack("!I", len(name.encode('utf-8'))) + name.encode('utf-8') + # The datatype is additionally given a final byte + # representing which type it is + encoded += struct.pack("!I", len(data_type)).encode('utf-8') + data_type.encode('utf-8') + encoded += TRANSPORT_TYPE_ENCODINGS[data_type] + + if data_type == 'String' or data_type == 'Number': + value = attr['string_value'] + elif data_type == 'Binary': + value = base64.b64decode(attr['binary_value']) + else: + print("Moto hasn't implemented MD5 hashing for {} attributes".format(data_type)) + # The following should be enough of a clue to users that + # they are not, in fact, looking at a correct MD5 while + # also following the character and length constraints of + # MD5 so as not to break client softwre + return('deadbeefdeadbeefdeadbeefdeadbeef') + + encoded += struct.pack("!I", len(value.encode('utf-8'))) + value.encode('utf-8') + + md5.update(encoded) + return md5.hexdigest() @property def body(self): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 75602b1b7..53bbac6ef 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -337,11 +337,9 @@ SET_QUEUE_ATTRIBUTE_RESPONSE = """ SEND_MESSAGE_RESPONSE = """ - {{- message.md5 -}} + {{- message.body_md5 -}} - {% if message.message_attributes.items()|count > 0 %} - 324758f82d026ac6ec5b31a3b192d1e3 - {% endif %} + {{- message.attribute_md5 -}} {{- message.id -}} @@ -357,7 +355,7 @@ 
RECEIVE_MESSAGE_RESPONSE = """ {{ message.id }} {{ message.receipt_handle }} - {{ message.md5 }} + {{ message.body_md5 }} {{ message.body }} SenderId @@ -375,9 +373,7 @@ RECEIVE_MESSAGE_RESPONSE = """ ApproximateFirstReceiveTimestamp {{ message.approximate_first_receive_timestamp }} - {% if message.message_attributes.items()|count > 0 %} - 324758f82d026ac6ec5b31a3b192d1e3 - {% endif %} + {{- message.attribute_md5 -}} {% for name, value in message.message_attributes.items() %} {{ name }} @@ -405,10 +401,8 @@ SEND_MESSAGE_BATCH_RESPONSE = """ {{ message.user_id }} {{ message.id }} - {{ message.md5 }} - {% if message.message_attributes.items()|count > 0 %} - 324758f82d026ac6ec5b31a3b192d1e3 - {% endif %} + {{ message.body_md5 }} + {{- message.attribute_md5 -}} {% endfor %} diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index f179d9f85..987efa3d5 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -43,10 +43,44 @@ def test_get_inexistent_queue(): def test_message_send(): sqs = boto3.resource('sqs', region_name='us-east-1') queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message(MessageBody="derp") - + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359900', + 'DataType': 'Number', + } + } + ) msg.get('MD5OfMessageBody').should.equal( '58fd9edd83341c29f1aebba81c31e257') + msg.get('MD5OfMessageAttributes').should.equal( + '235c5c510d26fb653d073faed50ae77c') + msg.get('ResponseMetadata', {}).get('RequestId').should.equal( + '27daac76-34dd-47df-bd01-1f6e873584a0') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_with_complex_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + 'ccc': {'StringValue': 'testjunk', 
'DataType': 'String'}, + 'aaa': {'BinaryValue': b'\x02\x03\x04', 'DataType': 'Binary'}, + 'zzz': {'DataType': 'Number', 'StringValue': '0230.01'}, + 'öther_encodings': {'DataType': 'String', 'StringValue': 'T\xFCst'} + } + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.get('MD5OfMessageAttributes').should.equal( + '8ae21a7957029ef04146b42aeaa18a22') msg.get('ResponseMetadata', {}).get('RequestId').should.equal( '27daac76-34dd-47df-bd01-1f6e873584a0') msg.get('MessageId').should_not.contain(' \n') From daba69914767f0b48fbf379cea44d12d21f2e635 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Thu, 11 May 2017 07:06:42 -0700 Subject: [PATCH 014/412] binary values are sent as base64-encoded strings --- tests/test_sqs/test_sqs.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 987efa3d5..0e1149200 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -7,6 +7,7 @@ import botocore.exceptions from boto.exception import SQSError from boto.sqs.message import RawMessage, Message +import base64 import requests import sure # noqa import time @@ -233,7 +234,7 @@ def test_send_message_with_attributes(): message = queue.new_message(body) message_attributes = { 'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'}, - 'test.binary_attribute': {'data_type': 'Binary', 'binary_value': 'binary value'}, + 'test.binary_attribute': {'data_type': 'Binary', 'binary_value': base64.b64encode('binary value')}, 'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'} } message.message_attributes = message_attributes From 6679def702922d19eeea5e9e0016311a868b58de Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Thu, 11 May 2017 09:28:19 -0700 Subject: [PATCH 015/412] Python 2/3 compat for MD5 of SQS attributes --- moto/sqs/models.py | 14 ++++++++++---- tests/test_sqs/test_sqs.py | 3 ++- 2 
files changed, 12 insertions(+), 5 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index f8b7d91b1..d2c538ecb 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals import base64 import hashlib import re +import six import struct from xml.sax.saxutils import escape @@ -53,24 +54,29 @@ class Message(BaseModel): Not yet implemented: List types (https://github.com/aws/aws-sdk-java/blob/7844c64cf248aed889811bf2e871ad6b276a89ca/aws-java-sdk-sqs/src/main/java/com/amazonaws/services/sqs/MessageMD5ChecksumHandler.java#L58k) """ + def utf8(str): + if isinstance(str, six.string_types): + return str.encode('utf-8') + return str md5 = hashlib.md5() for name in sorted(self.message_attributes.keys()): attr = self.message_attributes[name] data_type = attr['data_type'] - encoded = ''.encode('utf-8') + encoded = utf8('') # Each part of each attribute is encoded right after it's # own length is packed into a 4-byte integer # 'timestamp' -> b'\x00\x00\x00\t' - encoded += struct.pack("!I", len(name.encode('utf-8'))) + name.encode('utf-8') + encoded += struct.pack("!I", len(utf8(name))) + utf8(name) # The datatype is additionally given a final byte # representing which type it is - encoded += struct.pack("!I", len(data_type)).encode('utf-8') + data_type.encode('utf-8') + encoded += struct.pack("!I", len(data_type)) + utf8(data_type) encoded += TRANSPORT_TYPE_ENCODINGS[data_type] if data_type == 'String' or data_type == 'Number': value = attr['string_value'] elif data_type == 'Binary': + print(data_type, attr['binary_value'], type(attr['binary_value'])) value = base64.b64decode(attr['binary_value']) else: print("Moto hasn't implemented MD5 hashing for {} attributes".format(data_type)) @@ -80,7 +86,7 @@ class Message(BaseModel): # MD5 so as not to break client softwre return('deadbeefdeadbeefdeadbeefdeadbeef') - encoded += struct.pack("!I", len(value.encode('utf-8'))) + value.encode('utf-8') + encoded += 
struct.pack("!I", len(utf8(value))) + utf8(value) md5.update(encoded) return md5.hexdigest() diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 0e1149200..cad8ace76 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -232,9 +232,10 @@ def test_send_message_with_attributes(): body = 'this is a test message' message = queue.new_message(body) + BASE64_BINARY = base64.b64encode(b'binary value').decode('utf-8') message_attributes = { 'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'}, - 'test.binary_attribute': {'data_type': 'Binary', 'binary_value': base64.b64encode('binary value')}, + 'test.binary_attribute': {'data_type': 'Binary', 'binary_value': BASE64_BINARY}, 'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'} } message.message_attributes = message_attributes From a21413f4eaec49b216c890619d306d9468401740 Mon Sep 17 00:00:00 2001 From: Kate Heddleston Date: Wed, 17 May 2017 13:03:33 -0700 Subject: [PATCH 016/412] NoSuchKey error in S3 is actually '404' Fixes #571 and #953 --- moto/s3/models.py | 4 ++-- moto/s3/responses.py | 4 +++- tests/test_s3/test_s3.py | 5 +++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 3cd50050d..b824c4dbf 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -11,7 +11,7 @@ import six from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from .exceptions import BucketAlreadyExists, MissingBucket, MissingKey, InvalidPart, EntityTooSmall +from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall from .utils import clean_key_name, _VersionedKeyStore UPLOAD_ID_BYTES = 43 @@ -473,7 +473,7 @@ class S3Backend(BaseBackend): if isinstance(key, FakeKey): return key else: - raise MissingKey(key_name=key_name) + return None def initiate_multipart(self, 
bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index fd33c5ead..9a3c00e19 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -12,7 +12,7 @@ from moto.core.responses import _TemplateEnvironmentMixin from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys -from .exceptions import BucketAlreadyExists, S3ClientError, InvalidPartOrder +from .exceptions import BucketAlreadyExists, S3ClientError, MissingKey, InvalidPartOrder from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey from .utils import bucket_name_from_url, metadata_from_headers from xml.dom import minidom @@ -508,6 +508,8 @@ class ResponseObject(_TemplateEnvironmentMixin): version_id = query.get('versionId', [None])[0] key = self.backend.get_key( bucket_name, key_name, version_id=version_id) + if key is None: + raise MissingKey(key_name) if 'acl' in query: template = self.response_template(S3_OBJECT_ACL_RESPONSE) return 200, response_headers, template.render(obj=key) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index de9c6a7de..3907cec6e 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1223,9 +1223,10 @@ def test_boto3_head_object(): s3.Object('blah', 'hello.txt').meta.client.head_object( Bucket='blah', Key='hello.txt') - with assert_raises(ClientError): + with assert_raises(ClientError) as e: s3.Object('blah', 'hello2.txt').meta.client.head_object( Bucket='blah', Key='hello_bad.txt') + e.exception.response['Error']['Code'].should.equal('404') @mock_s3 @@ -1353,7 +1354,7 @@ def test_boto3_delete_markers(): Bucket=bucket_name, Key=key ) - e.response['Error']['Code'].should.equal('NoSuchKey') + e.response['Error']['Code'].should.equal('404') s3.delete_object( Bucket=bucket_name, From 
9f019792df857bd8247aedc3f2ea55f595d58b84 Mon Sep 17 00:00:00 2001 From: Mike Bjerkness Date: Mon, 22 May 2017 21:57:14 -0500 Subject: [PATCH 017/412] Added tests --- moto/ecr/responses.py | 5 +- tests/test_ecr/test_ecr_boto3.py | 186 +++++++++++++++++++++++++------ 2 files changed, 159 insertions(+), 32 deletions(-) diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index 3a37162a0..a778b6bac 100644 --- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py @@ -32,7 +32,10 @@ class ECRResponse(BaseResponse): def describe_repositories(self): describe_repositories_name = self._get_param('repositoryNames') - repositories = self.ecr_backend.describe_repositories(describe_repositories_name) + registry_id = self._get_param('registryId') + + repositories = self.ecr_backend.describe_repositories( + repository_names=describe_repositories_name, registry_id=registry_id) return json.dumps({ 'repositories': repositories, 'failures': [] diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index ce5a54b17..5a9f5bb61 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals -# from nose.tools import assert_raises import hashlib import json from random import random @@ -10,7 +9,6 @@ import sure # noqa import boto3 from moto import mock_ecr -import datetime def _create_image_digest(contents=None): @@ -87,6 +85,73 @@ def test_describe_repositories(): response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) +@mock_ecr +def test_describe_repositories_1(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(registryId='012345678910') + len(response['repositories']).should.equal(2) + + respository_arns = 
['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', + 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] + set([response['repositories'][0]['repositoryArn'], + response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) + + respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] + set([response['repositories'][0]['repositoryUri'], + response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) + + +@mock_ecr +def test_describe_repositories_2(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(registryId='109876543210') + len(response['repositories']).should.equal(0) + + +@mock_ecr +def test_describe_repositories_3(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(repositoryNames=['test_repository1']) + len(response['repositories']).should.equal(1) + respository_arn = 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository1' + response['repositories'][0]['repositoryArn'].should.equal(respository_arn) + + respository_uri = '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1' + response['repositories'][0]['repositoryUri'].should.equal(respository_uri) + + +@mock_ecr +def test_describe_repositories_4(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(repositoryNames=['not_a_valid_name']) + 
len(response['repositories']).should.equal(0) + + @mock_ecr def test_delete_repository(): client = boto3.client('ecr', region_name='us-east-1') @@ -106,6 +171,20 @@ def test_delete_repository(): len(response['repositories']).should.equal(0) +@mock_ecr +def test_delete_repository_1(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + invalid_repository_name = 'not_a_repository' + try: + client.delete_repository(repositoryName=invalid_repository_name) + except Exception as e: + str(e).should.equal('{0} is not a repository'.format(invalid_repository_name)) + + @mock_ecr def test_put_image(): client = boto3.client('ecr', region_name='us-east-1') @@ -126,28 +205,38 @@ def test_put_image(): def test_list_images(): client = boto3.client('ecr', region_name='us-east-1') _ = client.create_repository( - repositoryName='test_repository' + repositoryName='test_repository_1' + ) + + _ = client.create_repository( + repositoryName='test_repository_2' ) _ = client.put_image( - repositoryName='test_repository', + repositoryName='test_repository_1', imageManifest=json.dumps(_create_image_manifest()), imageTag='latest' ) _ = client.put_image( - repositoryName='test_repository', + repositoryName='test_repository_1', imageManifest=json.dumps(_create_image_manifest()), imageTag='v1' ) _ = client.put_image( - repositoryName='test_repository', + repositoryName='test_repository_1', imageManifest=json.dumps(_create_image_manifest()), imageTag='v2' ) - response = client.list_images(repositoryName='test_repository') + _ = client.put_image( + repositoryName='test_repository_2', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='oldest' + ) + + response = client.list_images(repositoryName='test_repository_1') type(response['imageIds']).should.be(list) len(response['imageIds']).should.be(3) @@ -156,6 +245,15 @@ def test_list_images(): response['imageIds'][1]['imageTag'], 
response['imageIds'][2]['imageTag']]).should.equal(set(image_tags)) + response = client.list_images(repositoryName='test_repository_2') + type(response['imageIds']).should.be(list) + len(response['imageIds']).should.be(1) + response['imageIds'][0]['imageTag'].should.equal('oldest') + + response = client.list_images(repositoryName='test_repository_2', registryId='109876543210') + type(response['imageIds']).should.be(list) + len(response['imageIds']).should.be(0) + @mock_ecr def test_describe_images(): @@ -211,31 +309,57 @@ def test_describe_images(): response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) - # response['imageDetails'][0]['imagePushedAt'].should.equal('2017-05-09') - # response['imageDetails'][1]['imagePushedAt'].should.equal('2017-05-09') - # response['imageDetails'][2]['imagePushedAt'].should.equal('2017-05-09') + invalid_repository_name = 'not_a_valid_repository' + try: + client.describe_images(repositoryName=invalid_repository_name) + except Exception as e: + str(e).should.equal('{0} is not a repository'.format(invalid_repository_name)) - ''' - image_digests = [ - "hi", "mike", "name" - ] - set([response['imageDetails'][0]['imageDigest'], - response['imageDetails'][1]['imageDigest'], - response['imageDetails'][2]['imageDigest']]).should.equal(set(image_digests)) - ''' + +@mock_ecr +def test_put_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + response['image']['imageId']['imageTag'].should.equal('latest') + response['image']['imageId']['imageDigest'].should.contain("sha") + response['image']['repositoryName'].should.equal('test_repository') + response['image']['registryId'].should.equal('012345678910') + + invalid_repository_name 
= 'not_a_valid_repository' + + try: + client.put_image( + repositoryName=invalid_repository_name, + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest') + except Exception as e: + str(e).should.equal('{0} is not a repository'.format(invalid_repository_name)) ''' -'imageDetails': [ - { - 'registryId': 'string', - 'repositoryName': 'string', - 'imageDigest': 'string', - 'imageTags': [ - 'string', - ], - 'imageSizeInBytes': 123, - 'imagePushedAt': datetime(2015, 1, 1) - }, -], -''' +obj = { + "image": { + "repository": "test_repository", + "imageManifest": "{\"layers\": [{\"mediaType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\", \"digest\": \"sha256:77ea7eee3d80b1a38f83906dd3048e2689457eb90e18a7d12f839c5ae37106a2\", \"size\": 32654}, {\"mediaType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\", \"digest\": \"sha256:95cf1a2e1698fe3ca1fcc3f653119146b271d0b62e487ec264441e886a11bd06\", \"size\": 16724}, {\"mediaType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\", \"digest\": \"sha256:a0e70458d19e37e14d6388030a017c587283e2fb6ef10c0744cad0294c47e8f8\", \"size\": 73109}], \"schemaVersion\": 2, \"config\": {\"mediaType\": \"application/vnd.docker.container.image.v1+json\", \"digest\": \"sha256:b79606fb3afea5bd1609ed40b622142f1c98125abcfe89a76a661b0e8e343910\", \"size\": 7023}, \"mediaType\": \"application/vnd.docker.distribution.manifest.v2+json\"}", + "imageId": { + "imageTag": "latest", + "imageDigest": "sha256:c639b9999fadc04554ed2ef5cec140d35136d23f0ee15ad71f0708e334fc21ba" + }, + "imageSizeInBytes": 52428800, + "imageDigest": null, + "imageTag": "latest", + "registryId": "012345678910", + "repositoryName": "test_repository", + "imagePushedAt": null + } +} +''' \ No newline at end of file From d6873c3dcb916c59f2dfc39d21960e778cdd1f09 Mon Sep 17 00:00:00 2001 From: Mike Bjerkness Date: Mon, 22 May 2017 22:04:33 -0500 Subject: [PATCH 018/412] Adding ECR to moto/backends.py --- moto/backends.py | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/moto/backends.py b/moto/backends.py index eae94db75..0af4ae2e2 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -10,6 +10,7 @@ from moto.datapipeline import datapipeline_backends from moto.dynamodb import dynamodb_backends from moto.dynamodb2 import dynamodb_backends2 from moto.ec2 import ec2_backends +from moto.ecr import ecr_backends from moto.ecs import ecs_backends from moto.elb import elb_backends from moto.emr import emr_backends @@ -39,6 +40,7 @@ BACKENDS = { 'dynamodb': dynamodb_backends, 'dynamodb2': dynamodb_backends2, 'ec2': ec2_backends, + 'ecr': ecr_backends, 'ecs': ecs_backends, 'elb': elb_backends, 'events': events_backends, From c7a166f68e3c84a44744e5129782c8338e5eca47 Mon Sep 17 00:00:00 2001 From: Mike Bjerkness Date: Mon, 22 May 2017 22:29:49 -0500 Subject: [PATCH 019/412] Remove tests that expect exceptions. --- tests/test_ecr/test_ecr_boto3.py | 54 ++------------------------------ 1 file changed, 2 insertions(+), 52 deletions(-) diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 5a9f5bb61..f466823d4 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -169,21 +169,7 @@ def test_delete_repository(): response = client.describe_repositories() len(response['repositories']).should.equal(0) - - -@mock_ecr -def test_delete_repository_1(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - invalid_repository_name = 'not_a_repository' - try: - client.delete_repository(repositoryName=invalid_repository_name) - except Exception as e: - str(e).should.equal('{0} is not a repository'.format(invalid_repository_name)) - + @mock_ecr def test_put_image(): @@ -309,12 +295,6 @@ def test_describe_images(): response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) - invalid_repository_name = 
'not_a_valid_repository' - try: - client.describe_images(repositoryName=invalid_repository_name) - except Exception as e: - str(e).should.equal('{0} is not a repository'.format(invalid_repository_name)) - @mock_ecr def test_put_image(): @@ -332,34 +312,4 @@ def test_put_image(): response['image']['imageId']['imageTag'].should.equal('latest') response['image']['imageId']['imageDigest'].should.contain("sha") response['image']['repositoryName'].should.equal('test_repository') - response['image']['registryId'].should.equal('012345678910') - - invalid_repository_name = 'not_a_valid_repository' - - try: - client.put_image( - repositoryName=invalid_repository_name, - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest') - except Exception as e: - str(e).should.equal('{0} is not a repository'.format(invalid_repository_name)) - - -''' -obj = { - "image": { - "repository": "test_repository", - "imageManifest": "{\"layers\": [{\"mediaType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\", \"digest\": \"sha256:77ea7eee3d80b1a38f83906dd3048e2689457eb90e18a7d12f839c5ae37106a2\", \"size\": 32654}, {\"mediaType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\", \"digest\": \"sha256:95cf1a2e1698fe3ca1fcc3f653119146b271d0b62e487ec264441e886a11bd06\", \"size\": 16724}, {\"mediaType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\", \"digest\": \"sha256:a0e70458d19e37e14d6388030a017c587283e2fb6ef10c0744cad0294c47e8f8\", \"size\": 73109}], \"schemaVersion\": 2, \"config\": {\"mediaType\": \"application/vnd.docker.container.image.v1+json\", \"digest\": \"sha256:b79606fb3afea5bd1609ed40b622142f1c98125abcfe89a76a661b0e8e343910\", \"size\": 7023}, \"mediaType\": \"application/vnd.docker.distribution.manifest.v2+json\"}", - "imageId": { - "imageTag": "latest", - "imageDigest": "sha256:c639b9999fadc04554ed2ef5cec140d35136d23f0ee15ad71f0708e334fc21ba" - }, - "imageSizeInBytes": 52428800, - "imageDigest": null, - "imageTag": "latest", - "registryId": 
"012345678910", - "repositoryName": "test_repository", - "imagePushedAt": null - } -} -''' \ No newline at end of file + response['image']['registryId'].should.equal('012345678910') \ No newline at end of file From 35692b5c9ac5e00a81d4d11278298c796f4c7cf7 Mon Sep 17 00:00:00 2001 From: Mike Bjerkness Date: Mon, 22 May 2017 22:49:59 -0500 Subject: [PATCH 020/412] Stub out all remaining ECR methods with NotImplementedError. --- moto/ecr/responses.py | 75 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index a778b6bac..f8b1606cc 100644 --- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py @@ -73,3 +73,78 @@ class ECRResponse(BaseResponse): return json.dumps({ 'imageDetails': [image.response_describe_object for image in images], }) + + def batch_check_layer_availability(self): + if self.is_not_dryrun('BatchCheckLayerAvailability'): + raise NotImplementedError( + 'ECR.batch_check_layer_availability is not yet implemented') + + def batch_delete_image(self): + if self.is_not_dryrun('BatchDeleteImage'): + raise NotImplementedError( + 'ECR.batch_delete_image is not yet implemented') + + def batch_get_image(self): + if self.is_not_dryrun('BatchGetImage'): + raise NotImplementedError( + 'ECR.batch_get_image is not yet implemented') + + def can_paginate(self): + if self.is_not_dryrun('CanPaginate'): + raise NotImplementedError( + 'ECR.can_paginate is not yet implemented') + + def complete_layer_upload(self): + if self.is_not_dryrun('CompleteLayerUpload'): + raise NotImplementedError( + 'ECR.complete_layer_upload is not yet implemented') + + def delete_repository_policy(self): + if self.is_not_dryrun('DeleteRepositoryPolicy'): + raise NotImplementedError( + 'ECR.delete_repository_policy is not yet implemented') + + def generate_presigned_url(self): + if self.is_not_dryrun('GeneratePresignedUrl'): + raise NotImplementedError( + 'ECR.generate_presigned_url is not yet implemented') + + def 
get_authorization_token(self): + if self.is_not_dryrun('GetAuthorizationToken'): + raise NotImplementedError( + 'ECR.get_authorization_token is not yet implemented') + + def get_download_url_for_layer(self): + if self.is_not_dryrun('GetDownloadUrlForLayer'): + raise NotImplementedError( + 'ECR.get_download_url_for_layer is not yet implemented') + + def get_paginator(self): + if self.is_not_dryrun('GetPaginator'): + raise NotImplementedError( + 'ECR.get_paginator is not yet implemented') + + def get_repository_policy(self): + if self.is_not_dryrun('GetRepositoryPolicy'): + raise NotImplementedError( + 'ECR.get_repository_policy is not yet implemented') + + def get_waiter(self): + if self.is_not_dryrun('GetWaiter'): + raise NotImplementedError( + 'ECR.get_waiter is not yet implemented') + + def initiate_layer_upload(self): + if self.is_not_dryrun('InitiateLayerUpload'): + raise NotImplementedError( + 'ECR.initiate_layer_upload is not yet implemented') + + def set_repository_policy(self): + if self.is_not_dryrun('SetRepositoryPolicy'): + raise NotImplementedError( + 'ECR.set_repository_policy is not yet implemented') + + def upload_layer_part(self): + if self.is_not_dryrun('UploadLayerPart'): + raise NotImplementedError( + 'ECR.upload_layer_part is not yet implemented') From 91d99e56951b818462a07f036858577367f1000b Mon Sep 17 00:00:00 2001 From: Mike Bjerkness Date: Mon, 22 May 2017 22:50:39 -0500 Subject: [PATCH 021/412] Fix python 3 error with generate sha --- tests/test_ecr/test_ecr_boto3.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index f466823d4..1191c42d2 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -14,7 +14,7 @@ from moto import mock_ecr def _create_image_digest(contents=None): if not contents: contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) - return "sha256:%s" % hashlib.sha256(contents).hexdigest() + return 
"sha256:%s" % hashlib.sha256(contents.encode('utf-8')).hexdigest() def _create_image_manifest(): @@ -169,7 +169,7 @@ def test_delete_repository(): response = client.describe_repositories() len(response['repositories']).should.equal(0) - + @mock_ecr def test_put_image(): From 25ae4d42a2d3c50007d369c3288c0482037d95e0 Mon Sep 17 00:00:00 2001 From: Mike Bjerkness Date: Mon, 22 May 2017 23:04:36 -0500 Subject: [PATCH 022/412] Fix encoding error in ecr/models.py --- moto/ecr/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/ecr/models.py b/moto/ecr/models.py index 5f8255007..82ce2ebd6 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -104,7 +104,7 @@ class Image(BaseObject): def _create_digest(self): image_contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) - self.image_digest = "sha256:%s" % hashlib.sha256(image_contents).hexdigest() + self.image_digest = "sha256:%s" % hashlib.sha256(image_contents.encode('utf-8')).hexdigest() def get_image_digest(self): if not self.image_digest: From a2a651493628397dc32005320dd4203ebb83993a Mon Sep 17 00:00:00 2001 From: Simon-Pierre Gingras Date: Tue, 23 May 2017 11:29:01 -0700 Subject: [PATCH 023/412] attempt at fixing tests --- moto/s3/responses.py | 4 ++-- tests/test_s3/test_s3.py | 13 ++++++------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 43e27a815..dbc6bf28f 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -484,7 +484,7 @@ class ResponseObject(_TemplateEnvironmentMixin): elif method == 'PUT': return self._key_response_put(request, body, bucket_name, query, key_name, headers) elif method == 'HEAD': - return self._key_response_head(bucket_name, query, key_name, headers) + return self._key_response_head(bucket_name, query, key_name, headers=request.headers) elif method == 'DELETE': return self._key_response_delete(bucket_name, query, key_name, headers) elif method == 'POST': @@ -597,7 +597,7 @@ 
class ResponseObject(_TemplateEnvironmentMixin): response_headers = {} version_id = query.get('versionId', [None])[0] - if_modified_since = headers.get('if-modified-since', None) + if_modified_since = headers.get('If-Modified-Since', None) if if_modified_since: if_modified_since = str_to_rfc_1123_datetime(if_modified_since) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 6af653f9e..cd1c2e43e 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1277,18 +1277,17 @@ def test_boto3_head_object_if_modified_since(): key = 'hello.txt' - with freeze_time(datetime.datetime.now() - datetime.timedelta(hours=3)): - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test' - ) + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) with assert_raises(botocore.exceptions.ClientError) as err: s3.head_object( Bucket=bucket_name, Key=key, - IfModifiedSince=datetime.datetime.now() - datetime.timedelta(hours=2) + IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1) ) e = err.exception e.response['Error'].should.equal({'Code': '304', 'Message': 'Not Modified'}) From 4e2f775c1f4167f91d08e9df997e790df847e549 Mon Sep 17 00:00:00 2001 From: Jeff Hardy Date: Fri, 26 May 2017 12:37:33 -0700 Subject: [PATCH 024/412] Use region list from Boto. Boto can be configured with extra regions, but moto will fail to import if they are not in the hardcoded list in ec2/models.py. Instead, use the region list from boto to build the ec2_backends dict to ensure all regions are available. 
--- moto/ec2/models.py | 21 +++------------------ tests/test_ec2/test_regions.py | 8 +++++++- 2 files changed, 10 insertions(+), 19 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 7fa7e1009..7e3df9880 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -5,6 +5,8 @@ import itertools import re import six +import boto.ec2 + from collections import defaultdict from datetime import datetime from boto.ec2.instance import Instance as BotoInstance, Reservation @@ -1143,24 +1145,7 @@ class Zone(object): class RegionsAndZonesBackend(object): - regions = [ - Region("ap-northeast-1", "ec2.ap-northeast-1.amazonaws.com"), - Region("ap-northeast-2", "ec2.ap-northeast-2.amazonaws.com"), - Region("ap-south-1", "ec2.ap-south-1.amazonaws.com"), - Region("ap-southeast-1", "ec2.ap-southeast-1.amazonaws.com"), - Region("ap-southeast-2", "ec2.ap-southeast-2.amazonaws.com"), - Region("ca-central-1", "ec2.ca-central-1.amazonaws.com.cn"), - Region("cn-north-1", "ec2.cn-north-1.amazonaws.com.cn"), - Region("eu-central-1", "ec2.eu-central-1.amazonaws.com"), - Region("eu-west-1", "ec2.eu-west-1.amazonaws.com"), - Region("eu-west-2", "ec2.eu-west-2.amazonaws.com"), - Region("sa-east-1", "ec2.sa-east-1.amazonaws.com"), - Region("us-east-1", "ec2.us-east-1.amazonaws.com"), - Region("us-east-2", "ec2.us-east-2.amazonaws.com"), - Region("us-gov-west-1", "ec2.us-gov-west-1.amazonaws.com"), - Region("us-west-1", "ec2.us-west-1.amazonaws.com"), - Region("us-west-2", "ec2.us-west-2.amazonaws.com"), - ] + regions = [Region(ri.name, ri.endpoint) for ri in boto.ec2.regions()] zones = dict( (region, [Zone(region + c, region) for c in 'abc']) diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py index 4beca7c67..1e87b253c 100644 --- a/tests/test_ec2/test_regions.py +++ b/tests/test_ec2/test_regions.py @@ -5,13 +5,19 @@ import boto.ec2.elb import sure from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated +from moto.ec2 
import ec2_backends + +def test_use_boto_regions(): + boto_regions = {r.name for r in boto.ec2.regions()} + moto_regions = set(ec2_backends) + + moto_regions.should.equal(boto_regions) def add_servers_to_region(ami_id, count, region): conn = boto.ec2.connect_to_region(region) for index in range(count): conn.run_instances(ami_id) - @mock_ec2_deprecated def test_add_servers_to_a_single_region(): region = 'ap-northeast-1' From 98264148e1041d08d80effdc9e7574dc8cf1b93f Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Wed, 31 May 2017 15:11:42 -0700 Subject: [PATCH 025/412] ELB connection draining timeout defaults to 300 seconds --- moto/elb/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/elb/responses.py b/moto/elb/responses.py index ed8d6d03a..2bc76385f 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -159,7 +159,7 @@ class ELBResponse(BaseResponse): if connection_draining: attribute = ConnectionDrainingAttribute() attribute.enabled = connection_draining["enabled"] == "true" - attribute.timeout = connection_draining["timeout"] + attribute.timeout = connection_draining.get("timeout") self.elb_backend.set_connection_draining_attribute( load_balancer_name, attribute) From b0c83c4e70999c9230b2d1fc922e6aae4ea5ebc8 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Wed, 31 May 2017 15:53:31 -0700 Subject: [PATCH 026/412] Testing ELB connection draining timeouts --- moto/elb/responses.py | 17 ++++++++++------- tests/test_elb/test_elb.py | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 7 deletions(-) diff --git a/moto/elb/responses.py b/moto/elb/responses.py index 2bc76385f..ec20486f0 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -159,9 +159,8 @@ class ELBResponse(BaseResponse): if connection_draining: attribute = ConnectionDrainingAttribute() attribute.enabled = connection_draining["enabled"] == "true" - attribute.timeout = connection_draining.get("timeout") - 
self.elb_backend.set_connection_draining_attribute( - load_balancer_name, attribute) + attribute.timeout = connection_draining.get("timeout", 300) + self.elb_backend.set_connection_draining_attribute(load_balancer_name, attribute) connection_settings = self._get_dict_param( "LoadBalancerAttributes.ConnectionSettings.") @@ -172,7 +171,7 @@ class ELBResponse(BaseResponse): load_balancer_name, attribute) template = self.response_template(MODIFY_ATTRIBUTES_TEMPLATE) - return template.render(attributes=load_balancer.attributes) + return template.render(load_balancer=load_balancer, attributes=load_balancer.attributes) def create_load_balancer_policy(self): load_balancer_name = self._get_param('LoadBalancerName') @@ -592,9 +591,11 @@ DESCRIBE_ATTRIBUTES_TEMPLATE = """{{ attributes.cross_zone_load_balancing.enabled }} - {{ attributes.connection_draining.enabled }} {% if attributes.connection_draining.enabled %} + true {{ attributes.connection_draining.timeout }} + {% else %} + false {% endif %} @@ -607,7 +608,7 @@ DESCRIBE_ATTRIBUTES_TEMPLATE = """ - my-loadbalancer + {{ load_balancer.name }} {{ attributes.access_log.enabled }} @@ -624,9 +625,11 @@ MODIFY_ATTRIBUTES_TEMPLATE = """ Date: Wed, 24 May 2017 09:54:00 -0300 Subject: [PATCH 028/412] extended CloudFormation models for Lambda and DynamoDB --- moto/awslambda/models.py | 79 ++++++++++++++++--- moto/cloudformation/parsing.py | 8 +- moto/cloudwatch/models.py | 21 +++++ moto/dynamodb/models.py | 22 ++++++ .../test_cloudformation_stack_crud.py | 78 ++++++++++++++++++ 5 files changed, 195 insertions(+), 13 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 1e651cb04..13d4726ac 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -4,6 +4,7 @@ import base64 import datetime import hashlib import io +import os import json import sys import zipfile @@ -16,12 +17,12 @@ except: import boto.awslambda from moto.core import BaseBackend, BaseModel from moto.s3.models import 
s3_backend -from moto.s3.exceptions import MissingBucket +from moto.s3.exceptions import MissingBucket, MissingKey class LambdaFunction(BaseModel): - def __init__(self, spec): + def __init__(self, spec, validate_s3=True): # required self.code = spec['Code'] self.function_name = spec['FunctionName'] @@ -58,24 +59,25 @@ class LambdaFunction(BaseModel): self.code_size = len(to_unzip_code) self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest() else: - # validate s3 bucket + # validate s3 bucket and key + key = None try: # FIXME: does not validate bucket region key = s3_backend.get_key( self.code['S3Bucket'], self.code['S3Key']) except MissingBucket: - raise ValueError( - "InvalidParameterValueException", - "Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist") - else: - # validate s3 key - if key is None: + if do_validate_s3(): + raise ValueError( + "InvalidParameterValueException", + "Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist") + except MissingKey: + if do_validate_s3(): raise ValueError( "InvalidParameterValueException", "Error occurred while GetObject. S3 Error Code: NoSuchKey. 
S3 Error Message: The specified key does not exist.") - else: - self.code_size = key.size - self.code_sha_256 = hashlib.sha256(key.value).hexdigest() + if key: + self.code_size = key.size + self.code_sha_256 = hashlib.sha256(key.value).hexdigest() self.function_arn = 'arn:aws:lambda:123456789012:function:{0}'.format( self.function_name) @@ -209,6 +211,13 @@ class LambdaFunction(BaseModel): fn = backend.create_function(spec) return fn + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + if attribute_name == 'Arn': + region = 'us-east-1' + return 'arn:aws:lambda:{0}:123456789012:function:{1}'.format(region, self.function_name) + raise UnformattedGetAttTemplateException() + @staticmethod def _create_zipfile_from_plaintext_code(code): zip_output = io.BytesIO() @@ -219,6 +228,48 @@ class LambdaFunction(BaseModel): return zip_output.read() +class EventSourceMapping(BaseModel): + + def __init__(self, spec): + # required + self.function_name = spec['FunctionName'] + self.event_source_arn = spec['EventSourceArn'] + self.starting_position = spec['StartingPosition'] + + # optional + self.batch_size = spec.get('BatchSize', 100) + self.enabled = spec.get('Enabled', True) + self.starting_position_timestamp = spec.get('StartingPositionTimestamp', None) + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + spec = { + 'FunctionName': properties['FunctionName'], + 'EventSourceArn': properties['EventSourceArn'], + 'StartingPosition': properties['StartingPosition'] + } + optional_properties = 'BatchSize Enabled StartingPositionTimestamp'.split() + for prop in optional_properties: + if prop in properties: + spec[prop] = properties[prop] + return EventSourceMapping(spec) + + +class LambdaVersion(BaseModel): + + def __init__(self, spec): + self.version = spec['Version'] + + @classmethod + def 
create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + spec = { + 'Version': properties.get('Version') + } + return LambdaVersion(spec) + + class LambdaBackend(BaseBackend): def __init__(self): @@ -242,6 +293,10 @@ class LambdaBackend(BaseBackend): return self._functions.values() +def do_validate_s3(): + return os.environ.get('VALIDATE_LAMBDA_S3', '') in ['', '1', 'true'] + + lambda_backends = {} for region in boto.awslambda.regions(): lambda_backends[region.name] = LambdaBackend() diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 6d38289c7..1908a2a71 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -7,7 +7,9 @@ import warnings from moto.autoscaling import models as autoscaling_models from moto.awslambda import models as lambda_models +from moto.cloudwatch import models as cloudwatch_models from moto.datapipeline import models as datapipeline_models +from moto.dynamodb import models as dynamodb_models from moto.ec2 import models as ec2_models from moto.ecs import models as ecs_models from moto.elb import models as elb_models @@ -27,7 +29,10 @@ from boto.cloudformation.stack import Output MODEL_MAP = { "AWS::AutoScaling::AutoScalingGroup": autoscaling_models.FakeAutoScalingGroup, "AWS::AutoScaling::LaunchConfiguration": autoscaling_models.FakeLaunchConfiguration, + "AWS::DynamoDB::Table": dynamodb_models.Table, + "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, "AWS::Lambda::Function": lambda_models.LambdaFunction, + "AWS::Lambda::Version": lambda_models.LambdaVersion, "AWS::EC2::EIP": ec2_models.ElasticAddress, "AWS::EC2::Instance": ec2_models.Instance, "AWS::EC2::InternetGateway": ec2_models.InternetGateway, @@ -53,6 +58,7 @@ MODEL_MAP = { "AWS::IAM::InstanceProfile": iam_models.InstanceProfile, "AWS::IAM::Role": iam_models.Role, "AWS::KMS::Key": kms_models.Key, + "AWS::Logs::LogGroup": 
cloudwatch_models.LogGroup, "AWS::RDS::DBInstance": rds_models.Database, "AWS::RDS::DBSecurityGroup": rds_models.SecurityGroup, "AWS::RDS::DBSubnetGroup": rds_models.SubnetGroup, @@ -133,7 +139,7 @@ def clean_json(resource_json, resources_map): try: return resource.get_cfn_attribute(resource_json['Fn::GetAtt'][1]) except NotImplementedError as n: - logger.warning(n.message.format( + logger.warning(str(n).format( resource_json['Fn::GetAtt'][0])) except UnformattedGetAttTemplateException: raise ValidationError( diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index dd97ddcbb..ed0086d93 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -111,6 +111,27 @@ class CloudWatchBackend(BaseBackend): return self.metric_data +class LogGroup(BaseModel): + + def __init__(self, spec): + # required + self.name = spec['LogGroupName'] + # optional + self.tags = spec.get('Tags', []) + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + spec = { + 'LogGroupName': properties['LogGroupName'] + } + optional_properties = 'Tags'.split() + for prop in optional_properties: + if prop in properties: + spec[prop] = properties[prop] + return LogGroup(spec) + + cloudwatch_backends = {} for region in boto.ec2.cloudwatch.regions(): cloudwatch_backends[region.name] = CloudWatchBackend() diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py index 39bf15fca..300189a0e 100644 --- a/moto/dynamodb/models.py +++ b/moto/dynamodb/models.py @@ -137,6 +137,20 @@ class Table(BaseModel): } return results + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + key_attr = [i['AttributeName'] for i in properties['KeySchema'] if i['KeyType'] == 'HASH'][0] + key_type = [i['AttributeType'] for i in properties['AttributeDefinitions'] if i['AttributeName'] == 
key_attr][0] + spec = { + 'name': properties['TableName'], + 'hash_key_attr': key_attr, + 'hash_key_type': key_type + } + # TODO: optional properties still missing: + # range_key_attr, range_key_type, read_capacity, write_capacity + return Table(**spec) + def __len__(self): count = 0 for key, value in self.items.items(): @@ -245,6 +259,14 @@ class Table(BaseModel): except KeyError: return None + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + if attribute_name == 'StreamArn': + region = 'us-east-1' + time = '2000-01-01T00:00:00.000' + return 'arn:aws:dynamodb:{0}:123456789012:table/{1}/stream/{2}'.format(region, self.name, time) + raise UnformattedGetAttTemplateException() + class DynamoDBBackend(BaseBackend): diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index eb3798f82..0e3634756 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals +import os import json import boto @@ -565,3 +566,80 @@ def test_describe_stack_events_shows_create_update_and_delete(): assert False, "Too many stack events" list(stack_events_to_look_for).should.be.empty + + +@mock_cloudformation_deprecated +@mock_route53_deprecated +def test_create_stack_lambda_and_dynamodb(): + conn = boto.connect_cloudformation() + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack Lambda Test 1", + "Parameters": { + }, + "Resources": { + "func1": { + "Type" : "AWS::Lambda::Function", + "Properties" : { + "Code": { + "S3Bucket": "bucket_123", + "S3Key": "key_123" + }, + "FunctionName": "func1", + "Handler": "handler.handler", + "Role": "role1", + "Runtime": "python2.7", + "Description": "descr", + "MemorySize": 12345, + } + }, + "func1version": { + "Type": 
"AWS::Lambda::LambdaVersion", + "Properties" : { + "Version": "v1.2.3" + } + }, + "tab1": { + "Type" : "AWS::DynamoDB::Table", + "Properties" : { + "TableName": "tab1", + "KeySchema": [{ + "AttributeName": "attr1", + "KeyType": "HASH" + }], + "AttributeDefinitions": [{ + "AttributeName": "attr1", + "AttributeType": "string" + }], + "ProvisionedThroughput": { + "ReadCapacityUnits": 10, + "WriteCapacityUnits": 10 + } + } + }, + "func1mapping": { + "Type": "AWS::Lambda::EventSourceMapping", + "Properties" : { + "FunctionName": "v1.2.3", + "EventSourceArn": "arn:aws:dynamodb:region:XXXXXX:table/tab1/stream/2000T00:00:00.000", + "StartingPosition": "0", + "BatchSize": 100, + "Enabled": True + } + } + }, + } + validate_s3_before = os.environ.get('VALIDATE_LAMBDA_S3', '') + try: + os.environ['VALIDATE_LAMBDA_S3'] = 'false' + conn.create_stack( + "test_stack_lambda_1", + template_body=json.dumps(dummy_template), + parameters={}.items() + ) + finally: + os.environ['VALIDATE_LAMBDA_S3'] = validate_s3_before + + stack = conn.describe_stacks()[0] + resources = stack.list_resources() + assert len(resources) == 4 From a0651ccde556a441281b1a902ec994b1faf8f1bd Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Fri, 2 Jun 2017 16:18:52 -0400 Subject: [PATCH 029/412] Add exports to CloudFormationBackend --- moto/cloudformation/models.py | 10 +++++++++- moto/cloudformation/parsing.py | 32 +++++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 0dc262b2d..4a033b6d2 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -42,7 +42,7 @@ class FakeStack(BaseModel): return resource_map def _create_output_map(self): - output_map = OutputMap(self.resource_map, self.template_dict) + output_map = OutputMap(self.resource_map, self.template_dict, self.stack_id) output_map.create() return output_map @@ -90,6 +90,10 @@ class FakeStack(BaseModel): def 
stack_outputs(self): return self.output_map.values() + @property + def exports(self): + return self.output_map.exports + def update(self, template, role_arn=None, parameters=None, tags=None): self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") self.template = template @@ -131,6 +135,7 @@ class CloudFormationBackend(BaseBackend): def __init__(self): self.stacks = OrderedDict() self.deleted_stacks = {} + self.exports = OrderedDict() def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None): stack_id = generate_stack_id(name) @@ -145,6 +150,8 @@ class CloudFormationBackend(BaseBackend): role_arn=role_arn, ) self.stacks[stack_id] = new_stack + for export in new_stack.exports: + self.exports[export.name] = export return new_stack def describe_stacks(self, name_or_stack_id): @@ -191,6 +198,7 @@ class CloudFormationBackend(BaseBackend): stack = self.stacks.pop(name_or_stack_id, None) stack.delete() self.deleted_stacks[stack.stack_id] = stack + [self.exports.pop(export.name) for export in stack.exports] return self.stacks.pop(name_or_stack_id, None) else: # Delete by stack name diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 6d38289c7..248ecc57a 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -454,8 +454,9 @@ class ResourceMap(collections.Mapping): class OutputMap(collections.Mapping): - def __init__(self, resources, template): + def __init__(self, resources, template, stack_id): self._template = template + self._stack_id = stack_id self._output_json_map = template.get('Outputs') # Create the default resources @@ -484,6 +485,35 @@ class OutputMap(collections.Mapping): def outputs(self): return self._output_json_map.keys() if self._output_json_map else [] + @property + def exports(self): + exports = [] + if self.outputs: + for key, value in self._output_json_map.iteritems(): + if value.get('Export'): + 
exports.append(Export(self._stack_id, value['Export'].get('Name'), value.get('Value'))) + return exports + def create(self): for output in self.outputs: self[output] + + +class Export(object): + + def __init__(self, exporting_stack_id, name, value): + self._exporting_stack_id = exporting_stack_id + self._name = name + self._value = value + + @property + def exporting_stack_id(self): + return self._exporting_stack_id + + @property + def name(self): + return self._name + + @property + def value(self): + return self._value From 5eb866146a71455ac94bfe7750a1d88503620790 Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Fri, 2 Jun 2017 13:19:45 -0700 Subject: [PATCH 030/412] add assert to catch odd numbers in operator/value parsing --- moto/dynamodb2/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 4bca83582..1a609bebb 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -112,6 +112,7 @@ class Item(object): def update(self, update_expression, expression_attribute_names, expression_attribute_values): parts = [p for p in re.split(r'\b(SET|REMOVE|ADD|DELETE)\b', update_expression) if p] + assert len(parts) % 2 == 0, "Mismatched operators and values in update expression: '{}'".format(update_expression) for action, valstr in zip(parts[:-1:2], parts[1::2]): values = valstr.split(',') for value in values: From de9ea10eb1faabe1e724b86af719ca6ddef7bfa5 Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Fri, 2 Jun 2017 16:22:48 -0400 Subject: [PATCH 031/412] Add list_exports to CloudFormationResponse --- moto/cloudformation/models.py | 11 +++ moto/cloudformation/responses.py | 26 +++++++ .../test_cloudformation_stack_crud_boto3.py | 76 +++++++++++++++++++ 3 files changed, 113 insertions(+) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 4a033b6d2..2b3dfee47 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -206,6 +206,17 @@ class 
CloudFormationBackend(BaseBackend): if stack.name == name_or_stack_id: self.delete_stack(stack.stack_id) + def list_exports(self, token): + all_exports = [x for x in self.exports.values()] + if token is None: + exports = all_exports[0:100] + next_token = '100' if len(all_exports) > 100 else None + else: + token = int(token) + exports = all_exports[token:token + 100] + next_token = str(token + 100) if len(all_exports) > token + 100 else None + return exports, next_token + cloudformation_backends = {} for region in boto.cloudformation.regions(): diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 60f647efa..d66a172a8 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -210,6 +210,12 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(DELETE_STACK_RESPONSE_TEMPLATE) return template.render() + def list_exports(self): + token = self._get_param('NextToken') + exports, next_token = self.cloudformation_backend.list_exports(token=token) + template = self.response_template(LIST_EXPORTS_RESPONSE) + return template.render(exports=exports, next_token=next_token) + CREATE_STACK_RESPONSE_TEMPLATE = """ @@ -410,3 +416,23 @@ DELETE_STACK_RESPONSE_TEMPLATE = """ """ + +LIST_EXPORTS_RESPONSE = """ + + + {% for export in exports %} + + {{ export.exporting_stack_id }} + {{ export.name }} + {{ export.value }} + + {% endfor %} + + {% if next_token %} + {{ next_token }} + {% endif %} + + + 5ccc7dcd-744c-11e5-be70-example + +""" diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 85815e9f8..8b4d72ad3 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -57,8 +57,31 @@ dummy_update_template = { } } +dummy_output_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 
1", + "Resources": { + "Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-08111162" + } + } + }, + "Outputs" : { + "StackVPC" : { + "Description" : "The ID of the VPC", + "Value" : "VPCID", + "Export" : { + "Name" : "My VPC ID" + } + } + } +} + dummy_template_json = json.dumps(dummy_template) dummy_update_template_json = json.dumps(dummy_template) +dummy_output_template_json = json.dumps(dummy_output_template) @mock_cloudformation @@ -408,3 +431,56 @@ def test_stack_events(): assert False, "Too many stack events" list(stack_events_to_look_for).should.be.empty + + +@mock_cloudformation +def test_list_exports(): + cf_client = boto3.client('cloudformation', region_name='us-east-1') + cf_resource = boto3.resource('cloudformation', region_name='us-east-1') + stack = cf_resource.create_stack( + StackName="test_stack", + TemplateBody=dummy_output_template_json, + ) + output_value = 'VPCID' + exports = cf_client.list_exports()['Exports'] + + stack.outputs.should.have.length_of(1) + stack.outputs[0]['OutputValue'].should.equal(output_value) + + exports.should.have.length_of(1) + exports[0]['ExportingStackId'].should.equal(stack.stack_id) + exports[0]['Name'].should.equal('My VPC ID') + exports[0]['Value'].should.equal(output_value) + + +@mock_cloudformation +def test_list_exports_with_token(): + cf = boto3.client('cloudformation', region_name='us-east-1') + for i in range(101): + cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_output_template_json, + ) + exports = cf.list_exports() + exports['Exports'].should.have.length_of(100) + exports.get('NextToken').should_not.be.none + + more_exports = cf.list_exports(NextToken=exports['NextToken']) + more_exports['Exports'].should.have.length_of(1) + more_exports.get('NextToken').should.be.none + + +@mock_cloudformation +def test_delete_stack_with_export(): + cf = boto3.client('cloudformation', region_name='us-east-1') + stack = cf.create_stack( + StackName="test_stack", + 
TemplateBody=dummy_output_template_json, + ) + + stack_id = stack['StackId'] + exports = cf.list_exports()['Exports'] + exports.should.have.length_of(1) + + cf.delete_stack(StackName=stack_id) + cf.list_exports()['Exports'].should.have.length_of(0) From c6603c6248d6690e080b6b076e330c6d818d48a9 Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Fri, 2 Jun 2017 16:23:42 -0400 Subject: [PATCH 032/412] Validate export names are unique --- moto/cloudformation/models.py | 7 +++++++ .../test_cloudformation_stack_crud_boto3.py | 19 ++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 2b3dfee47..00cbf781d 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -150,6 +150,7 @@ class CloudFormationBackend(BaseBackend): role_arn=role_arn, ) self.stacks[stack_id] = new_stack + self._validate_export_uniqueness(new_stack) for export in new_stack.exports: self.exports[export.name] = export return new_stack @@ -217,6 +218,12 @@ class CloudFormationBackend(BaseBackend): next_token = str(token + 100) if len(all_exports) > token + 100 else None return exports, next_token + def _validate_export_uniqueness(self, stack): + new_stack_export_names = [x.name for x in stack.exports] + export_names = self.exports.keys() + if not set(export_names).isdisjoint(new_stack_export_names): + raise ValidationError(stack.stack_id, message='Export names must be unique across a given region') + cloudformation_backends = {} for region in boto.cloudformation.regions(): diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 8b4d72ad3..ba324985f 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -12,6 +12,7 @@ import sure # noqa # Ensure 'assert_raises' context manager support for Python 
2.6 import tests.backport_assert_raises # noqa from nose.tools import assert_raises +import random dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -457,9 +458,11 @@ def test_list_exports(): def test_list_exports_with_token(): cf = boto3.client('cloudformation', region_name='us-east-1') for i in range(101): + # Add index to ensure name is unique + dummy_output_template['Outputs']['StackVPC']['Export']['Name'] += str(i) cf.create_stack( StackName="test_stack", - TemplateBody=dummy_output_template_json, + TemplateBody=json.dumps(dummy_output_template), ) exports = cf.list_exports() exports['Exports'].should.have.length_of(100) @@ -484,3 +487,17 @@ def test_delete_stack_with_export(): cf.delete_stack(StackName=stack_id) cf.list_exports()['Exports'].should.have.length_of(0) + + +@mock_cloudformation +def test_export_names_must_be_unique(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + first_stack = cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_output_template_json, + ) + with assert_raises(ClientError): + cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_output_template_json, + ) From b713eef491b84f440a85de25b10d6304874f817f Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Fri, 2 Jun 2017 13:41:33 -0700 Subject: [PATCH 033/412] cleanup after merge --- moto/dynamodb2/responses.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 39b67240c..d3fa68b7b 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -316,16 +316,18 @@ class DynamoHandler(BaseResponse): else: index = table.schema - reverse_attribute_lookup = dict((v, k) for k, v in - six.iteritems(self.body['ExpressionAttributeNames'])) + reverse_attribute_lookup = dict((v, k) for k, v in + six.iteritems(self.body['ExpressionAttributeNames'])) if " AND " in key_condition_expression: expressions = key_condition_expression.split(" AND ", 
1) index_hash_key = [key for key in index if key['KeyType'] == 'HASH'][0] - hash_key_var = reverse_attribute_lookup.get(index_hash_key['AttributeName'], index_hash_key['AttributeName']) + hash_key_var = reverse_attribute_lookup.get(index_hash_key['AttributeName'], + index_hash_key['AttributeName']) hash_key_regex = r'(^|[\s(]){0}\b'.format(hash_key_var) - i, hash_key_expression = next((i, e) for i, e in enumerate(expressions) if re.search(hash_key_regex, e)) + i, hash_key_expression = next((i, e) for i, e in enumerate(expressions) + if re.search(hash_key_regex, e)) hash_key_expression = hash_key_expression.strip('()') expressions.pop(i) From 87752457a38cbfa78fd85c526dded8460f96d309 Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Fri, 2 Jun 2017 16:57:16 -0400 Subject: [PATCH 034/412] Remove useless list comprehension --- moto/cloudformation/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 00cbf781d..6557af9dc 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -208,7 +208,7 @@ class CloudFormationBackend(BaseBackend): self.delete_stack(stack.stack_id) def list_exports(self, token): - all_exports = [x for x in self.exports.values()] + all_exports = self.exports.values() if token is None: exports = all_exports[0:100] next_token = '100' if len(all_exports) > 100 else None From c0afcfade5cf3d578a598b279339f8cbc4c53c3d Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Fri, 2 Jun 2017 17:03:16 -0400 Subject: [PATCH 035/412] Use .items() not .iteritems() --- moto/cloudformation/parsing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 248ecc57a..2a00984e4 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -489,7 +489,7 @@ class OutputMap(collections.Mapping): def exports(self): exports = [] if self.outputs: - for key, value 
in self._output_json_map.iteritems(): + for key, value in self._output_json_map.items(): if value.get('Export'): exports.append(Export(self._stack_id, value['Export'].get('Name'), value.get('Value'))) return exports From 9d37992c64efb56b09c413edbc990825713fa09c Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Fri, 2 Jun 2017 17:16:25 -0400 Subject: [PATCH 036/412] Make all_exports subscriptable --- moto/cloudformation/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 6557af9dc..c25103a4c 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -208,7 +208,7 @@ class CloudFormationBackend(BaseBackend): self.delete_stack(stack.stack_id) def list_exports(self, token): - all_exports = self.exports.values() + all_exports = list(self.exports.values()) if token is None: exports = all_exports[0:100] next_token = '100' if len(all_exports) > 100 else None From 49c947ece753e6e501f48d83b196f2aa9cd0db5f Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 3 Jun 2017 19:06:49 -0400 Subject: [PATCH 037/412] Stop autodecoding content so we can mimic requests. Closes #963. 
--- moto/packages/responses/responses.py | 2 ++ tests/test_s3/test_s3.py | 29 ++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/moto/packages/responses/responses.py b/moto/packages/responses/responses.py index 1f5892b25..0226a7fb1 100644 --- a/moto/packages/responses/responses.py +++ b/moto/packages/responses/responses.py @@ -270,6 +270,8 @@ class RequestsMock(object): body=body, headers=headers, preload_content=False, + # Need to not decode_content to mimic requests + decode_content=False, ) response = adapter.build_response(request, response) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index a4b8719f6..5c830a905 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -6,7 +6,9 @@ import datetime from six.moves.urllib.request import urlopen from six.moves.urllib.error import HTTPError from functools import wraps +from gzip import GzipFile from io import BytesIO +import zlib import json import boto @@ -1405,6 +1407,33 @@ def test_boto3_delete_markers(): ) +@mock_s3 +def test_get_stream_gzipped(): + payload = "this is some stuff here" + + s3_client = boto3.client("s3", region_name='us-east-1') + s3_client.create_bucket(Bucket='moto-tests') + buffer_ = BytesIO() + with GzipFile(fileobj=buffer_, mode='w') as f: + f.write(payload) + payload_gz = buffer_.getvalue() + + s3_client.put_object( + Bucket='moto-tests', + Key='keyname', + Body=payload_gz, + ContentEncoding='gzip', + ) + + obj = s3_client.get_object( + Bucket='moto-tests', + Key='keyname', + ) + res = zlib.decompress(obj['Body'].read(), 16+zlib.MAX_WBITS) + assert res == payload + + + TEST_XML = """\ From 113bfcb4eacaa3346e28f1e5103928aaa4c47c83 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 3 Jun 2017 19:29:59 -0400 Subject: [PATCH 038/412] Fix duplicate bucket creation with LocationConstraint. Closes #970. 
--- moto/s3/responses.py | 6 ++++++ tests/test_s3/test_s3.py | 16 ++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 115fe98ae..3b349d864 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -336,6 +336,12 @@ class ResponseObject(_TemplateEnvironmentMixin): self.backend.set_bucket_website_configuration(bucket_name, body) return "" else: + if body: + try: + region_name = xmltodict.parse(body)['CreateBucketConfiguration']['LocationConstraint'] + except KeyError: + pass + try: new_bucket = self.backend.create_bucket( bucket_name, region_name) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 5c830a905..8841f9f71 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1207,6 +1207,22 @@ def test_boto3_bucket_create(): "utf-8").should.equal("some text") +@mock_s3 +def test_bucket_create_duplicate(): + s3 = boto3.resource('s3', region_name='us-west-2') + s3.create_bucket(Bucket="blah", CreateBucketConfiguration={ + 'LocationConstraint': 'us-west-2', + }) + with assert_raises(ClientError) as exc: + s3.create_bucket( + Bucket="blah", + CreateBucketConfiguration={ + 'LocationConstraint': 'us-west-2', + } + ) + exc.exception.response['Error']['Code'].should.equal('BucketAlreadyExists') + + @mock_s3 def test_boto3_bucket_create_eu_central(): s3 = boto3.resource('s3', region_name='eu-central-1') From a956c3a85cea2c2c890ae89b95eb4e1b76b9d16a Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 3 Jun 2017 19:35:23 -0400 Subject: [PATCH 039/412] Fix tests for py3. 
--- tests/test_s3/test_s3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 8841f9f71..1cb00d4be 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1425,7 +1425,7 @@ def test_boto3_delete_markers(): @mock_s3 def test_get_stream_gzipped(): - payload = "this is some stuff here" + payload = b"this is some stuff here" s3_client = boto3.client("s3", region_name='us-east-1') s3_client.create_bucket(Bucket='moto-tests') From 94ec799d8ac2c068af061143afa28c462fc55beb Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 3 Jun 2017 20:12:47 -0400 Subject: [PATCH 040/412] Update changelog. --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cb13a0a04..e0ec033f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,13 @@ Moto Changelog Latest ------ +1.0.1 +----- + + * Add Cloudformation exports + * Add ECR + * IAM policy versions + 1.0.0 ----- From 856de724d0678e04861a9fae186782f5ee2f2e75 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 3 Jun 2017 20:13:03 -0400 Subject: [PATCH 041/412] 1.0.1 --- moto/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/__init__.py b/moto/__init__.py index d6f84db5e..304e25cc5 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.0.0' +__version__ = '1.0.1' from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8: noqa diff --git a/setup.py b/setup.py index 9b23c602d..289c1684c 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.0.0', + version='1.0.1', description='A library that allows your python tests to easily' ' mock out the boto library', 
author='Steve Pulec', From 1abd880ab8d8db4eb04f12242e98cb4dbd863454 Mon Sep 17 00:00:00 2001 From: Giacomo Tagliabue Date: Tue, 6 Jun 2017 22:26:18 -0400 Subject: [PATCH 042/412] add pass_through option to responses --- moto/packages/responses/responses.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/moto/packages/responses/responses.py b/moto/packages/responses/responses.py index 0226a7fb1..3bc437f0b 100644 --- a/moto/packages/responses/responses.py +++ b/moto/packages/responses/responses.py @@ -10,6 +10,7 @@ import six from collections import namedtuple, Sequence, Sized from functools import update_wrapper from cookies import Cookies +from requests.adapters import HTTPAdapter from requests.utils import cookiejar_from_dict from requests.exceptions import ConnectionError from requests.sessions import REDIRECT_STATI @@ -120,10 +121,12 @@ class RequestsMock(object): POST = 'POST' PUT = 'PUT' - def __init__(self, assert_all_requests_are_fired=True): + def __init__(self, assert_all_requests_are_fired=True, pass_through=True): self._calls = CallList() self.reset() self.assert_all_requests_are_fired = assert_all_requests_are_fired + self.pass_through = pass_through + self.original_send = HTTPAdapter.send def reset(self): self._urls = [] @@ -235,6 +238,9 @@ class RequestsMock(object): match = self._find_match(request) # TODO(dcramer): find the correct class for this if match is None: + if self.pass_through: + return self.original_send(adapter, request, **kwargs) + error_msg = 'Connection refused: {0} {1}'.format(request.method, request.url) response = ConnectionError(error_msg) @@ -317,7 +323,7 @@ class RequestsMock(object): # expose default mock namespace -mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False) +mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False, pass_through=False) __all__ = [] for __attr in (a for a in dir(_default_mock) if not a.startswith('_')): __all__.append(__attr) From 
a1549b04b43be71c07b3b391ac5b391b40163d8c Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Thu, 8 Jun 2017 11:38:29 -0400 Subject: [PATCH 043/412] Add Fn::Split and Fn::Select support --- moto/cloudformation/parsing.py | 9 ++++++ .../test_cloudformation/test_stack_parsing.py | 28 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index eee6aa8e7..09b4530af 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -162,6 +162,15 @@ def clean_json(resource_json, resources_map): if cleaned_val else '{0}'.format(val)) return resource_json['Fn::Join'][0].join(join_list) + if 'Fn::Split' in resource_json: + to_split = clean_json(resource_json['Fn::Split'][1], resources_map) + return to_split.split(resource_json['Fn::Split'][0]) + + if 'Fn::Select' in resource_json: + select_index = int(resource_json['Fn::Select'][0]) + select_list = clean_json(resource_json['Fn::Select'][1], resources_map) + return select_list[select_index] + cleaned_json = {} for key, value in resource_json.items(): cleaned_val = clean_json(value, resources_map) diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index 610b02325..7b582b9b5 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -72,6 +72,19 @@ get_attribute_output = { } } +split_select_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Select": [ "1", {"Fn::Split": [ "-", "123-myqueue" ] } ] }, + "VisibilityTimeout": 60, + } + } + } +} + outputs_template = dict(list(dummy_template.items()) + list(output_dict.items())) bad_outputs_template = dict( @@ -85,6 +98,7 @@ output_type_template_json = json.dumps(outputs_template) bad_output_template_json = json.dumps(bad_outputs_template) 
get_attribute_outputs_template_json = json.dumps( get_attribute_outputs_template) +split_select_template_json = json.dumps(split_select_template) def test_parse_stack_resources(): @@ -266,3 +280,17 @@ def test_reference_other_conditions(): resources_map={}, condition_map={"OtherCondition": True}, ).should.equal(False) + + +def test_parse_split_and_select(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=split_select_template_json, + parameters={}, + region_name='us-west-1') + + stack.resource_map.should.have.length_of(1) + queue = stack.resource_map['Queue'] + queue.name.should.equal("myqueue") + From 711dbaf4fdc34017d5147988bece77ac8b2e68ec Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Thu, 8 Jun 2017 13:30:17 -0400 Subject: [PATCH 044/412] Simplify Fn::Join parsing --- moto/cloudformation/parsing.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 09b4530af..71a60371a 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -155,12 +155,8 @@ def clean_json(resource_json, resources_map): return clean_json(false_value, resources_map) if 'Fn::Join' in resource_json: - join_list = [] - for val in resource_json['Fn::Join'][1]: - cleaned_val = clean_json(val, resources_map) - join_list.append('{0}'.format(cleaned_val) - if cleaned_val else '{0}'.format(val)) - return resource_json['Fn::Join'][0].join(join_list) + join_list = clean_json(resource_json['Fn::Join'][1], resources_map) + return resource_json['Fn::Join'][0].join([str(x) for x in join_list]) if 'Fn::Split' in resource_json: to_split = clean_json(resource_json['Fn::Split'][1], resources_map) From d3faaad46b655ce50048d0d0819d618886804a43 Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Thu, 8 Jun 2017 15:21:32 -0400 Subject: [PATCH 045/412] Add Fn::Sub support --- moto/cloudformation/parsing.py | 20 +++++++++++ .../test_cloudformation/test_stack_parsing.py | 
33 +++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 71a60371a..744b1d08e 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -4,6 +4,7 @@ import functools import logging import copy import warnings +import re from moto.autoscaling import models as autoscaling_models from moto.awslambda import models as lambda_models @@ -167,6 +168,25 @@ def clean_json(resource_json, resources_map): select_list = clean_json(resource_json['Fn::Select'][1], resources_map) return select_list[select_index] + if 'Fn::Sub' in resource_json: + if isinstance(resource_json['Fn::Sub'], list): + warnings.warn( + "Tried to parse Fn::Sub with variable mapping but it's not supported by moto's CloudFormation implementation") + else: + fn_sub_value = clean_json(resource_json['Fn::Sub'], resources_map) + to_sub = re.findall('(?=\${)[^!^"]*?}', fn_sub_value) + literals = re.findall('(?=\${!)[^"]*?}', fn_sub_value) + for sub in to_sub: + if '.' 
in sub: + cleaned_ref = clean_json({'Fn::GetAtt': re.findall('(?<=\${)[^"]*?(?=})', sub)[0].split('.')}, resources_map) + else: + cleaned_ref = clean_json({'Ref': re.findall('(?<=\${)[^"]*?(?=})', sub)[0]}, resources_map) + fn_sub_value = fn_sub_value.replace(sub, cleaned_ref) + for literal in literals: + fn_sub_value = fn_sub_value.replace(literal, literal.replace('!', '')) + return fn_sub_value + pass + cleaned_json = {} for key, value in resource_json.items(): cleaned_val = clean_json(value, resources_map) diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index 7b582b9b5..594515468 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -85,6 +85,26 @@ split_select_template = { } } +sub_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue1": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Sub": '${AWS::StackName}-queue-${!Literal}'}, + "VisibilityTimeout": 60, + } + }, + "Queue2": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Sub": '${Queue1.QueueName}'}, + "VisibilityTimeout": 60, + } + }, + } +} + outputs_template = dict(list(dummy_template.items()) + list(output_dict.items())) bad_outputs_template = dict( @@ -99,6 +119,7 @@ bad_output_template_json = json.dumps(bad_outputs_template) get_attribute_outputs_template_json = json.dumps( get_attribute_outputs_template) split_select_template_json = json.dumps(split_select_template) +sub_template_json = json.dumps(sub_template) def test_parse_stack_resources(): @@ -294,3 +315,15 @@ def test_parse_split_and_select(): queue = stack.resource_map['Queue'] queue.name.should.equal("myqueue") + +def test_sub(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=sub_template_json, + parameters={}, + region_name='us-west-1') + + queue1 = stack.resource_map['Queue1'] + queue2 = stack.resource_map['Queue2'] + 
queue2.name.should.equal(queue1.name) From 8e4c79625c4008ee5382d506ad80248ee647f4df Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Thu, 8 Jun 2017 15:33:14 -0400 Subject: [PATCH 046/412] Clean Export name and value before appending to exports --- moto/cloudformation/parsing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 744b1d08e..8877b90c7 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -522,7 +522,9 @@ class OutputMap(collections.Mapping): if self.outputs: for key, value in self._output_json_map.items(): if value.get('Export'): - exports.append(Export(self._stack_id, value['Export'].get('Name'), value.get('Value'))) + cleaned_name = clean_json(value['Export'].get('Name'), self._resource_map) + cleaned_value = clean_json(value.get('Value'), self._resource_map) + exports.append(Export(self._stack_id, cleaned_name, cleaned_value)) return exports def create(self): From f5106f2cc811efdb4464e1de6ff4dacae7427839 Mon Sep 17 00:00:00 2001 From: Jessie Nadler Date: Thu, 8 Jun 2017 15:33:28 -0400 Subject: [PATCH 047/412] Add Fn::ImportValue support --- moto/cloudformation/models.py | 6 +- moto/cloudformation/parsing.py | 9 ++- .../test_cloudformation_stack_crud_boto3.py | 36 +++++++++++- .../test_cloudformation/test_stack_parsing.py | 55 ++++++++++++++++++- 4 files changed, 101 insertions(+), 5 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index c25103a4c..ec922d8f5 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -15,7 +15,7 @@ from .exceptions import ValidationError class FakeStack(BaseModel): - def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None): + def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None): 
self.stack_id = stack_id self.name = name self.template = template @@ -30,6 +30,7 @@ class FakeStack(BaseModel): resource_status_reason="User Initiated") self.description = self.template_dict.get('Description') + self.cross_stack_resources = cross_stack_resources or [] self.resource_map = self._create_resource_map() self.output_map = self._create_output_map() self._add_stack_event("CREATE_COMPLETE") @@ -37,7 +38,7 @@ class FakeStack(BaseModel): def _create_resource_map(self): resource_map = ResourceMap( - self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict) + self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict, self.cross_stack_resources) resource_map.create() return resource_map @@ -148,6 +149,7 @@ class CloudFormationBackend(BaseBackend): notification_arns=notification_arns, tags=tags, role_arn=role_arn, + cross_stack_resources=self.exports, ) self.stacks[stack_id] = new_stack self._validate_export_uniqueness(new_stack) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 8877b90c7..928cd68e0 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -187,6 +187,12 @@ def clean_json(resource_json, resources_map): return fn_sub_value pass + if 'Fn::ImportValue' in resource_json: + cleaned_val = clean_json(resource_json['Fn::ImportValue'], resources_map) + values = [x.value for x in resources_map.cross_stack_resources.values() if x.name == cleaned_val] + if any(values): + return values[0] + cleaned_json = {} for key, value in resource_json.items(): cleaned_val = clean_json(value, resources_map) @@ -326,13 +332,14 @@ class ResourceMap(collections.Mapping): each resources is passed this lazy map that it can grab dependencies from. 
""" - def __init__(self, stack_id, stack_name, parameters, tags, region_name, template): + def __init__(self, stack_id, stack_name, parameters, tags, region_name, template, cross_stack_resources): self._template = template self._resource_json_map = template['Resources'] self._region_name = region_name self.input_parameters = parameters self.tags = copy.deepcopy(tags) self.resolved_parameters = {} + self.cross_stack_resources = cross_stack_resources # Create the default resources self._parsed_resources = { diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index ba324985f..e428d1f63 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -5,7 +5,7 @@ import boto import boto.s3 import boto.s3.key from botocore.exceptions import ClientError -from moto import mock_cloudformation, mock_s3 +from moto import mock_cloudformation, mock_s3, mock_sqs import json import sure # noqa @@ -80,9 +80,23 @@ dummy_output_template = { } } +dummy_import_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::ImportValue": 'My VPC ID'}, + "VisibilityTimeout": 60, + } + } + } +} + dummy_template_json = json.dumps(dummy_template) dummy_update_template_json = json.dumps(dummy_template) dummy_output_template_json = json.dumps(dummy_output_template) +dummy_import_template_json = json.dumps(dummy_import_template) @mock_cloudformation @@ -501,3 +515,23 @@ def test_export_names_must_be_unique(): StackName="test_stack", TemplateBody=dummy_output_template_json, ) + +@mock_sqs +@mock_cloudformation +def test_stack_with_imports(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + ec2_resource = boto3.resource('sqs', region_name='us-east-1') + + output_stack = cf.create_stack( + 
StackName="test_stack1", + TemplateBody=dummy_output_template_json, + ) + import_stack = cf.create_stack( + StackName="test_stack2", + TemplateBody=dummy_import_template_json + ) + + output_stack.outputs.should.have.length_of(1) + output = output_stack.outputs[0]['OutputValue'] + queue = ec2_resource.get_queue_by_name(QueueName=output) + queue.should_not.be.none diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index 594515468..ee53e9a68 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -7,7 +7,7 @@ import sure # noqa from moto.cloudformation.exceptions import ValidationError from moto.cloudformation.models import FakeStack -from moto.cloudformation.parsing import resource_class_from_type, parse_condition +from moto.cloudformation.parsing import resource_class_from_type, parse_condition, Export from moto.sqs.models import Queue from moto.s3.models import FakeBucket from boto.cloudformation.stack import Output @@ -105,6 +105,38 @@ sub_template = { } } +export_value_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Sub": '${AWS::StackName}-queue'}, + "VisibilityTimeout": 60, + } + } + }, + "Outputs": { + "Output1": { + "Value": "value", + "Export": {"Name": 'queue-us-west-1'} + } + } +} + +import_value_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::ImportValue": 'queue-us-west-1'}, + "VisibilityTimeout": 60, + } + } + } +} + outputs_template = dict(list(dummy_template.items()) + list(output_dict.items())) bad_outputs_template = dict( @@ -120,6 +152,8 @@ get_attribute_outputs_template_json = json.dumps( get_attribute_outputs_template) split_select_template_json = json.dumps(split_select_template) sub_template_json = 
json.dumps(sub_template) +export_value_template_json = json.dumps(export_value_template) +import_value_template_json = json.dumps(import_value_template) def test_parse_stack_resources(): @@ -327,3 +361,22 @@ def test_sub(): queue1 = stack.resource_map['Queue1'] queue2 = stack.resource_map['Queue2'] queue2.name.should.equal(queue1.name) + + +def test_import(): + export_stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=export_value_template_json, + parameters={}, + region_name='us-west-1') + import_stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=import_value_template_json, + parameters={}, + region_name='us-west-1', + cross_stack_resources={export_stack.exports[0].value: export_stack.exports[0]}) + + queue = import_stack.resource_map['Queue'] + queue.name.should.equal("value") From d94d7f696218aca14571e289113b8bf1e7040ba1 Mon Sep 17 00:00:00 2001 From: Paul Carleton Date: Fri, 9 Jun 2017 12:27:49 -0700 Subject: [PATCH 048/412] Add propagated tags and ASG name tag to asg instances --- moto/autoscaling/models.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index ec46d1182..18ec7e35a 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -9,6 +9,7 @@ from moto.elb.exceptions import LoadBalancerNotFoundError # http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown DEFAULT_COOLDOWN = 300 +ASG_NAME_TAG = "aws:autoscaling:groupName" class InstanceState(object): @@ -169,8 +170,8 @@ class FakeAutoScalingGroup(BaseModel): self.termination_policies = termination_policies self.instance_states = [] - self.set_desired_capacity(desired_capacity) self.tags = tags if tags else [] + self.set_desired_capacity(desired_capacity) @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): @@ -261,12 +262,17 @@ class FakeAutoScalingGroup(BaseModel): # Need more 
instances count_needed = int(self.desired_capacity) - \ int(curr_instance_count) + + propagated_tags = {t['key']: t['value'] for t in self.tags + if t['propagate_at_launch'] == 'true'} + propagated_tags[ASG_NAME_TAG] = self.name reservation = self.autoscaling_backend.ec2_backend.add_instances( self.launch_config.image_id, count_needed, self.launch_config.user_data, self.launch_config.security_groups, instance_type=self.launch_config.instance_type, + tags={'instance': propagated_tags} ) for instance in reservation.instances: instance.autoscaling_group = self From dc0edb9b8cd0bf7e924f560940f1ad2a2862a4d5 Mon Sep 17 00:00:00 2001 From: Paul Carleton Date: Fri, 9 Jun 2017 13:10:00 -0700 Subject: [PATCH 049/412] Add test for asg tags --- tests/test_autoscaling/test_autoscaling.py | 26 +++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 8487ecb49..5cc697785 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -8,7 +8,7 @@ from boto.ec2.autoscale import Tag import boto.ec2.elb import sure # noqa -from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_autoscaling_deprecated +from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_autoscaling_deprecated, mock_ec2 from tests.helpers import requires_boto_gte @@ -138,6 +138,30 @@ def test_list_many_autoscaling_groups(): groups.should.have.length_of(51) assert 'NextToken' not in response2.keys() +@mock_autoscaling +@mock_ec2 +def test_list_many_autoscaling_groups(): + conn = boto3.client('autoscaling', region_name='us-east-1') + conn.create_launch_configuration(LaunchConfigurationName='TestLC') + + conn.create_auto_scaling_group(AutoScalingGroupName='TestGroup1', + MinSize=1, + MaxSize=2, + LaunchConfigurationName='TestLC', + Tags=[{ + "ResourceId": 'TestGroup1', + "ResourceType": 
"auto-scaling-group", + "PropagateAtLaunch": True, + "Key": 'TestTagKey1', + "Value": 'TestTagValue1' + }]) + + ec2 = boto3.client('ec2', region_name='us-east-1') + instances = ec2.describe_instances() + + tags = instances['Reservations'][0]['Instances'][0]['Tags'] + tags.should.contain({u'Value': 'TestTagValue1', u'Key': 'TestTagKey1'}) + tags.should.contain({u'Value': 'TestGroup1', u'Key': 'aws:autoscaling:groupName'}) @mock_autoscaling_deprecated def test_autoscaling_group_describe_filter(): From 5429f3590ec6e5fcd8b25adbcfa8534f343ae338 Mon Sep 17 00:00:00 2001 From: Paul Carleton Date: Fri, 9 Jun 2017 15:22:39 -0700 Subject: [PATCH 050/412] Fix linting problem --- moto/autoscaling/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 18ec7e35a..a2fcb2a63 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -11,6 +11,7 @@ DEFAULT_COOLDOWN = 300 ASG_NAME_TAG = "aws:autoscaling:groupName" + class InstanceState(object): def __init__(self, instance, lifecycle_state="InService"): From be07fbda523592c4d29b30453e1f182a9dce630a Mon Sep 17 00:00:00 2001 From: Greg Sterin Date: Fri, 9 Jun 2017 17:32:19 -0700 Subject: [PATCH 051/412] Support Expected in dynamoDB updateItem --- moto/dynamodb2/models.py | 31 ++++++++++- moto/dynamodb2/responses.py | 38 ++++++++++++- .../test_dynamodb_table_without_range_key.py | 55 +++++++++++++++++++ 3 files changed, 120 insertions(+), 4 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index d632119d9..7525a43a9 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -634,7 +634,8 @@ class DynamoDBBackend(BaseBackend): return table.scan(scan_filters, limit, exclusive_start_key) - def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values): + def update_item(self, table_name, key, update_expression, attribute_updates, 
expression_attribute_names, + expression_attribute_values, expected=None): table = self.get_table(table_name) if all([table.hash_key_attr in key, table.range_key_attr in key]): @@ -652,6 +653,34 @@ class DynamoDBBackend(BaseBackend): range_value = None item = table.get_item(hash_value, range_value) + + if item is None: + item_attr = {} + elif hasattr(item, 'attrs'): + item_attr = item.attrs + else: + item_attr = item + + if not expected: + expected = {} + + for key, val in expected.items(): + if 'Exists' in val and val['Exists'] is False: + if key in item_attr: + raise ValueError("The conditional request failed") + elif key not in item_attr: + raise ValueError("The conditional request failed") + elif 'Value' in val and DynamoType(val['Value']).value != item_attr[key].value: + raise ValueError("The conditional request failed") + elif 'ComparisonOperator' in val: + comparison_func = get_comparison_func( + val['ComparisonOperator']) + dynamo_types = [DynamoType(ele) for ele in val[ + "AttributeValueList"]] + for t in dynamo_types: + if not comparison_func(item_attr[key].value, t.value): + raise ValueError('The conditional request failed') + # Update does not fail on new items, so create one if item is None: data = { diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index aa5561f58..1d9b70043 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -207,7 +207,7 @@ class DynamoHandler(BaseResponse): try: result = dynamodb_backend2.put_item( name, item, expected, overwrite) - except Exception: + except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er) @@ -474,14 +474,46 @@ class DynamoHandler(BaseResponse): 'ExpressionAttributeValues', {}) existing_item = dynamodb_backend2.get_item(name, key) + if 'Expected' in self.body: + expected = self.body['Expected'] + else: + expected = None + + # Attempt to parse simple ConditionExpressions into an Expected + # expression + if not 
expected: + condition_expression = self.body.get('ConditionExpression') + if condition_expression and 'OR' not in condition_expression: + cond_items = [c.strip() + for c in condition_expression.split('AND')] + + if cond_items: + expected = {} + exists_re = re.compile('^attribute_exists\((.*)\)$') + not_exists_re = re.compile( + '^attribute_not_exists\((.*)\)$') + + for cond in cond_items: + exists_m = exists_re.match(cond) + not_exists_m = not_exists_re.match(cond) + if exists_m: + expected[exists_m.group(1)] = {'Exists': True} + elif not_exists_m: + expected[not_exists_m.group(1)] = {'Exists': False} + # Support spaces between operators in an update expression # E.g. `a = b + c` -> `a=b+c` if update_expression: update_expression = re.sub( '\s*([=\+-])\s*', '\\1', update_expression) - item = dynamodb_backend2.update_item( - name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values) + try: + item = dynamodb_backend2.update_item( + name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, + expected) + except ValueError: + er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' + return self.error(er) item_dict = item.to_json() item_dict['ConsumedCapacityUnits'] = 0.5 diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 4f08c5094..0e1099559 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -608,6 +608,61 @@ def test_boto3_put_item_conditions_fails(): } }).should.throw(botocore.client.ClientError) +@mock_dynamodb2 +def test_boto3_update_item_conditions_fails(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) + table.update_item.when.called_with( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=bar', + Expected={ + 'foo': 
{ + 'Value': 'bar', + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_fails_because_expect_not_exists(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) + table.update_item.when.called_with( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=bar', + Expected={ + 'foo': { + 'Exists': False + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=baz', + Expected={ + 'foo': { + 'Value': 'bar', + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass_because_expext_not_exists(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=baz', + Expected={ + 'whatever': { + 'Exists': False, + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") @mock_dynamodb2 def test_boto3_put_item_conditions_pass(): From c8794e842d89991224d92a11de9d364948a5685f Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Mon, 12 Jun 2017 16:42:42 -0700 Subject: [PATCH 052/412] create_load_balancer requires port definitions Throw the appropriate error when defining a loadbalancer with no ports --- moto/elb/exceptions.py | 8 ++++++++ moto/elb/models.py | 7 +++++-- tests/test_elb/test_elb.py | 14 ++++++++++++++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/moto/elb/exceptions.py b/moto/elb/exceptions.py index 897bd6dd1..071181a6c 100644 --- a/moto/elb/exceptions.py +++ b/moto/elb/exceptions.py @@ -47,3 +47,11 
@@ class DuplicateLoadBalancerName(ELBClientError): "DuplicateLoadBalancerName", "The specified load balancer name already exists for this account: {0}" .format(name)) + + +class EmptyListenersError(ELBClientError): + + def __init__(self): + super(EmptyListenersError, self).__init__( + "ValidationError", + "Listeners cannot be empty") diff --git a/moto/elb/models.py b/moto/elb/models.py index 9ca6bdb4d..5b6a58bb9 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -16,10 +16,11 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.ec2.models import ec2_backends from .exceptions import ( - LoadBalancerNotFoundError, - TooManyTagsError, BadHealthCheckDefinition, DuplicateLoadBalancerName, + EmptyListenersError, + LoadBalancerNotFoundError, + TooManyTagsError, ) @@ -239,6 +240,8 @@ class ELBBackend(BaseBackend): vpc_id = subnet.vpc_id if name in self.load_balancers: raise DuplicateLoadBalancerName(name) + if not ports: + raise EmptyListenersError() new_load_balancer = FakeLoadBalancer( name=name, zones=zones, ports=ports, scheme=scheme, subnets=subnets, vpc_id=vpc_id) self.load_balancers[name] = new_load_balancer diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index f413e4731..36f96c0e2 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -15,7 +15,9 @@ from boto.ec2.elb.policies import ( LBCookieStickinessPolicy, OtherPolicy, ) +from botocore.exceptions import ClientError from boto.exception import BotoServerError +from nose.tools import assert_raises import sure # noqa from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated @@ -109,6 +111,18 @@ def test_create_and_delete_boto3_support(): 'LoadBalancerDescriptions']).should.have.length_of(0) +@mock_elb +def test_create_load_balancer_with_no_listeners_defined(): + client = boto3.client('elb', region_name='us-east-1') + + with assert_raises(ClientError): + client.create_load_balancer( + 
LoadBalancerName='my-lb', + Listeners=[], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + @mock_elb def test_describe_paginated_balancers(): client = boto3.client('elb', region_name='us-east-1') From 559a863d7f382d7d85e7ec213df90d60fa006ab0 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Tue, 13 Jun 2017 17:09:09 -0700 Subject: [PATCH 053/412] Include db_name when describing RDS instances --- moto/rds2/models.py | 1 + tests/test_rds2/test_rds2.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index eda181f40..4036cdcd1 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -131,6 +131,7 @@ class Database(BaseModel): template = Template(""" {{ database.backup_retention_period }} {{ database.status }} + {% if database.db_name %}{{ database.db_name }}{% endif %} {{ database.multi_az }} {{ database.db_instance_identifier }} diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 7eadf2d36..81c0deee6 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -14,12 +14,14 @@ def test_create_database(): database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', AllocatedStorage=10, Engine='postgres', + DBName='staging-postgres', DBInstanceClass='db.m1.small', MasterUsername='root', MasterUserPassword='hunter2', Port=1234, DBSecurityGroups=["my_sg"]) database['DBInstance']['DBInstanceStatus'].should.equal('available') + database['DBInstance']['DBName'].should.equal('staging-postgres') database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") database['DBInstance']['AllocatedStorage'].should.equal(10) database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") From a0471b04072d9538aa885c97964f22617e9ac879 Mon Sep 17 00:00:00 2001 From: Peter Gorniak Date: Thu, 15 Jun 2017 15:34:58 -0700 Subject: [PATCH 054/412] add comment about splitting update expression by operator keywords --- moto/dynamodb2/models.py | 3 +++ 1 file 
changed, 3 insertions(+) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 32dbfadbd..e6f050781 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -116,7 +116,10 @@ class Item(BaseModel): } def update(self, update_expression, expression_attribute_names, expression_attribute_values): + # Update subexpressions are identifiable by the operator keyword, so split on that and + # get rid of the empty leading string. parts = [p for p in re.split(r'\b(SET|REMOVE|ADD|DELETE)\b', update_expression) if p] + # make sure that we correctly found only operator/value pairs assert len(parts) % 2 == 0, "Mismatched operators and values in update expression: '{}'".format(update_expression) for action, valstr in zip(parts[:-1:2], parts[1::2]): values = valstr.split(',') From c118d12e6fa5e8791133c8135c73906e1164a8b4 Mon Sep 17 00:00:00 2001 From: Declan Shanaghy Date: Mon, 19 Jun 2017 18:22:33 -0700 Subject: [PATCH 055/412] Add describe_parameters support --- moto/ssm/models.py | 6 ++++++ moto/ssm/responses.py | 14 ++++++++++++++ setup.py | 2 +- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 3344623dd..cb4d5946e 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -46,6 +46,12 @@ class SimpleSystemManagerBackend(BaseBackend): except KeyError: pass + def get_all_parameters(self): + result = [] + for k, _ in self._parameters.iteritems(): + result.append(self._parameters[k]) + return result + def get_parameters(self, names, with_decryption): result = [] for name in names: diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index ee21d7380..6c53bf039 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -43,6 +43,20 @@ class SimpleSystemManagerResponse(BaseResponse): return json.dumps(response) + def describe_parameters(self): + # filters = self._get_param('Filters') + result = self.ssm_backend.get_all_parameters() + + response = { + 'Parameters': [], + } + + for 
parameter in result: + param_data = parameter.response_object(False) + response['Parameters'].append(param_data) + + return json.dumps(response) + def put_parameter(self): name = self._get_param('Name') description = self._get_param('Description') diff --git a/setup.py b/setup.py index 289c1684c..2da16557c 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.0.1', + version='1.0.1.1', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From db20dfcd82034ca49d98ed3d112bc454bbacf1d4 Mon Sep 17 00:00:00 2001 From: Declan Shanaghy Date: Tue, 20 Jun 2017 11:47:53 -0700 Subject: [PATCH 056/412] Added filtering --- moto/ssm/models.py | 5 +- moto/ssm/responses.py | 51 +++++++++++- setup.py | 2 +- tests/test_ssm/test_ssm_boto3.py | 137 +++++++++++++++++++++++++++++++ 4 files changed, 190 insertions(+), 5 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index cb4d5946e..4efa22817 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -28,11 +28,14 @@ class Parameter(BaseModel): return value[len(prefix):] def response_object(self, decrypt=False): - return { + r = { 'Name': self.name, 'Type': self.type, 'Value': self.decrypt(self.value) if decrypt else self.value } + if self.keyid: + r['KeyId'] = self.keyid + return r class SimpleSystemManagerBackend(BaseBackend): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 6c53bf039..f4ed9561d 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -44,16 +44,61 @@ class SimpleSystemManagerResponse(BaseResponse): return json.dumps(response) def describe_parameters(self): - # filters = self._get_param('Filters') + page_size = 10 + filters = self._get_param('Filters') + token = self._get_param('NextToken') + if hasattr(token, 'strip'): + token = token.strip() + if not token: + token = '0' + + token = int(token) + + result = self.ssm_backend.get_all_parameters() response = { 
'Parameters': [], } - for parameter in result: + end = token + page_size + for parameter in result[token:]: param_data = parameter.response_object(False) - response['Parameters'].append(param_data) + add = False + + if filters: + for filter in filters: + if filter['Key'] == 'Name': + k = param_data['Name'] + for v in filter['Values']: + if k.startswith(v): + add = True + break + elif filter['Key'] == 'Type': + k = param_data['Type'] + for v in filter['Values']: + if k == v: + add = True + break + elif filter['Key'] == 'KeyId': + k = param_data.get('KeyId') + if k: + for v in filter['Values']: + if k == v: + add = True + break + else: + add = True + + if add: + response['Parameters'].append(param_data) + + token = token + 1 + if len(response['Parameters']) == page_size: + response['NextToken'] = str(end) + break + + return json.dumps(response) diff --git a/setup.py b/setup.py index 2da16557c..b00567895 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.0.1.1', + version='1.0.1.2', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 6b8a1a369..8b5d1f200 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -47,6 +47,143 @@ def test_put_parameter(): response['Parameters'][0]['Type'].should.equal('String') +@mock_ssm +def test_describe_parameters(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + response = client.describe_parameters() + + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Name'].should.equal('test') + response['Parameters'][0]['Type'].should.equal('String') + + +@mock_ssm +def test_describe_parameters_paging(): + client = boto3.client('ssm', region_name='us-east-1') + + for 
i in range(50): + client.put_parameter( + Name="param-%d" % i, + Value="value-%d" % i, + Type="String" + ) + + response = client.describe_parameters() + len(response['Parameters']).should.equal(10) + response['NextToken'].should.equal('10') + + response = client.describe_parameters(NextToken=response['NextToken']) + len(response['Parameters']).should.equal(10) + response['NextToken'].should.equal('20') + + response = client.describe_parameters(NextToken=response['NextToken']) + len(response['Parameters']).should.equal(10) + response['NextToken'].should.equal('30') + + response = client.describe_parameters(NextToken=response['NextToken']) + len(response['Parameters']).should.equal(10) + response['NextToken'].should.equal('40') + + response = client.describe_parameters(NextToken=response['NextToken']) + len(response['Parameters']).should.equal(10) + response['NextToken'].should.equal('50') + + response = client.describe_parameters(NextToken=response['NextToken']) + len(response['Parameters']).should.equal(0) + ''.should.equal(response.get('NextToken', '')) + +@mock_ssm +def test_describe_parameters_filter_names(): + client = boto3.client('ssm', region_name='us-east-1') + + for i in range(50): + p = { + 'Name': "param-%d" % i, + 'Value': "value-%d" % i, + 'Type': "String" + } + if i % 5 == 0: + p['Type'] = 'SecureString' + p['KeyId'] = 'a key' + client.put_parameter(**p) + + + response = client.describe_parameters(Filters=[ + { + 'Key': 'Name', + 'Values': ['param-45', 'param-22'] + }, + ]) + len(response['Parameters']).should.equal(2) + response['Parameters'][0]['Name'].should.equal('param-22') + response['Parameters'][0]['Type'].should.equal('String') + response['Parameters'][1]['Name'].should.equal('param-45') + response['Parameters'][1]['Type'].should.equal('SecureString') + ''.should.equal(response.get('NextToken', '')) + +@mock_ssm +def test_describe_parameters_filter_type(): + client = boto3.client('ssm', region_name='us-east-1') + + for i in range(50): + p = { 
+ 'Name': "param-%d" % i, + 'Value': "value-%d" % i, + 'Type': "String" + } + if i % 5 == 0: + p['Type'] = 'SecureString' + p['KeyId'] = 'a key' + client.put_parameter(**p) + + + response = client.describe_parameters(Filters=[ + { + 'Key': 'Type', + 'Values': ['SecureString'] + }, + ]) + len(response['Parameters']).should.equal(10) + response['Parameters'][0]['Name'].should.equal('param-35') + response['Parameters'][0]['Type'].should.equal('SecureString') + '10'.should.equal(response.get('NextToken', '')) + +@mock_ssm +def test_describe_parameters_filter_keyid(): + client = boto3.client('ssm', region_name='us-east-1') + + for i in range(50): + p = { + 'Name': "param-%d" % i, + 'Value': "value-%d" % i, + 'Type': "String" + } + if i % 5 == 0: + p['Type'] = 'SecureString' + p['KeyId'] = "key:%d" % i + client.put_parameter(**p) + + + response = client.describe_parameters(Filters=[ + { + 'Key': 'KeyId', + 'Values': ['key:5','key:10'] + }, + ]) + len(response['Parameters']).should.equal(2) + response['Parameters'][0]['Name'].should.equal('param-10') + response['Parameters'][0]['Type'].should.equal('SecureString') + response['Parameters'][1]['Name'].should.equal('param-5') + response['Parameters'][1]['Type'].should.equal('SecureString') + ''.should.equal(response.get('NextToken', '')) + @mock_ssm def test_put_parameter_secure_default_kms(): client = boto3.client('ssm', region_name='us-east-1') From 05ddcef2a023a88018e42249a7364a7410479dc4 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Tue, 20 Jun 2017 11:58:18 -0700 Subject: [PATCH 057/412] Re-enabling tests on Python3 --- tests/test_rds/test_rds.py | 10 ------ tests/test_rds2/test_rds2.py | 63 +----------------------------------- 2 files changed, 1 insertion(+), 72 deletions(-) diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 0a474ee26..5bf733dc6 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -10,7 +10,6 @@ from moto import mock_ec2_deprecated, 
mock_rds_deprecated, mock_rds from tests.helpers import disable_on_py3 -@disable_on_py3() @mock_rds_deprecated def test_create_database(): conn = boto.rds.connect_to_region("us-west-2") @@ -28,7 +27,6 @@ def test_create_database(): database.security_groups[0].name.should.equal('my_sg') -@disable_on_py3() @mock_rds_deprecated def test_get_databases(): conn = boto.rds.connect_to_region("us-west-2") @@ -46,7 +44,6 @@ def test_get_databases(): databases[0].id.should.equal("db-master-1") -@disable_on_py3() @mock_rds def test_get_databases_paginated(): conn = boto3.client('rds', region_name="us-west-2") @@ -73,7 +70,6 @@ def test_describe_non_existant_database(): "not-a-db").should.throw(BotoServerError) -@disable_on_py3() @mock_rds_deprecated def test_delete_database(): conn = boto.rds.connect_to_region("us-west-2") @@ -158,7 +154,6 @@ def test_security_group_authorize(): security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32') -@disable_on_py3() @mock_rds_deprecated def test_add_security_group_to_database(): conn = boto.rds.connect_to_region("us-west-2") @@ -227,7 +222,6 @@ def test_delete_database_subnet_group(): "db_subnet1").should.throw(BotoServerError) -@disable_on_py3() @mock_ec2_deprecated @mock_rds_deprecated def test_create_database_in_subnet_group(): @@ -245,7 +239,6 @@ def test_create_database_in_subnet_group(): database.subnet_group.name.should.equal("db_subnet1") -@disable_on_py3() @mock_rds_deprecated def test_create_database_replica(): conn = boto.rds.connect_to_region("us-west-2") @@ -271,7 +264,6 @@ def test_create_database_replica(): list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) -@disable_on_py3() @mock_rds_deprecated def test_create_cross_region_database_replica(): west_1_conn = boto.rds.connect_to_region("us-west-1") @@ -299,7 +291,6 @@ def test_create_cross_region_database_replica(): list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) -@disable_on_py3() @mock_rds_deprecated def 
test_connecting_to_us_east_1(): # boto does not use us-east-1 in the URL for RDS, @@ -320,7 +311,6 @@ def test_connecting_to_us_east_1(): database.security_groups[0].name.should.equal('my_sg') -@disable_on_py3() @mock_rds_deprecated def test_create_database_with_iops(): conn = boto.rds.connect_to_region("us-west-2") diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 81c0deee6..148b00aa1 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -4,10 +4,8 @@ from botocore.exceptions import ClientError, ParamValidationError import boto3 import sure # noqa from moto import mock_ec2, mock_kms, mock_rds2 -from tests.helpers import disable_on_py3 -@disable_on_py3() @mock_rds2 def test_create_database(): conn = boto3.client('rds', region_name='us-west-2') @@ -32,7 +30,6 @@ def test_create_database(): 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') -@disable_on_py3() @mock_rds2 def test_get_databases(): conn = boto3.client('rds', region_name='us-west-2') @@ -67,7 +64,6 @@ def test_get_databases(): 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') -@disable_on_py3() @mock_rds2 def test_get_databases_paginated(): conn = boto3.client('rds', region_name="us-west-2") @@ -86,7 +82,7 @@ def test_get_databases_paginated(): resp2 = conn.describe_db_instances(Marker=resp["Marker"]) resp2["DBInstances"].should.have.length_of(1) -@disable_on_py3() + @mock_rds2 def test_describe_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') @@ -94,7 +90,6 @@ def test_describe_non_existant_database(): DBInstanceIdentifier="not-a-db").should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_modify_db_instance(): conn = boto3.client('rds', region_name='us-west-2') @@ -115,7 +110,6 @@ def test_modify_db_instance(): instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) -@disable_on_py3() @mock_rds2 def test_modify_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') @@ -124,7 
+118,6 @@ def test_modify_non_existant_database(): ApplyImmediately=True).should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_reboot_db_instance(): conn = boto3.client('rds', region_name='us-west-2') @@ -140,7 +133,6 @@ def test_reboot_db_instance(): database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") -@disable_on_py3() @mock_rds2 def test_reboot_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') @@ -148,7 +140,6 @@ def test_reboot_non_existant_database(): DBInstanceIdentifier="not-a-db").should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_delete_database(): conn = boto3.client('rds', region_name='us-west-2') @@ -170,7 +161,6 @@ def test_delete_database(): list(instances['DBInstances']).should.have.length_of(0) -@disable_on_py3() @mock_rds2 def test_delete_non_existant_database(): conn = boto3.client('rds2', region_name="us-west-2") @@ -178,7 +168,6 @@ def test_delete_non_existant_database(): DBInstanceIdentifier="not-a-db").should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_create_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -193,7 +182,6 @@ def test_create_option_group(): option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') -@disable_on_py3() @mock_rds2 def test_create_option_group_bad_engine_name(): conn = boto3.client('rds', region_name='us-west-2') @@ -203,7 +191,6 @@ def test_create_option_group_bad_engine_name(): OptionGroupDescription='test invalid engine').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_create_option_group_bad_engine_major_version(): conn = boto3.client('rds', region_name='us-west-2') @@ -213,7 +200,6 @@ def test_create_option_group_bad_engine_major_version(): OptionGroupDescription='test invalid engine version').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_create_option_group_empty_description(): conn = boto3.client('rds', region_name='us-west-2') @@ -223,7 +209,6 @@ def 
test_create_option_group_empty_description(): OptionGroupDescription='').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_create_option_group_duplicate(): conn = boto3.client('rds', region_name='us-west-2') @@ -237,7 +222,6 @@ def test_create_option_group_duplicate(): OptionGroupDescription='test option group').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_describe_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -250,7 +234,6 @@ def test_describe_option_group(): 'OptionGroupName'].should.equal('test') -@disable_on_py3() @mock_rds2 def test_describe_non_existant_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -258,7 +241,6 @@ def test_describe_non_existant_option_group(): OptionGroupName="not-a-option-group").should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_delete_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -274,7 +256,6 @@ def test_delete_option_group(): OptionGroupName='test').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_delete_non_existant_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -282,7 +263,6 @@ def test_delete_non_existant_option_group(): OptionGroupName='non-existant').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_describe_option_group_options(): conn = boto3.client('rds', region_name='us-west-2') @@ -301,7 +281,6 @@ def test_describe_option_group_options(): EngineName='mysql', MajorEngineVersion='non-existent').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_modify_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -317,7 +296,6 @@ def test_modify_option_group(): result['OptionGroup']['OptionGroupName'].should.equal('test') -@disable_on_py3() @mock_rds2 def test_modify_option_group_no_options(): conn = boto3.client('rds', region_name='us-west-2') @@ -327,7 +305,6 @@ def test_modify_option_group_no_options(): 
OptionGroupName='test').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_modify_non_existant_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -335,7 +312,6 @@ def test_modify_non_existant_option_group(): 'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) -@disable_on_py3() @mock_rds2 def test_delete_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') @@ -343,7 +319,6 @@ def test_delete_non_existant_database(): DBInstanceIdentifier="not-a-db").should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_list_tags_invalid_arn(): conn = boto3.client('rds', region_name='us-west-2') @@ -351,7 +326,6 @@ def test_list_tags_invalid_arn(): ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_list_tags_db(): conn = boto3.client('rds', region_name='us-west-2') @@ -385,7 +359,6 @@ def test_list_tags_db(): 'Key': 'foo1'}]) -@disable_on_py3() @mock_rds2 def test_add_tags_db(): conn = boto3.client('rds', region_name='us-west-2') @@ -426,7 +399,6 @@ def test_add_tags_db(): list(result['TagList']).should.have.length_of(3) -@disable_on_py3() @mock_rds2 def test_remove_tags_db(): conn = boto3.client('rds', region_name='us-west-2') @@ -458,7 +430,6 @@ def test_remove_tags_db(): len(result['TagList']).should.equal(1) -@disable_on_py3() @mock_rds2 def test_add_tags_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -484,7 +455,6 @@ def test_add_tags_option_group(): list(result['TagList']).should.have.length_of(2) -@disable_on_py3() @mock_rds2 def test_remove_tags_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -514,7 +484,6 @@ def test_remove_tags_option_group(): list(result['TagList']).should.have.length_of(1) -@disable_on_py3() @mock_rds2 def test_create_database_security_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -527,7 
+496,6 @@ def test_create_database_security_group(): result['DBSecurityGroup']['IPRanges'].should.equal([]) -@disable_on_py3() @mock_rds2 def test_get_security_groups(): conn = boto3.client('rds', region_name='us-west-2') @@ -548,7 +516,6 @@ def test_get_security_groups(): result['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1") -@disable_on_py3() @mock_rds2 def test_get_non_existant_security_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -556,7 +523,6 @@ def test_get_non_existant_security_group(): DBSecurityGroupName="not-a-sg").should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_delete_database_security_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -571,7 +537,6 @@ def test_delete_database_security_group(): result['DBSecurityGroups'].should.have.length_of(0) -@disable_on_py3() @mock_rds2 def test_delete_non_existant_security_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -579,7 +544,6 @@ def test_delete_non_existant_security_group(): DBSecurityGroupName="not-a-db").should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_security_group_authorize(): conn = boto3.client('rds', region_name='us-west-2') @@ -605,7 +569,6 @@ def test_security_group_authorize(): ]) -@disable_on_py3() @mock_rds2 def test_add_security_group_to_database(): conn = boto3.client('rds', region_name='us-west-2') @@ -629,7 +592,6 @@ def test_add_security_group_to_database(): 'DBSecurityGroupName'].should.equal('db_sg') -@disable_on_py3() @mock_rds2 def test_list_tags_security_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -651,7 +613,6 @@ def test_list_tags_security_group(): 'Key': 'foo1'}]) -@disable_on_py3() @mock_rds2 def test_add_tags_security_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -676,7 +637,6 @@ def test_add_tags_security_group(): 'Key': 'foo1'}]) -@disable_on_py3() @mock_rds2 def test_remove_tags_security_group(): conn = boto3.client('rds', 
region_name='us-west-2') @@ -698,7 +658,6 @@ def test_remove_tags_security_group(): result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) -@disable_on_py3() @mock_ec2 @mock_rds2 def test_create_database_subnet_group(): @@ -723,7 +682,6 @@ def test_create_database_subnet_group(): list(subnet_group_ids).should.equal(subnet_ids) -@disable_on_py3() @mock_ec2 @mock_rds2 def test_create_database_in_subnet_group(): @@ -749,7 +707,6 @@ def test_create_database_in_subnet_group(): 'DBSubnetGroupName'].should.equal('db_subnet1') -@disable_on_py3() @mock_ec2 @mock_rds2 def test_describe_database_subnet_group(): @@ -779,7 +736,6 @@ def test_describe_database_subnet_group(): DBSubnetGroupName="not-a-subnet").should.throw(ClientError) -@disable_on_py3() @mock_ec2 @mock_rds2 def test_delete_database_subnet_group(): @@ -806,7 +762,6 @@ def test_delete_database_subnet_group(): DBSubnetGroupName="db_subnet1").should.throw(ClientError) -@disable_on_py3() @mock_ec2 @mock_rds2 def test_list_tags_database_subnet_group(): @@ -834,7 +789,6 @@ def test_list_tags_database_subnet_group(): 'Key': 'foo1'}]) -@disable_on_py3() @mock_ec2 @mock_rds2 def test_add_tags_database_subnet_group(): @@ -866,7 +820,6 @@ def test_add_tags_database_subnet_group(): 'Key': 'foo1'}]) -@disable_on_py3() @mock_ec2 @mock_rds2 def test_remove_tags_database_subnet_group(): @@ -894,7 +847,6 @@ def test_remove_tags_database_subnet_group(): result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) -@disable_on_py3() @mock_rds2 def test_create_database_replica(): conn = boto3.client('rds', region_name='us-west-2') @@ -928,7 +880,6 @@ def test_create_database_replica(): 'ReadReplicaDBInstanceIdentifiers'].should.equal([]) -@disable_on_py3() @mock_rds2 @mock_kms def test_create_database_with_encrypted_storage(): @@ -954,7 +905,6 @@ def test_create_database_with_encrypted_storage(): key['KeyMetadata']['KeyId']) -@disable_on_py3() @mock_rds2 def test_create_db_parameter_group(): conn = 
boto3.client('rds', region_name='us-west-2') @@ -970,7 +920,6 @@ def test_create_db_parameter_group(): 'Description'].should.equal('test parameter group') -@disable_on_py3() @mock_rds2 def test_create_db_instance_with_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -994,7 +943,6 @@ def test_create_db_instance_with_parameter_group(): 'ParameterApplyStatus'].should.equal('in-sync') -@disable_on_py3() @mock_rds2 def test_create_database_with_default_port(): conn = boto3.client('rds', region_name='us-west-2') @@ -1008,7 +956,6 @@ def test_create_database_with_default_port(): database['DBInstance']['Endpoint']['Port'].should.equal(5432) -@disable_on_py3() @mock_rds2 def test_modify_db_instance_with_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -1042,7 +989,6 @@ def test_modify_db_instance_with_parameter_group(): 'ParameterApplyStatus'].should.equal('in-sync') -@disable_on_py3() @mock_rds2 def test_create_db_parameter_group_empty_description(): conn = boto3.client('rds', region_name='us-west-2') @@ -1051,7 +997,6 @@ def test_create_db_parameter_group_empty_description(): Description='').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_create_db_parameter_group_duplicate(): conn = boto3.client('rds', region_name='us-west-2') @@ -1063,7 +1008,6 @@ def test_create_db_parameter_group_duplicate(): Description='test parameter group').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_describe_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -1076,7 +1020,6 @@ def test_describe_db_parameter_group(): 'DBParameterGroupName'].should.equal('test') -@disable_on_py3() @mock_rds2 def test_describe_non_existant_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -1085,7 +1028,6 @@ def test_describe_non_existant_db_parameter_group(): len(db_parameter_groups['DBParameterGroups']).should.equal(0) -@disable_on_py3() @mock_rds2 def 
test_delete_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -1102,7 +1044,6 @@ def test_delete_db_parameter_group(): len(db_parameter_groups['DBParameterGroups']).should.equal(0) -@disable_on_py3() @mock_rds2 def test_modify_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -1128,7 +1069,6 @@ def test_modify_db_parameter_group(): db_parameters['Parameters'][0]['ApplyMethod'].should.equal('immediate') -@disable_on_py3() @mock_rds2 def test_delete_non_existant_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -1136,7 +1076,6 @@ def test_delete_non_existant_db_parameter_group(): DBParameterGroupName='non-existant').should.throw(ClientError) -@disable_on_py3() @mock_rds2 def test_create_parameter_group_with_tags(): conn = boto3.client('rds', region_name='us-west-2') From f2d64e86395864a355d660ba023a1a7f27916d00 Mon Sep 17 00:00:00 2001 From: Declan Shanaghy Date: Tue, 20 Jun 2017 12:03:50 -0700 Subject: [PATCH 058/412] Revert version bump --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b00567895..289c1684c 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.0.1.2', + version='1.0.1', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 5a4b2139501b1ec695afac6970615efd61686010 Mon Sep 17 00:00:00 2001 From: Declan Shanaghy Date: Tue, 20 Jun 2017 12:38:52 -0700 Subject: [PATCH 059/412] Remove blank lines --- moto/ssm/responses.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index f4ed9561d..09fe6d0c2 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -51,12 +51,9 @@ class SimpleSystemManagerResponse(BaseResponse): token = token.strip() if not token: token = '0' - token = int(token) - result = self.ssm_backend.get_all_parameters() - response = { 
'Parameters': [], } @@ -98,8 +95,6 @@ class SimpleSystemManagerResponse(BaseResponse): response['NextToken'] = str(end) break - - return json.dumps(response) def put_parameter(self): From b67e10d5c9e89f6e9531057ffdd847ba56dec4ba Mon Sep 17 00:00:00 2001 From: William Richard Date: Tue, 20 Jun 2017 15:32:32 -0400 Subject: [PATCH 060/412] Make sure the repository response_object is json serializable with images If images had been pushed to a repository, they would be included in the response object, and the json encoder could not serialize the Image class. Since they are not included in the boto response, I just deleted the images field from the response object for Repositories. I also found a duplicate test in the ecr class, so I removed one of them. --- moto/ecr/models.py | 2 +- tests/test_ecr/test_ecr_boto3.py | 41 ++++++++++++++++---------------- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/moto/ecr/models.py b/moto/ecr/models.py index 82ce2ebd6..cbe8b2565 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -58,7 +58,7 @@ class Repository(BaseObject): response_object['repositoryName'] = self.name response_object['repositoryUri'] = self.uri # response_object['createdAt'] = self.created - del response_object['arn'], response_object['name'] + del response_object['arn'], response_object['name'], response_object['images'] return response_object @classmethod diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 1191c42d2..3a32c1515 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -152,6 +152,23 @@ def test_describe_repositories_4(): len(response['repositories']).should.equal(0) +@mock_ecr +def test_describe_repositories_with_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), 
+ imageTag='latest' + ) + + response = client.describe_repositories(repositoryNames=['test_repository']) + len(response['repositories']).should.equal(1) + + @mock_ecr def test_delete_repository(): client = boto3.client('ecr', region_name='us-east-1') @@ -177,14 +194,17 @@ def test_put_image(): _ = client.create_repository( repositoryName='test_repository' ) + response = client.put_image( repositoryName='test_repository', imageManifest=json.dumps(_create_image_manifest()), imageTag='latest' ) - response['image']['repositoryName'].should.equal('test_repository') response['image']['imageId']['imageTag'].should.equal('latest') + response['image']['imageId']['imageDigest'].should.contain("sha") + response['image']['repositoryName'].should.equal('test_repository') + response['image']['registryId'].should.equal('012345678910') @mock_ecr @@ -294,22 +314,3 @@ def test_describe_images(): response['imageDetails'][0]['imageSizeInBytes'].should.equal(52428800) response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) - - -@mock_ecr -def test_put_image(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - response = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - response['image']['imageId']['imageTag'].should.equal('latest') - response['image']['imageId']['imageDigest'].should.contain("sha") - response['image']['repositoryName'].should.equal('test_repository') - response['image']['registryId'].should.equal('012345678910') \ No newline at end of file From f0fae81af1f522e7344708e5117b70d2ae7957c1 Mon Sep 17 00:00:00 2001 From: Declan Shanaghy Date: Tue, 20 Jun 2017 12:55:01 -0700 Subject: [PATCH 061/412] Fix iteritems --- moto/ssm/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/ssm/models.py 
b/moto/ssm/models.py index 4efa22817..f1aac336b 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -51,7 +51,7 @@ class SimpleSystemManagerBackend(BaseBackend): def get_all_parameters(self): result = [] - for k, _ in self._parameters.iteritems(): + for k, _ in self._parameters.items(): result.append(self._parameters[k]) return result From 3f20ad2c13acbf1dce8f7decacb3a7a6d30e9db6 Mon Sep 17 00:00:00 2001 From: William Richard Date: Tue, 20 Jun 2017 16:22:34 -0400 Subject: [PATCH 062/412] Support filtering by image id or image tag when describing ecr images --- moto/ecr/models.py | 19 ++++++++--- moto/ecr/responses.py | 3 +- tests/test_ecr/test_ecr_boto3.py | 54 ++++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 5 deletions(-) diff --git a/moto/ecr/models.py b/moto/ecr/models.py index cbe8b2565..b90700ff4 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -193,16 +193,27 @@ class ECRBackend(BaseBackend): images.append(image) return images - def describe_images(self, repository_name, registry_id=None, image_id=None): + def describe_images(self, repository_name, registry_id=None, image_ids=None): if repository_name in self.repositories: repository = self.repositories[repository_name] else: raise Exception("{0} is not a repository".format(repository_name)) - response = [] - for image in repository.images: - response.append(image) + if image_ids: + response = set() + for image_id in image_ids: + if 'imageDigest' in image_id: + desired_digest = image_id['imageDigest'] + response.update([i for i in repository.images if i.get_image_digest() == desired_digest]) + if 'imageTag' in image_id: + desired_tag = image_id['imageTag'] + response.update([i for i in repository.images if i.image_tag == desired_tag]) + else: + response = [] + for image in repository.images: + response.append(image) + return response def put_image(self, repository_name, image_manifest, image_tag): diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index 
f8b1606cc..40d8cfb66 100644 --- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py @@ -69,7 +69,8 @@ class ECRResponse(BaseResponse): def describe_images(self): repository_str = self._get_param('repositoryName') registry_id = self._get_param('registryId') - images = self.ecr_backend.describe_images(repository_str, registry_id) + image_ids = self._get_param('imageIds') + images = self.ecr_backend.describe_images(repository_str, registry_id, image_ids) return json.dumps({ 'imageDetails': [image.response_describe_object for image in images], }) diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 3a32c1515..647015446 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -314,3 +314,57 @@ def test_describe_images(): response['imageDetails'][0]['imageSizeInBytes'].should.equal(52428800) response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) + + +@mock_ecr +def test_describe_images_by_tag(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + tag_map = {} + for tag in ['latest', 'v1', 'v2']: + put_response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag=tag + ) + tag_map[tag] = put_response['image'] + + for tag, put_response in tag_map.items(): + response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + image_detail['registryId'].should.equal("012345678910") + image_detail['repositoryName'].should.equal("test_repository") + image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) + image_detail['imageDigest'].should.equal(put_response['imageId']['imageDigest']) + + +@mock_ecr +def test_describe_images_by_digest(): + 
client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + tags = ['latest', 'v1', 'v2'] + digest_map = {} + for tag in tags: + put_response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag=tag + ) + digest_map[put_response['image']['imageId']['imageDigest']] = put_response['image'] + + for digest, put_response in digest_map.items(): + response = client.describe_images(repositoryName='test_repository', + imageIds=[{'imageDigest': digest}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + image_detail['registryId'].should.equal("012345678910") + image_detail['repositoryName'].should.equal("test_repository") + image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) + image_detail['imageDigest'].should.equal(digest) From 63f01039c3f321c6f726c620eba6ec66e98f55ec Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Tue, 20 Jun 2017 13:51:25 -0700 Subject: [PATCH 063/412] Implementing RDS Snapshots --- moto/rds2/exceptions.py | 8 ++++ moto/rds2/models.py | 81 +++++++++++++++++++++++++++++++++++- moto/rds2/responses.py | 59 +++++++++++++++++++++++++- tests/test_rds2/test_rds2.py | 75 +++++++++++++++++++++++++++++++++ 4 files changed, 221 insertions(+), 2 deletions(-) diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py index 29e92941d..057a13ba2 100644 --- a/moto/rds2/exceptions.py +++ b/moto/rds2/exceptions.py @@ -28,6 +28,14 @@ class DBInstanceNotFoundError(RDSClientError): "Database {0} not found.".format(database_identifier)) +class DBSnapshotNotFoundError(RDSClientError): + + def __init__(self): + super(DBSnapshotNotFoundError, self).__init__( + 'DBSnapshotNotFound', + "DBSnapshotIdentifier does not refer to an existing DB snapshot.") + + class DBSecurityGroupNotFoundError(RDSClientError): def __init__(self, security_group_name): diff --git 
a/moto/rds2/models.py b/moto/rds2/models.py index 4036cdcd1..ae97ba1f2 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import copy +import datetime from collections import defaultdict import boto.rds2 @@ -10,9 +11,11 @@ from moto.cloudformation.exceptions import UnformattedGetAttTemplateException from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import get_random_hex +from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2.models import ec2_backends from .exceptions import (RDSClientError, DBInstanceNotFoundError, + DBSnapshotNotFoundError, DBSecurityGroupNotFoundError, DBSubnetGroupNotFoundError, DBParameterGroupNotFoundError) @@ -205,7 +208,7 @@ class Database(BaseModel): {% endif %} {% if database.iops %} {{ database.iops }} - io1 + standard {% else %} {{ database.storage_type }} {% endif %} @@ -399,6 +402,53 @@ class Database(BaseModel): backend.delete_database(self.db_instance_identifier) +class Snapshot(BaseModel): + def __init__(self, database, snapshot_id, tags): + self.database = database + self.snapshot_id = snapshot_id + self.tags = tags + self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) + + @property + def snapshot_arn(self): + return "arn:aws:rds:{0}:1234567890:snapshot:{1}".format(self.database.region, self.snapshot_id) + + def to_xml(self): + template = Template(""" + {{ snapshot.snapshot_id }} + {{ database.db_instance_identifier }} + {{ snapshot.created_at }} + {{ database.engine }} + {{ database.allocated_storage }} + available + {{ database.port }} + {{ database.availability_zone }} + {{ database.db_subnet_group.vpc_id }} + {{ snapshot.created_at }} + {{ database.master_username }} + {{ database.engine_version }} + general-public-license + manual + {% if database.iops %} + {{ database.iops }} + io1 + {% else %} + {{ database.storage_type }} + {% endif %} + {{ 
database.option_group_name }} + {{ 100 }} + {{ database.region }} + + + {{ database.storage_encrypted }} + {{ database.kms_key_id }} + {{ snapshot.snapshot_arn }} + + false + """) + return template.render(snapshot=self, database=self.database) + + class SecurityGroup(BaseModel): def __init__(self, group_name, description, tags): @@ -607,6 +657,7 @@ class RDS2Backend(BaseBackend): self.arn_regex = re_compile( r'^arn:aws:rds:.*:[0-9]*:(db|es|og|pg|ri|secgrp|snapshot|subgrp):.*$') self.databases = OrderedDict() + self.snapshots = OrderedDict() self.db_parameter_groups = {} self.option_groups = {} self.security_groups = {} @@ -624,6 +675,20 @@ class RDS2Backend(BaseBackend): self.databases[database_id] = database return database + def create_snapshot(self, db_instance_identifier, db_snapshot_identifier, tags): + database = self.databases.get(db_instance_identifier) + if not database: + raise DBInstanceNotFoundError(db_instance_identifier) + snapshot = Snapshot(database, db_snapshot_identifier, tags) + self.snapshots[db_snapshot_identifier] = snapshot + return snapshot + + def delete_snapshot(self, db_snapshot_identifier): + if db_snapshot_identifier not in self.snapshots: + raise DBSnapshotNotFoundError() + + return self.snapshots.pop(db_snapshot_identifier) + def create_database_replica(self, db_kwargs): database_id = db_kwargs['db_instance_identifier'] source_database_id = db_kwargs['source_db_identifier'] @@ -646,6 +711,20 @@ class RDS2Backend(BaseBackend): raise DBInstanceNotFoundError(db_instance_identifier) return self.databases.values() + def describe_snapshots(self, db_instance_identifier, db_snapshot_identifier): + if db_instance_identifier: + for snapshot in self.snapshots.values(): + if snapshot.database.db_instance_identifier == db_instance_identifier: + return [snapshot] + raise DBSnapshotNotFoundError() + + if db_snapshot_identifier: + if db_snapshot_identifier in self.snapshots: + return [self.snapshots[db_snapshot_identifier]] + raise 
DBSnapshotNotFoundError() + + return self.snapshots.values() + def modify_database(self, db_instance_identifier, db_kwargs): database = self.describe_databases(db_instance_identifier)[0] database.update(db_kwargs) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index f8f33f2b9..cdadd3424 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -39,7 +39,7 @@ class RDS2Response(BaseResponse): "region": self.region, "security_groups": self._get_multi_param('DBSecurityGroups.DBSecurityGroupName'), "storage_encrypted": self._get_param("StorageEncrypted"), - "storage_type": self._get_param("StorageType"), + "storage_type": self._get_param("StorageType", 'standard'), # VpcSecurityGroupIds.member.N "tags": list(), } @@ -150,6 +150,27 @@ class RDS2Response(BaseResponse): template = self.response_template(REBOOT_DATABASE_TEMPLATE) return template.render(database=database) + def create_db_snapshot(self): + db_instance_identifier = self._get_param('DBInstanceIdentifier') + db_snapshot_identifier = self._get_param('DBSnapshotIdentifier') + tags = self._get_param('Tags', []) + snapshot = self.backend.create_snapshot(db_instance_identifier, db_snapshot_identifier, tags) + template = self.response_template(CREATE_SNAPSHOT_TEMPLATE) + return template.render(snapshot=snapshot) + + def describe_db_snapshots(self): + db_instance_identifier = self._get_param('DBInstanceIdentifier') + db_snapshot_identifier = self._get_param('DBSnapshotIdentifier') + snapshots = self.backend.describe_snapshots(db_instance_identifier, db_snapshot_identifier) + template = self.response_template(DESCRIBE_SNAPSHOTS_TEMPLATE) + return template.render(snapshots=snapshots) + + def delete_db_snapshot(self): + db_snapshot_identifier = self._get_param('DBSnapshotIdentifier') + snapshot = self.backend.delete_snapshot(db_snapshot_identifier) + template = self.response_template(DELETE_SNAPSHOT_TEMPLATE) + return template.render(snapshot=snapshot) + def list_tags_for_resource(self): arn = 
self._get_param('ResourceName') template = self.response_template(LIST_TAGS_FOR_RESOURCE_TEMPLATE) @@ -397,6 +418,42 @@ DELETE_DATABASE_TEMPLATE = """ + + {{ snapshot.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + + +""" + +DESCRIBE_SNAPSHOTS_TEMPLATE = """ + + + {%- for snapshot in snapshots -%} + {{ snapshot.to_xml() }} + {%- endfor -%} + + {% if marker %} + {{ marker }} + {% endif %} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + +""" + +DELETE_SNAPSHOT_TEMPLATE = """ + + {{ snapshot.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + + +""" + CREATE_SECURITY_GROUP_TEMPLATE = """ {{ security_group.to_xml() }} diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 148b00aa1..7a801257c 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -168,6 +168,81 @@ def test_delete_non_existant_database(): DBInstanceIdentifier="not-a-db").should.throw(ClientError) +@mock_rds2 +def test_create_db_snapshots(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_snapshot.when.called_with( + DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='g-1').get('DBSnapshot') + + snapshot.get('Engine').should.equal('postgres') + snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') + snapshot.get('DBSnapshotIdentifier').should.equal('g-1') + + +@mock_rds2 +def test_describe_db_snapshots(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + 
DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + conn.describe_db_snapshots.when.called_with( + DBInstanceIdentifier="db-primary-1").should.throw(ClientError) + + created = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').get('DBSnapshot') + + created.get('Engine').should.equal('postgres') + + by_database_id = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') + by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots') + by_snapshot_id.should.equal(by_database_id) + + snapshot = by_snapshot_id[0] + snapshot.should.equal(created) + snapshot.get('Engine').should.equal('postgres') + + +@mock_rds2 +def test_delete_db_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1') + + conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots')[0] + conn.delete_db_snapshot(DBSnapshotIdentifier='snapshot-1') + conn.describe_db_snapshots.when.called_with( + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + @mock_rds2 def test_create_option_group(): conn = boto3.client('rds', region_name='us-west-2') From ccb4ffde7c4f5bede92a4601f2832316a3335d56 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Tue, 20 Jun 2017 13:53:22 -0700 Subject: [PATCH 064/412] Supporting io1 type --- moto/rds2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index ae97ba1f2..2d9a66401 100644 --- a/moto/rds2/models.py +++ 
b/moto/rds2/models.py @@ -208,7 +208,7 @@ class Database(BaseModel): {% endif %} {% if database.iops %} {{ database.iops }} - standard + io1 {% else %} {{ database.storage_type }} {% endif %} From fb2efb1c6dca45b6781fa4e2d977735aeedc2d9a Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Tue, 20 Jun 2017 14:00:56 -0700 Subject: [PATCH 065/412] Implementing snapshots on rds delete --- moto/rds/responses.py | 3 ++- moto/rds2/models.py | 8 +++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/moto/rds/responses.py b/moto/rds/responses.py index 0895a8bf2..cdcbe3603 100644 --- a/moto/rds/responses.py +++ b/moto/rds/responses.py @@ -114,7 +114,8 @@ class RDSResponse(BaseResponse): def delete_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') - database = self.backend.delete_database(db_instance_identifier) + db_snapshot_name = self._get_param('FinalDBSnapshotIdentifier') + database = self.backend.delete_database(db_instance_identifier, db_snapshot_name) template = self.response_template(DELETE_DATABASE_TEMPLATE) return template.render(database=database) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 2d9a66401..549f6e247 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -403,10 +403,10 @@ class Database(BaseModel): class Snapshot(BaseModel): - def __init__(self, database, snapshot_id, tags): + def __init__(self, database, snapshot_id, tags=None): self.database = database self.snapshot_id = snapshot_id - self.tags = tags + self.tags = tags or [] self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) @property @@ -746,13 +746,15 @@ class RDS2Backend(BaseBackend): return backend.describe_databases(db_name)[0] - def delete_database(self, db_instance_identifier): + def delete_database(self, db_instance_identifier, db_snapshot_name): if db_instance_identifier in self.databases: database = self.databases.pop(db_instance_identifier) if database.is_replica: primary = 
self.find_db_from_id(database.source_db_identifier) primary.remove_replica(database) database.status = 'deleting' + if db_snapshot_name: + self.snapshots[db_snapshot_name] = Snapshot(database, db_snapshot_name) return database else: raise DBInstanceNotFoundError(db_instance_identifier) From 8df7169915cf4e894133c7640ea8758e2eddda6a Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Tue, 20 Jun 2017 14:01:28 -0700 Subject: [PATCH 066/412] Snapshots are optional --- moto/rds2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 549f6e247..86f7bae9a 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -746,7 +746,7 @@ class RDS2Backend(BaseBackend): return backend.describe_databases(db_name)[0] - def delete_database(self, db_instance_identifier, db_snapshot_name): + def delete_database(self, db_instance_identifier, db_snapshot_name=None): if db_instance_identifier in self.databases: database = self.databases.pop(db_instance_identifier) if database.is_replica: From e57798cb96966b83e42fb9ab3a9cc74f32879084 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Tue, 20 Jun 2017 14:46:13 -0700 Subject: [PATCH 067/412] Implementing snapshots on rds instance deletion --- moto/rds/responses.py | 3 +-- moto/rds2/responses.py | 3 ++- tests/test_rds2/test_rds2.py | 14 ++++++++++---- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/moto/rds/responses.py b/moto/rds/responses.py index cdcbe3603..0895a8bf2 100644 --- a/moto/rds/responses.py +++ b/moto/rds/responses.py @@ -114,8 +114,7 @@ class RDSResponse(BaseResponse): def delete_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') - db_snapshot_name = self._get_param('FinalDBSnapshotIdentifier') - database = self.backend.delete_database(db_instance_identifier, db_snapshot_name) + database = self.backend.delete_database(db_instance_identifier) template = self.response_template(DELETE_DATABASE_TEMPLATE) 
return template.render(database=database) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index cdadd3424..b26f2e347 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -140,7 +140,8 @@ class RDS2Response(BaseResponse): def delete_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') - database = self.backend.delete_database(db_instance_identifier) + db_snapshot_name = self._get_param('FinalDBSnapshotIdentifier') + database = self.backend.delete_database(db_instance_identifier, db_snapshot_name) template = self.response_template(DELETE_DATABASE_TEMPLATE) return template.render(database=database) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 7a801257c..f869dc1ce 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -145,10 +145,10 @@ def test_delete_database(): conn = boto3.client('rds', region_name='us-west-2') instances = conn.describe_db_instances() list(instances['DBInstances']).should.have.length_of(0) - conn.create_db_instance(DBInstanceIdentifier='db-master-1', + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', + Engine='postgres', + DBInstanceClass='db.m1.small', MasterUsername='root', MasterUserPassword='hunter2', Port=1234, @@ -156,10 +156,16 @@ def test_delete_database(): instances = conn.describe_db_instances() list(instances['DBInstances']).should.have.length_of(1) - conn.delete_db_instance(DBInstanceIdentifier="db-master-1") + conn.delete_db_instance(DBInstanceIdentifier="db-primary-1", + FinalDBSnapshotIdentifier='primary-1-snapshot') + instances = conn.describe_db_instances() list(instances['DBInstances']).should.have.length_of(0) + # Saved the snapshot + snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get('DBSnapshots') + snapshots[0].get('Engine').should.equal('postgres') + @mock_rds2 def 
test_delete_non_existant_database(): From c5ce2848befef6b8475da22d6ab3804493e19f6a Mon Sep 17 00:00:00 2001 From: William Richard Date: Wed, 21 Jun 2017 12:58:01 -0400 Subject: [PATCH 068/412] Boto3 and cloudformation have different keys for auto scaling tags - handle that gracefully --- moto/autoscaling/models.py | 19 +-- tests/test_autoscaling/test_autoscaling.py | 33 +++-- .../test_cloudformation_stack_integration.py | 119 ++++++++++++------ 3 files changed, 115 insertions(+), 56 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index a2fcb2a63..9df9fea12 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -13,14 +13,12 @@ ASG_NAME_TAG = "aws:autoscaling:groupName" class InstanceState(object): - def __init__(self, instance, lifecycle_state="InService"): self.instance = instance self.lifecycle_state = lifecycle_state class FakeScalingPolicy(BaseModel): - def __init__(self, name, policy_type, adjustment_type, as_name, scaling_adjustment, cooldown, autoscaling_backend): self.name = name @@ -47,7 +45,6 @@ class FakeScalingPolicy(BaseModel): class FakeLaunchConfiguration(BaseModel): - def __init__(self, name, image_id, key_name, ramdisk_id, kernel_id, security_groups, user_data, instance_type, instance_monitoring, instance_profile_name, spot_price, ebs_optimized, associate_public_ip_address, block_device_mapping_dict): @@ -146,7 +143,6 @@ class FakeLaunchConfiguration(BaseModel): class FakeAutoScalingGroup(BaseModel): - def __init__(self, name, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, @@ -261,11 +257,17 @@ class FakeAutoScalingGroup(BaseModel): if self.desired_capacity > curr_instance_count: # Need more instances - count_needed = int(self.desired_capacity) - \ - int(curr_instance_count) + count_needed = int(self.desired_capacity) - int(curr_instance_count) + + propagated_tags = {} + for tag in 
self.tags: + # boto uses 'propagate_at_launch + # boto3 and cloudformation use PropagateAtLaunch + if 'propagate_at_launch' in tag and tag['propagate_at_launch'] == 'true': + propagated_tags[tag['key']] = tag['value'] + if 'PropagateAtLaunch' in tag and tag['PropagateAtLaunch']: + propagated_tags[tag['Key']] = tag['Value'] - propagated_tags = {t['key']: t['value'] for t in self.tags - if t['propagate_at_launch'] == 'true'} propagated_tags[ASG_NAME_TAG] = self.name reservation = self.autoscaling_backend.ec2_backend.add_instances( self.launch_config.image_id, @@ -290,7 +292,6 @@ class FakeAutoScalingGroup(BaseModel): class AutoScalingBackend(BaseBackend): - def __init__(self, ec2_backend, elb_backend): self.autoscaling_groups = OrderedDict() self.launch_configurations = OrderedDict() diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 5cc697785..b919eb71c 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -138,6 +138,7 @@ def test_list_many_autoscaling_groups(): groups.should.have.length_of(51) assert 'NextToken' not in response2.keys() + @mock_autoscaling @mock_ec2 def test_list_many_autoscaling_groups(): @@ -163,6 +164,7 @@ def test_list_many_autoscaling_groups(): tags.should.contain({u'Value': 'TestTagValue1', u'Key': 'TestTagKey1'}) tags.should.contain({u'Value': 'TestGroup1', u'Key': 'aws:autoscaling:groupName'}) + @mock_autoscaling_deprecated def test_autoscaling_group_describe_filter(): conn = boto.connect_autoscale() @@ -493,7 +495,20 @@ def test_create_autoscaling_group_boto3(): LaunchConfigurationName='test_launch_configuration', MinSize=0, MaxSize=20, - DesiredCapacity=5 + DesiredCapacity=5, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }, + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 
'not-propogated-tag-key', + 'Value': 'not-propogate-tag-value', + 'PropagateAtLaunch': False + }] ) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) @@ -556,12 +571,14 @@ def test_autoscaling_taqs_update_boto3(): MinSize=0, MaxSize=20, DesiredCapacity=5, - Tags=[{ - "ResourceId": 'test_asg', - "Key": 'test_key', - "Value": 'test_value', - "PropagateAtLaunch": True - }] + Tags=[ + { + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }, + ] ) client.create_or_update_tags(Tags=[{ @@ -573,7 +590,7 @@ def test_autoscaling_taqs_update_boto3(): "ResourceId": 'test_asg', "Key": 'test_key2', "Value": 'test_value2', - "PropagateAtLaunch": True + "PropagateAtLaunch": False }]) response = client.describe_auto_scaling_groups( diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 87dcfd950..df696d879 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -382,7 +382,7 @@ def test_stack_elb_integration_with_update(): "Protocol": "HTTP", } ], - "Policies": {"Ref" : "AWS::NoValue"}, + "Policies": {"Ref": "AWS::NoValue"}, } }, }, @@ -536,8 +536,8 @@ def test_stack_security_groups(): @mock_autoscaling_deprecated() @mock_elb_deprecated() @mock_cloudformation_deprecated() +@mock_ec2_deprecated() def test_autoscaling_group_with_elb(): - web_setup_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -550,7 +550,17 @@ def test_autoscaling_group_with_elb(): "MinSize": "2", "MaxSize": "2", "DesiredCapacity": "2", - "LoadBalancerNames": [{"Ref": "my-elb"}] + "LoadBalancerNames": [{"Ref": "my-elb"}], + "Tags": [ + { + "Key": "propagated-test-tag", "Value": "propagated-test-tag-value", + "PropagateAtLaunch": True}, + { + "Key": "not-propagated-test-tag", + "Value": "not-propagated-test-tag-value", + "PropagateAtLaunch": 
False + } + ] }, }, @@ -611,7 +621,8 @@ def test_autoscaling_group_with_elb(): as_group_resource.physical_resource_id.should.contain("my-as-group") launch_config_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] + resource for resource in resources if + resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] launch_config_resource.physical_resource_id.should.contain( "my-launch-config") @@ -619,9 +630,20 @@ def test_autoscaling_group_with_elb(): 'AWS::ElasticLoadBalancing::LoadBalancer'][0] elb_resource.physical_resource_id.should.contain("my-elb") + # confirm the instances were created with the right tags + ec2_conn = boto.ec2.connect_to_region('us-west-1') + reservations = ec2_conn.get_all_reservations() + len(reservations).should.equal(1) + reservation = reservations[0] + len(reservation.instances).should.equal(2) + for instance in reservation.instances: + instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value') + instance.tags.keys().should_not.contain('not-propagated-test-tag') + @mock_autoscaling_deprecated() @mock_cloudformation_deprecated() +@mock_ec2_deprecated() def test_autoscaling_group_update(): asg_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -661,6 +683,16 @@ def test_autoscaling_group_update(): asg.desired_capacity.should.equal(2) asg_template['Resources']['my-as-group']['Properties']['MaxSize'] = 3 + asg_template['Resources']['my-as-group']['Properties']['Tags'] = [ + { + "Key": "propagated-test-tag", "Value": "propagated-test-tag-value", + "PropagateAtLaunch": True}, + { + "Key": "not-propagated-test-tag", + "Value": "not-propagated-test-tag-value", + "PropagateAtLaunch": False + } + ] asg_template_json = json.dumps(asg_template) conn.update_stack( "asg_stack", @@ -671,11 +703,22 @@ def test_autoscaling_group_update(): asg.max_size.should.equal(3) asg.desired_capacity.should.equal(2) + # confirm the instances were created with 
the right tags + ec2_conn = boto.ec2.connect_to_region('us-west-1') + reservations = ec2_conn.get_all_reservations() + running_instance_count = 0 + for res in reservations: + for instance in res.instances: + if instance.state == 'running': + running_instance_count += 1 + instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value') + instance.tags.keys().should_not.contain('not-propagated-test-tag') + running_instance_count.should.equal(2) + @mock_ec2_deprecated() @mock_cloudformation_deprecated() def test_vpc_single_instance_in_subnet(): - template_json = json.dumps(vpc_single_instance_in_subnet.template) conn = boto.cloudformation.connect_to_region("us-west-1") conn.create_stack( @@ -738,16 +781,16 @@ def test_rds_db_parameter_groups(): TemplateBody=template_json, Parameters=[{'ParameterKey': key, 'ParameterValue': value} for key, value in [ - ("DBInstanceIdentifier", "master_db"), - ("DBName", "my_db"), - ("DBUser", "my_user"), - ("DBPassword", "my_password"), - ("DBAllocatedStorage", "20"), - ("DBInstanceClass", "db.m1.medium"), - ("EC2SecurityGroup", "application"), - ("MultiAZ", "true"), - ] - ], + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", "db.m1.medium"), + ("EC2SecurityGroup", "application"), + ("MultiAZ", "true"), + ] + ], ) rds_conn = boto3.client('rds', region_name="us-west-1") @@ -758,8 +801,10 @@ def test_rds_db_parameter_groups(): 'DBParameterGroups'][0]['DBParameterGroupName'] found_cloudformation_set_parameter = False - for db_parameter in rds_conn.describe_db_parameters(DBParameterGroupName=db_parameter_group_name)['Parameters']: - if db_parameter['ParameterName'] == 'BACKLOG_QUEUE_LIMIT' and db_parameter['ParameterValue'] == '2048': + for db_parameter in rds_conn.describe_db_parameters(DBParameterGroupName=db_parameter_group_name)[ + 'Parameters']: + if db_parameter['ParameterName'] == 
'BACKLOG_QUEUE_LIMIT' and db_parameter[ + 'ParameterValue'] == '2048': found_cloudformation_set_parameter = True found_cloudformation_set_parameter.should.equal(True) @@ -965,7 +1010,6 @@ def test_iam_roles(): @mock_ec2_deprecated() @mock_cloudformation_deprecated() def test_single_instance_with_ebs_volume(): - template_json = json.dumps(single_instance_with_ebs_volume.template) conn = boto.cloudformation.connect_to_region("us-west-1") conn.create_stack( @@ -1005,7 +1049,6 @@ def test_create_template_without_required_param(): @mock_ec2_deprecated() @mock_cloudformation_deprecated() def test_classic_eip(): - template_json = json.dumps(ec2_classic_eip.template) conn = boto.cloudformation.connect_to_region("us-west-1") conn.create_stack("test_stack", template_body=template_json) @@ -1022,7 +1065,6 @@ def test_classic_eip(): @mock_ec2_deprecated() @mock_cloudformation_deprecated() def test_vpc_eip(): - template_json = json.dumps(vpc_eip.template) conn = boto.cloudformation.connect_to_region("us-west-1") conn.create_stack("test_stack", template_body=template_json) @@ -1039,7 +1081,6 @@ def test_vpc_eip(): @mock_ec2_deprecated() @mock_cloudformation_deprecated() def test_fn_join(): - template_json = json.dumps(fn_join.template) conn = boto.cloudformation.connect_to_region("us-west-1") conn.create_stack("test_stack", template_body=template_json) @@ -2009,25 +2050,25 @@ def test_stack_spot_fleet(): "TargetCapacity": 6, "AllocationStrategy": "diversified", "LaunchSpecifications": [ - { - "EbsOptimized": "false", - "InstanceType": 't2.small', - "ImageId": "ami-1234", - "SubnetId": subnet_id, - "WeightedCapacity": "2", - "SpotPrice": "0.13", - }, { - "EbsOptimized": "true", - "InstanceType": 't2.large', - "ImageId": "ami-1234", - "Monitoring": {"Enabled": "true"}, - "SecurityGroups": [{"GroupId": "sg-123"}], - "SubnetId": subnet_id, - "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, - "WeightedCapacity": "4", - "SpotPrice": "10.00", - } + "EbsOptimized": 
"false", + "InstanceType": 't2.small', + "ImageId": "ami-1234", + "SubnetId": subnet_id, + "WeightedCapacity": "2", + "SpotPrice": "0.13", + }, + { + "EbsOptimized": "true", + "InstanceType": 't2.large', + "ImageId": "ami-1234", + "Monitoring": {"Enabled": "true"}, + "SecurityGroups": [{"GroupId": "sg-123"}], + "SubnetId": subnet_id, + "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, + "WeightedCapacity": "4", + "SpotPrice": "10.00", + } ] } } From 8ca27e184aa98b62fd6841862499f66084b71bf1 Mon Sep 17 00:00:00 2001 From: Declan Shanaghy Date: Mon, 26 Jun 2017 11:17:36 -0700 Subject: [PATCH 069/412] Simplify tests --- tests/test_ssm/test_ssm_boto3.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 8b5d1f200..a62536a23 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -118,14 +118,12 @@ def test_describe_parameters_filter_names(): response = client.describe_parameters(Filters=[ { 'Key': 'Name', - 'Values': ['param-45', 'param-22'] + 'Values': ['param-22'] }, ]) - len(response['Parameters']).should.equal(2) + len(response['Parameters']).should.equal(1) response['Parameters'][0]['Name'].should.equal('param-22') response['Parameters'][0]['Type'].should.equal('String') - response['Parameters'][1]['Name'].should.equal('param-45') - response['Parameters'][1]['Type'].should.equal('SecureString') ''.should.equal(response.get('NextToken', '')) @mock_ssm @@ -174,14 +172,12 @@ def test_describe_parameters_filter_keyid(): response = client.describe_parameters(Filters=[ { 'Key': 'KeyId', - 'Values': ['key:5','key:10'] + 'Values': ['key:10'] }, ]) - len(response['Parameters']).should.equal(2) + len(response['Parameters']).should.equal(1) response['Parameters'][0]['Name'].should.equal('param-10') response['Parameters'][0]['Type'].should.equal('SecureString') - response['Parameters'][1]['Name'].should.equal('param-5') - 
response['Parameters'][1]['Type'].should.equal('SecureString') ''.should.equal(response.get('NextToken', '')) @mock_ssm From 27f1248788b71943e3c04c9b7e173ac70e22dd81 Mon Sep 17 00:00:00 2001 From: Declan Shanaghy Date: Mon, 26 Jun 2017 11:20:56 -0700 Subject: [PATCH 070/412] Fix spacing --- tests/test_ssm/test_ssm_boto3.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index a62536a23..de0793e82 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -99,6 +99,7 @@ def test_describe_parameters_paging(): len(response['Parameters']).should.equal(0) ''.should.equal(response.get('NextToken', '')) + @mock_ssm def test_describe_parameters_filter_names(): client = boto3.client('ssm', region_name='us-east-1') @@ -114,7 +115,6 @@ def test_describe_parameters_filter_names(): p['KeyId'] = 'a key' client.put_parameter(**p) - response = client.describe_parameters(Filters=[ { 'Key': 'Name', @@ -126,6 +126,7 @@ def test_describe_parameters_filter_names(): response['Parameters'][0]['Type'].should.equal('String') ''.should.equal(response.get('NextToken', '')) + @mock_ssm def test_describe_parameters_filter_type(): client = boto3.client('ssm', region_name='us-east-1') @@ -153,6 +154,7 @@ def test_describe_parameters_filter_type(): response['Parameters'][0]['Type'].should.equal('SecureString') '10'.should.equal(response.get('NextToken', '')) + @mock_ssm def test_describe_parameters_filter_keyid(): client = boto3.client('ssm', region_name='us-east-1') @@ -180,6 +182,7 @@ def test_describe_parameters_filter_keyid(): response['Parameters'][0]['Type'].should.equal('SecureString') ''.should.equal(response.get('NextToken', '')) + @mock_ssm def test_put_parameter_secure_default_kms(): client = boto3.client('ssm', region_name='us-east-1') From 7bf5211bef53c3b821bb60435ddab9db9f96da9e Mon Sep 17 00:00:00 2001 From: Declan Shanaghy Date: Mon, 26 Jun 2017 12:07:44 -0700 
Subject: [PATCH 071/412] Simplify test 2 --- tests/test_ssm/test_ssm_boto3.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index de0793e82..60a027933 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -150,7 +150,6 @@ def test_describe_parameters_filter_type(): }, ]) len(response['Parameters']).should.equal(10) - response['Parameters'][0]['Name'].should.equal('param-35') response['Parameters'][0]['Type'].should.equal('SecureString') '10'.should.equal(response.get('NextToken', '')) From 8921920ae6671cd251ffdb18ed452d6932bdb203 Mon Sep 17 00:00:00 2001 From: Waldemar Hummer Date: Tue, 27 Jun 2017 17:18:21 +1000 Subject: [PATCH 072/412] add flag to enable SSL for moto_server --- moto/server.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/moto/server.py b/moto/server.py index e5426bc7a..be41f1ed0 100644 --- a/moto/server.py +++ b/moto/server.py @@ -171,6 +171,12 @@ def main(argv=sys.argv[1:]): help='Reload server on a file change', default=False ) + parser.add_argument( + '-s', '--ssl', + action='store_true', + help='Enable SSL encrypted connection (use https://... 
URL)', + default=False + ) args = parser.parse_args(argv) @@ -180,7 +186,8 @@ def main(argv=sys.argv[1:]): main_app.debug = True run_simple(args.host, args.port, main_app, - threaded=True, use_reloader=args.reload) + threaded=True, use_reloader=args.reload, + ssl_context='adhoc' if args.ssl else None) if __name__ == '__main__': From c4b9088bfcee05f39ccec8989eafa15b0fc69ddf Mon Sep 17 00:00:00 2001 From: Steven Cipriano Date: Tue, 27 Jun 2017 11:31:43 -0700 Subject: [PATCH 073/412] Add support for recursive emr settings - Updates _RecursiveDictRef to not implement __getitem__, avoiding errors when using recursive settings for an emr job flow --- moto/core/responses.py | 3 +++ tests/test_emr/test_emr_boto3.py | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index adad5d1de..82e9d4cad 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -414,6 +414,9 @@ class _RecursiveDictRef(object): def __getattr__(self, key): return self.dic.__getattr__(key) + def __getitem__(self, key): + return self.dic.__getitem__(key) + def set_reference(self, key, dic): """Set the RecursiveDictRef object to keep reference to dict object (dic) at the key. 
diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 830abdb85..237ff8bba 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -64,7 +64,18 @@ def test_describe_cluster(): args['Configurations'] = [ {'Classification': 'yarn-site', 'Properties': {'someproperty': 'somevalue', - 'someotherproperty': 'someothervalue'}}] + 'someotherproperty': 'someothervalue'}}, + {'Classification': 'nested-configs', + 'Properties': {}, + 'Configurations': [ + { + 'Classification': 'nested-config', + 'Properties': { + 'nested-property': 'nested-value' + } + } + ]} + ] args['Instances']['AdditionalMasterSecurityGroups'] = ['additional-master'] args['Instances']['AdditionalSlaveSecurityGroups'] = ['additional-slave'] args['Instances']['Ec2KeyName'] = 'mykey' @@ -87,6 +98,10 @@ def test_describe_cluster(): config['Classification'].should.equal('yarn-site') config['Properties'].should.equal(args['Configurations'][0]['Properties']) + nested_config = cl['Configurations'][1] + nested_config['Classification'].should.equal('nested-configs') + nested_config['Properties'].should.equal(args['Configurations'][1]['Properties']) + attrs = cl['Ec2InstanceAttributes'] attrs['AdditionalMasterSecurityGroups'].should.equal( args['Instances']['AdditionalMasterSecurityGroups']) From 898031b40c9bd4a4396f5972e0916081c90f7575 Mon Sep 17 00:00:00 2001 From: Luis Jimenez Date: Thu, 29 Jun 2017 09:24:09 -0400 Subject: [PATCH 074/412] SQSResponse: include MD5OfMessageAttributes parameter only when there are message attributes --- moto/sqs/responses.py | 6 +++ tests/test_sqs/test_sqs.py | 76 +++++++++++++++++++++++++++++++++++--- 2 files changed, 76 insertions(+), 6 deletions(-) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 53bbac6ef..ba4a56b8f 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -339,7 +339,9 @@ SEND_MESSAGE_RESPONSE = """ {{- message.body_md5 -}} + {% if message.message_attributes.items()|count > 
0 %} {{- message.attribute_md5 -}} + {% endif %} {{- message.id -}} @@ -373,7 +375,9 @@ RECEIVE_MESSAGE_RESPONSE = """ ApproximateFirstReceiveTimestamp {{ message.approximate_first_receive_timestamp }} + {% if message.message_attributes.items()|count > 0 %} {{- message.attribute_md5 -}} + {% endif %} {% for name, value in message.message_attributes.items() %} {{ name }} @@ -402,7 +406,9 @@ SEND_MESSAGE_BATCH_RESPONSE = """ {{ message.user_id }} {{ message.id }} {{ message.body_md5 }} + {% if message.message_attributes.items()|count > 0 %} {{- message.attribute_md5 -}} + {% endif %} {% endfor %} diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index b01a55406..db351f5ab 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -39,9 +39,25 @@ def test_get_inexistent_queue(): sqs.get_queue_by_name.when.called_with( QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) +@mock_sqs +def test_message_send_without_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp" + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.shouldnt.have.key('MD5OfMessageAttributes') + msg.get('ResponseMetadata', {}).get('RequestId').should.equal( + '27daac76-34dd-47df-bd01-1f6e873584a0') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + messages.should.have.length_of(1) @mock_sqs -def test_message_send(): +def test_message_send_with_attributes(): sqs = boto3.resource('sqs', region_name='us-east-1') queue = sqs.create_queue(QueueName="blah") msg = queue.send_message( @@ -189,7 +205,7 @@ def test_set_queue_attribute(): @mock_sqs -def test_send_message(): +def test_send_receive_message_without_attributes(): sqs = boto3.resource('sqs', region_name='us-east-1') conn = boto3.client("sqs", region_name='us-east-1') 
conn.create_queue(QueueName="test-queue") @@ -198,14 +214,62 @@ def test_send_message(): body_one = 'this is a test message' body_two = 'this is another test message' - response = queue.send_message(MessageBody=body_one) - response = queue.send_message(MessageBody=body_two) + queue.send_message(MessageBody=body_one) + queue.send_message(MessageBody=body_two) messages = conn.receive_message( QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] - messages[0]['Body'].should.equal(body_one) - messages[1]['Body'].should.equal(body_two) + message1 = messages[0] + message2 = messages[1] + + message1['Body'].should.equal(body_one) + message2['Body'].should.equal(body_two) + + message1.shouldnt.have.key('MD5OfMessageAttributes') + message2.shouldnt.have.key('MD5OfMessageAttributes') + +@mock_sqs +def test_send_receive_message_with_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = 'this is a test message' + body_two = 'this is another test message' + + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359900', + 'DataType': 'Number', + } + } + ) + + queue.send_message( + MessageBody=body_two, + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359901', + 'DataType': 'Number', + } + } + ) + + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] + + message1 = messages[0] + message2 = messages[1] + + message1.get('Body').should.equal(body_one) + message2.get('Body').should.equal(body_two) + + message1.get('MD5OfMessageAttributes').should.equal('235c5c510d26fb653d073faed50ae77c') + message2.get('MD5OfMessageAttributes').should.equal('994258b45346a2cc3f9cbb611aa7af30') @mock_sqs From e4f42d58807c9ca73a9d8d8ebaa71bf364827dd3 Mon Sep 17 00:00:00 2001 From: Ferran Puig Date: Mon, 3 Jul 2017 16:17:01 +0200 
Subject: [PATCH 075/412] Don't use exponential notation for SQS message timestamps --- moto/sqs/models.py | 4 ++-- tests/test_sqs/test_sqs.py | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 43a633c42..f6657269c 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -96,7 +96,7 @@ class Message(BaseModel): return escape(self._body) def mark_sent(self, delay_seconds=None): - self.sent_timestamp = unix_time_millis() + self.sent_timestamp = int(unix_time_millis()) if delay_seconds: self.delay(delay_seconds=delay_seconds) @@ -111,7 +111,7 @@ class Message(BaseModel): visibility_timeout = 0 if not self.approximate_first_receive_timestamp: - self.approximate_first_receive_timestamp = unix_time_millis() + self.approximate_first_receive_timestamp = int(unix_time_millis()) self.approximate_receive_count += 1 diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index db351f5ab..3eb8e2213 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -272,6 +272,25 @@ def test_send_receive_message_with_attributes(): message2.get('MD5OfMessageAttributes').should.equal('994258b45346a2cc3f9cbb611aa7af30') +@mock_sqs +def test_send_receive_message_timestamps(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + queue.send_message(MessageBody="derp") + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages'] + + message = messages[0] + sent_timestamp = message.get('Attributes').get('SentTimestamp') + approximate_first_receive_timestamp = message.get('Attributes').get('ApproximateFirstReceiveTimestamp') + + int.when.called_with(sent_timestamp).shouldnt.throw(ValueError) + int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) + + @mock_sqs def 
test_receive_messages_with_wait_seconds_timeout_of_zero(): """ From c3d9f4e056013b8844f64789c9268b89a332848f Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Wed, 5 Jul 2017 16:02:45 -0700 Subject: [PATCH 076/412] Persisting selected LicenseModel in RDS instances --- moto/rds/models.py | 2 +- moto/rds2/models.py | 7 +++---- moto/rds2/responses.py | 1 + tests/test_rds2/test_rds2.py | 2 ++ 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/moto/rds/models.py b/moto/rds/models.py index a499b134d..77deff09d 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -182,7 +182,7 @@ class Database(BaseModel): {{ database.source_db_identifier }} {% endif %} {{ database.engine }} - general-public-license + {{ database.license_model }} {{ database.engine_version }} diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 86f7bae9a..5abd2ed1b 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -89,8 +89,7 @@ class Database(BaseModel): self.preferred_backup_window = kwargs.get( 'preferred_backup_window', '13:14-13:44') - self.license_model = kwargs.get( - 'license_model', 'general-public-license') + self.license_model = kwargs.get('license_model', 'general-public-license') self.option_group_name = kwargs.get('option_group_name', None) self.default_option_groups = {"MySQL": "default.mysql5.6", "mysql": "default.mysql5.6", @@ -159,7 +158,7 @@ class Database(BaseModel): {{ database.source_db_identifier }} {% endif %} {{ database.engine }} - general-public-license + {{ database.license_model }} {{ database.engine_version }} @@ -427,7 +426,7 @@ class Snapshot(BaseModel): {{ snapshot.created_at }} {{ database.master_username }} {{ database.engine_version }} - general-public-license + {{ database.license_model }} manual {% if database.iops %} {{ database.iops }} diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index b26f2e347..ef02bfbf1 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -26,6 +26,7 @@ class 
RDS2Response(BaseResponse): "db_subnet_group_name": self._get_param("DBSubnetGroupName"), "engine": self._get_param("Engine"), "engine_version": self._get_param("EngineVersion"), + "license_model": self._get_param("LicenseModel"), "iops": self._get_int_param("Iops"), "kms_key_id": self._get_param("KmsKeyId"), "master_user_password": self._get_param('MasterUserPassword'), diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index f869dc1ce..a50f99868 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -14,6 +14,7 @@ def test_create_database(): Engine='postgres', DBName='staging-postgres', DBInstanceClass='db.m1.small', + LicenseModel='license-included', MasterUsername='root', MasterUserPassword='hunter2', Port=1234, @@ -23,6 +24,7 @@ def test_create_database(): database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") database['DBInstance']['AllocatedStorage'].should.equal(10) database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") + database['DBInstance']['LicenseModel'].should.equal("license-included") database['DBInstance']['MasterUsername'].should.equal("root") database['DBInstance']['DBSecurityGroups'][0][ 'DBSecurityGroupName'].should.equal('my_sg') From dbbbc01f886fd1c9d264687c25f36e73feab3966 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Thu, 6 Jul 2017 21:29:18 -0700 Subject: [PATCH 077/412] Test boto3 elb listener deletion --- tests/test_elb/test_elb.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 36f96c0e2..78c6e0ad0 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -214,6 +214,13 @@ def test_create_and_delete_listener_boto3_support(): balancer['ListenerDescriptions'][1]['Listener'][ 'InstancePort'].should.equal(8443) + client.delete_load_balancer_listeners( + LoadBalancerName='my-lb', + LoadBalancerPorts=[443]) + + balancer = 
client.describe_load_balancers()['LoadBalancerDescriptions'][0] + list(balancer['ListenerDescriptions']).should.have.length_of(1) + @mock_elb_deprecated def test_set_sslcertificate(): From 98342bfcc3c66d63c4e691c492e19840149cfdbb Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 6 Jul 2017 21:52:01 -0700 Subject: [PATCH 078/412] Raise error on duplicate elbv1 listener AWS returns an error condition when a listener is defined that interferes with an existing listener on the same load balancer port. --- moto/elb/exceptions.py | 9 +++++++++ moto/elb/models.py | 7 +++++++ tests/test_elb/test_elb.py | 8 ++++++++ 3 files changed, 24 insertions(+) diff --git a/moto/elb/exceptions.py b/moto/elb/exceptions.py index 071181a6c..6c316ef47 100644 --- a/moto/elb/exceptions.py +++ b/moto/elb/exceptions.py @@ -40,6 +40,15 @@ class BadHealthCheckDefinition(ELBClientError): "HealthCheck Target must begin with one of HTTP, TCP, HTTPS, SSL") +class DuplicateListenerError(ELBClientError): + + def __init__(self, name, port): + super(DuplicateListenerError, self).__init__( + "DuplicateListener", + "A listener already exists for {0} with LoadBalancerPort {1}, but with a different InstancePort, Protocol, or SSLCertificateId" + .format(name, port)) + + class DuplicateLoadBalancerName(ELBClientError): def __init__(self, name): diff --git a/moto/elb/models.py b/moto/elb/models.py index 5b6a58bb9..d09548340 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -18,6 +18,7 @@ from moto.ec2.models import ec2_backends from .exceptions import ( BadHealthCheckDefinition, DuplicateLoadBalancerName, + DuplicateListenerError, EmptyListenersError, LoadBalancerNotFoundError, TooManyTagsError, @@ -257,6 +258,12 @@ class ELBBackend(BaseBackend): ssl_certificate_id = port.get('sslcertificate_id') for listener in balancer.listeners: if lb_port == listener.load_balancer_port: + if protocol != listener.protocol: + raise DuplicateListenerError(name, lb_port) + if instance_port != 
listener.instance_port: + raise DuplicateListenerError(name, lb_port) + if ssl_certificate_id != listener.ssl_certificate_id: + raise DuplicateListenerError(name, lb_port) break else: balancer.listeners.append(FakeListener( diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 78c6e0ad0..f9019eed2 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -214,6 +214,14 @@ def test_create_and_delete_listener_boto3_support(): balancer['ListenerDescriptions'][1]['Listener'][ 'InstancePort'].should.equal(8443) + # Creating this listener with an conflicting definition throws error + with assert_raises(ClientError): + client.create_load_balancer_listeners( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 1234}] + ) + client.delete_load_balancer_listeners( LoadBalancerName='my-lb', LoadBalancerPorts=[443]) From 2a65f40a194854aef6337357cc29ba5d55b4c2ef Mon Sep 17 00:00:00 2001 From: fdfk Date: Tue, 11 Jul 2017 08:02:31 +0000 Subject: [PATCH 079/412] Adding list_verified_email_addresses and testing --- moto/ses/models.py | 7 +++++++ moto/ses/responses.py | 31 +++++++++++++++++++++++++++++++ tests/test_ses/test_ses_boto3.py | 7 +++++++ 3 files changed, 45 insertions(+) diff --git a/moto/ses/models.py b/moto/ses/models.py index 2f51d1473..179f4d8e0 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -36,6 +36,7 @@ class SESBackend(BaseBackend): def __init__(self): self.addresses = [] + self.email_addresses = [] self.domains = [] self.sent_messages = [] self.sent_message_count = 0 @@ -49,12 +50,18 @@ class SESBackend(BaseBackend): def verify_email_identity(self, address): self.addresses.append(address) + def verify_email_address(self, address): + self.email_addresses.append(address) + def verify_domain(self, domain): self.domains.append(domain) def list_identities(self): return self.domains + self.addresses + def list_verified_email_addresses(self): + return 
self.email_addresses + def delete_identity(self, identity): if '@' in identity: self.addresses.remove(identity) diff --git a/moto/ses/responses.py b/moto/ses/responses.py index d7bfe0787..6cd018aa6 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -15,11 +15,22 @@ class EmailResponse(BaseResponse): template = self.response_template(VERIFY_EMAIL_IDENTITY) return template.render() + def verify_email_address(self): + address = self.querystring.get('EmailAddress')[0] + ses_backend.verify_email_address(address) + template = self.response_template(VERIFY_EMAIL_ADDRESS) + return template.render() + def list_identities(self): identities = ses_backend.list_identities() template = self.response_template(LIST_IDENTITIES_RESPONSE) return template.render(identities=identities) + def list_verified_email_addresses(self): + email_addresses = ses_backend.list_verified_email_addresses() + template = self.response_template(LIST_VERIFIED_EMAIL_RESPONSE) + return template.render(email_addresses=email_addresses) + def verify_domain_dkim(self): domain = self.querystring.get('Domain')[0] ses_backend.verify_domain(domain) @@ -95,6 +106,13 @@ VERIFY_EMAIL_IDENTITY = """ + + + 47e0ef1a-9bf2-11e1-9279-0100e8cf109a + +""" + LIST_IDENTITIES_RESPONSE = """ @@ -108,6 +126,19 @@ LIST_IDENTITIES_RESPONSE = """ + + + {% for email in email_addresses %} + {{ email }} + {% endfor %} + + + + cacecf23-9bf1-11e1-9279-0100e8cf109a + +""" + VERIFY_DOMAIN_DKIM_RESPONSE = """ diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index 224ebb626..5d39f61d4 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -19,6 +19,13 @@ def test_verify_email_identity(): address = identities['Identities'][0] address.should.equal('test@example.com') +@mock_ses +def test_verify_email_address(): + conn = boto3.client('ses', region_name='us-east-1') + conn.verify_email_address(EmailAddress="test@example.com") + email_addresses = 
conn.list_verified_email_addresses() + email = email_addresses['VerifiedEmailAddresses'][0] + email.should.equal('test@example.com') @mock_ses def test_domain_verify(): From 5e5333c24351cb00f21cabab5f5666b284d196d8 Mon Sep 17 00:00:00 2001 From: gilgamezh Date: Fri, 14 Jul 2017 19:29:20 -0300 Subject: [PATCH 080/412] Avoid to override SocketType when disabling the mock and bad_socket_shadow is True --- moto/packages/httpretty/core.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py index 0974f38dd..5a8d01798 100644 --- a/moto/packages/httpretty/core.py +++ b/moto/packages/httpretty/core.py @@ -72,6 +72,10 @@ from datetime import datetime from datetime import timedelta from errno import EAGAIN +# Some versions of python internally shadowed the +# SocketType variable incorrectly https://bugs.python.org/issue20386 +BAD_SOCKET_SHADOW = socket.socket != socket.SocketType + old_socket = socket.socket old_create_connection = socket.create_connection old_gethostbyname = socket.gethostbyname @@ -976,7 +980,8 @@ class httpretty(HttpBaseClass): def disable(cls): cls._is_enabled = False socket.socket = old_socket - socket.SocketType = old_socket + if not BAD_SOCKET_SHADOW: + socket.SocketType = old_socket socket._socketobject = old_socket socket.create_connection = old_create_connection @@ -986,7 +991,8 @@ class httpretty(HttpBaseClass): socket.__dict__['socket'] = old_socket socket.__dict__['_socketobject'] = old_socket - socket.__dict__['SocketType'] = old_socket + if not BAD_SOCKET_SHADOW: + socket.__dict__['SocketType'] = old_socket socket.__dict__['create_connection'] = old_create_connection socket.__dict__['gethostname'] = old_gethostname @@ -1014,13 +1020,10 @@ class httpretty(HttpBaseClass): @classmethod def enable(cls): cls._is_enabled = True - # Some versions of python internally shadowed the - # SocketType variable incorrectly https://bugs.python.org/issue20386 - 
bad_socket_shadow = (socket.socket != socket.SocketType) socket.socket = fakesock.socket socket._socketobject = fakesock.socket - if not bad_socket_shadow: + if not BAD_SOCKET_SHADOW: socket.SocketType = fakesock.socket socket.create_connection = create_fake_connection @@ -1030,7 +1033,7 @@ class httpretty(HttpBaseClass): socket.__dict__['socket'] = fakesock.socket socket.__dict__['_socketobject'] = fakesock.socket - if not bad_socket_shadow: + if not BAD_SOCKET_SHADOW: socket.__dict__['SocketType'] = fakesock.socket socket.__dict__['create_connection'] = create_fake_connection From abf3078c28f59813315ad753b7332c6369b4ccbd Mon Sep 17 00:00:00 2001 From: eric-weaver Date: Sat, 15 Jul 2017 22:36:12 -0400 Subject: [PATCH 081/412] implement s3 object tagging --- moto/s3/models.py | 36 ++++++++++++++- moto/s3/responses.py | 49 ++++++++++++++++++++- tests/test_s3/__init__.py | 0 tests/test_s3/test_s3.py | 92 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 175 insertions(+), 2 deletions(-) create mode 100644 tests/test_s3/__init__.py diff --git a/moto/s3/models.py b/moto/s3/models.py index b824c4dbf..c1a4fb04d 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -11,7 +11,7 @@ import six from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall +from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey from .utils import clean_key_name, _VersionedKeyStore UPLOAD_ID_BYTES = 43 @@ -43,6 +43,7 @@ class FakeKey(BaseModel): self._etag = etag self._version_id = version_id self._is_versioned = is_versioned + self._tagging = FakeTagging() @property def version_id(self): @@ -59,6 +60,9 @@ class FakeKey(BaseModel): self._metadata = {} self._metadata.update(metadata) + def set_tagging(self, tagging): + self._tagging = tagging + def 
set_storage_class(self, storage_class): self._storage_class = storage_class @@ -103,6 +107,10 @@ class FakeKey(BaseModel): def metadata(self): return self._metadata + @property + def tagging(self): + return self._tagging + @property def response_dict(self): res = { @@ -253,6 +261,25 @@ def get_canned_acl(acl): return FakeAcl(grants=grants) +class FakeTagging(BaseModel): + + def __init__(self, tag_set=None): + self.tag_set = tag_set or FakeTagSet() + + +class FakeTagSet(BaseModel): + + def __init__(self, tags=None): + self.tags = tags or [] + + +class FakeTag(BaseModel): + + def __init__(self, key, value=None): + self.key = key + self.value = value + + class LifecycleRule(BaseModel): def __init__(self, id=None, prefix=None, status=None, expiration_days=None, @@ -475,6 +502,13 @@ class S3Backend(BaseBackend): else: return None + def set_key_tagging(self, bucket_name, key_name, tagging): + key = self.get_key(bucket_name, key_name) + if key is None: + raise MissingKey(key_name) + key.set_tagging(tagging) + return key + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 3b349d864..a1d5757c8 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -14,7 +14,7 @@ from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_n from .exceptions import BucketAlreadyExists, S3ClientError, MissingKey, InvalidPartOrder -from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey +from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, FakeTag from .utils import bucket_name_from_url, metadata_from_headers from xml.dom import minidom @@ -520,6 +520,9 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'acl' in query: template = self.response_template(S3_OBJECT_ACL_RESPONSE) return 200, response_headers, 
template.render(obj=key) + if 'tagging' in query: + template = self.response_template(S3_OBJECT_TAGGING_RESPONSE) + return 200, response_headers, template.render(obj=key) response_headers.update(key.metadata) response_headers.update(key.response_dict) @@ -556,6 +559,7 @@ class ResponseObject(_TemplateEnvironmentMixin): storage_class = request.headers.get('x-amz-storage-class', 'STANDARD') acl = self._acl_from_headers(request.headers) + tagging = self._tagging_from_headers(request.headers) if 'acl' in query: key = self.backend.get_key(bucket_name, key_name) @@ -563,6 +567,11 @@ class ResponseObject(_TemplateEnvironmentMixin): key.set_acl(acl) return 200, response_headers, "" + if 'tagging' in query: + tagging = self._tagging_from_xml(body) + self.backend.set_key_tagging(bucket_name, key_name, tagging) + return 200, response_headers, "" + if 'x-amz-copy-source' in request.headers: # Copy key src_key_parsed = urlparse(request.headers.get("x-amz-copy-source")) @@ -596,6 +605,7 @@ class ResponseObject(_TemplateEnvironmentMixin): new_key.set_metadata(metadata) new_key.set_acl(acl) new_key.website_redirect_location = request.headers.get('x-amz-website-redirect-location') + new_key.set_tagging(tagging) template = self.response_template(S3_OBJECT_RESPONSE) response_headers.update(new_key.response_dict) @@ -655,6 +665,30 @@ class ResponseObject(_TemplateEnvironmentMixin): else: return None + def _tagging_from_headers(self, headers): + if headers.get('x-amz-tagging'): + parsed_header = parse_qs(headers['x-amz-tagging'], keep_blank_values=True) + tags = [] + for tag in parsed_header.items(): + tags.append(FakeTag(tag[0], tag[1][0])) + + tag_set = FakeTagSet(tags) + tagging = FakeTagging(tag_set) + return tagging + else: + return FakeTagging() + + def _tagging_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + tags = [] + for tag in parsed_xml['Tagging']['TagSet']['Tag']: + tags.append(FakeTag(tag['Key'], tag['Value'])) + + tag_set = FakeTagSet(tags) + tagging = 
FakeTagging(tag_set) + return tagging + def _key_response_delete(self, bucket_name, query, key_name, headers): if query.get('uploadId'): upload_id = query['uploadId'][0] @@ -968,6 +1002,19 @@ S3_OBJECT_ACL_RESPONSE = """ """ +S3_OBJECT_TAGGING_RESPONSE = """\ + + + + {% for tag in obj.tagging.tag_set.tags %} + + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + +""" + S3_OBJECT_COPY_RESPONSE = """\ {{ key.etag }} diff --git a/tests/test_s3/__init__.py b/tests/test_s3/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 1cb00d4be..6e6b999ce 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1340,6 +1340,98 @@ def test_boto3_multipart_etag(): resp['ETag'].should.equal(EXPECTED_ETAG) +@mock_s3 +def test_boto3_put_object_with_tagging(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test', + Tagging='foo=bar', + ) + + resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + + resp['TagSet'].should.contain({'Key': 'foo', 'Value': 'bar'}) + + +@mock_s3 +def test_boto3_put_object_tagging(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + with assert_raises(ClientError) as err: + s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'}, + {'Key': 'item2', 'Value': 'bar'}, + ]} + ) + + e = err.exception + e.response['Error'].should.equal({ + 'Code': 'NoSuchKey', + 'Message': 'The specified key does not exist.', + 'RequestID': '7a62c49f-347e-4fc4-9331-6e8eEXAMPLE', + }) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'}, + 
{'Key': 'item2', 'Value': 'bar'}, + ]} + ) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_s3 +def test_boto3_get_object_tagging(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + resp['TagSet'].should.have.length_of(0) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'}, + {'Key': 'item2', 'Value': 'bar'}, + ]} + ) + resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + + resp['TagSet'].should.have.length_of(2) + resp['TagSet'].should.contain({'Key': 'item1', 'Value': 'foo'}) + resp['TagSet'].should.contain({'Key': 'item2', 'Value': 'bar'}) + + @mock_s3 def test_boto3_list_object_versions(): s3 = boto3.client('s3', region_name='us-east-1') From 63b09eae133df001af4391dcdb2c18cfab744a0f Mon Sep 17 00:00:00 2001 From: Christian Hellman Date: Mon, 17 Jul 2017 23:33:40 +0000 Subject: [PATCH 082/412] Added DescribeAccountAttributes --- moto/ec2/responses/__init__.py | 2 + moto/ec2/responses/account_attributes.py | 69 +++++++++++++++++++++++ tests/test_ec2/test_account_attributes.py | 44 +++++++++++++++ 3 files changed, 115 insertions(+) create mode 100644 moto/ec2/responses/account_attributes.py create mode 100644 tests/test_ec2/test_account_attributes.py diff --git a/moto/ec2/responses/__init__.py b/moto/ec2/responses/__init__.py index 449d25a45..1222a7ef8 100644 --- a/moto/ec2/responses/__init__.py +++ b/moto/ec2/responses/__init__.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals +from .account_attributes import AccountAttributes from .amazon_dev_pay import AmazonDevPay from .amis import AmisResponse from .availability_zones_and_regions import AvailabilityZonesAndRegions @@ -34,6 +35,7 @@ from .nat_gateways import NatGateways 
class EC2Response( + AccountAttributes, AmazonDevPay, AmisResponse, AvailabilityZonesAndRegions, diff --git a/moto/ec2/responses/account_attributes.py b/moto/ec2/responses/account_attributes.py new file mode 100644 index 000000000..8a5b9a4b0 --- /dev/null +++ b/moto/ec2/responses/account_attributes.py @@ -0,0 +1,69 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse + + +class AccountAttributes(BaseResponse): + + def describe_account_attributes(self): + template = self.response_template(DESCRIBE_ACCOUNT_ATTRIBUTES_RESULT) + return template.render() + + +DESCRIBE_ACCOUNT_ATTRIBUTES_RESULT = u""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + vpc-max-security-groups-per-interface + + + 5 + + + + + max-instances + + + 20 + + + + + supported-platforms + + + EC2 + + + VPC + + + + + default-vpc + + + none + + + + + max-elastic-ips + + + 5 + + + + + vpc-max-elastic-ips + + + 5 + + + + + +""" diff --git a/tests/test_ec2/test_account_attributes.py b/tests/test_ec2/test_account_attributes.py new file mode 100644 index 000000000..30309bec8 --- /dev/null +++ b/tests/test_ec2/test_account_attributes.py @@ -0,0 +1,44 @@ +from __future__ import unicode_literals +import boto3 +from moto import mock_ec2 +import sure # noqa + + +@mock_ec2 +def test_describe_account_attributes(): + conn = boto3.client('ec2', region_name='us-east-1') + response = conn.describe_account_attributes() + expected_attribute_values = [{ + 'AttributeValues': [{ + 'AttributeValue': '5' + }], + 'AttributeName': 'vpc-max-security-groups-per-interface' + }, { + 'AttributeValues': [{ + 'AttributeValue': '20' + }], + 'AttributeName': 'max-instances' + }, { + 'AttributeValues': [{ + 'AttributeValue': 'EC2' + }, { + 'AttributeValue': 'VPC' + }], + 'AttributeName': 'supported-platforms' + }, { + 'AttributeValues': [{ + 'AttributeValue': 'none' + }], + 'AttributeName': 'default-vpc' + }, { + 'AttributeValues': [{ + 'AttributeValue': '5' + }], + 'AttributeName': 'max-elastic-ips' + 
}, { + 'AttributeValues': [{ + 'AttributeValue': '5' + }], + 'AttributeName': 'vpc-max-elastic-ips' + }] + response['AccountAttributes'].should.equal(expected_attribute_values) From 73ede75c392e43de47ba804ea165256c5ab39bd3 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 19 Jul 2017 12:20:01 -0700 Subject: [PATCH 083/412] Adding test for ELBv1 security groups --- tests/test_elb/test_elb.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index f9019eed2..681ffb830 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -143,6 +143,28 @@ def test_describe_paginated_balancers(): assert 'NextToken' not in resp2.keys() +@mock_elb +@mock_ec2 +def test_add_and_remove_security_groups(): + client = boto3.client('elb', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + security_group = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + response = client.apply_security_groups_to_load_balancer( + LoadBalancerName='my-lb', + SecurityGroups=[security_group.id]) + assert response['SecurityGroups'] == [security_group.id] + @mock_elb_deprecated def test_add_listener(): From 7d0a575ab10ee8b7975076238d2aafc2472c2a3a Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 19 Jul 2017 12:20:58 -0700 Subject: [PATCH 084/412] Removing unused import --- tests/test_elb/test_elb.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 681ffb830..1feed433e 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -11,7 +11,6 @@ from boto.ec2.elb.attributes import ( ) from 
boto.ec2.elb.policies import ( Policies, - AppCookieStickinessPolicy, LBCookieStickinessPolicy, OtherPolicy, ) From b512316c828f8c484f7fd781dde031975ca61205 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 19 Jul 2017 12:36:04 -0700 Subject: [PATCH 085/412] removing further unused imports --- tests/test_elb/test_elb.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 1feed433e..35dddc39e 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -9,11 +9,6 @@ from boto.ec2.elb.attributes import ( ConnectionDrainingAttribute, AccessLogAttribute, ) -from boto.ec2.elb.policies import ( - Policies, - LBCookieStickinessPolicy, - OtherPolicy, -) from botocore.exceptions import ClientError from boto.exception import BotoServerError from nose.tools import assert_raises From 6ed8d12317f2f223767320930d8f886472796d05 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 19 Jul 2017 15:58:49 -0700 Subject: [PATCH 086/412] Enforcing ELB security groups must be real --- moto/ec2/models.py | 2 +- moto/elb/exceptions.py | 8 ++++++++ moto/elb/models.py | 27 ++++++++++++++++++++++++--- moto/elb/responses.py | 34 ++++++++++++++++++++++++++++------ tests/test_elb/test_elb.py | 21 ++++++++++++++++++--- 5 files changed, 79 insertions(+), 13 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 7e3df9880..6c35093e2 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3553,8 +3553,8 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, DHCPOptionsSetBackend, NetworkAclBackend, VpnGatewayBackend, CustomerGatewayBackend, NatGatewayBackend): def __init__(self, region_name): - super(EC2Backend, self).__init__() self.region_name = region_name + super(EC2Backend, self).__init__() # Default VPC exists by default, which is the current behavior # of EC2-VPC. 
See for detail: diff --git a/moto/elb/exceptions.py b/moto/elb/exceptions.py index 6c316ef47..3ea6a1642 100644 --- a/moto/elb/exceptions.py +++ b/moto/elb/exceptions.py @@ -64,3 +64,11 @@ class EmptyListenersError(ELBClientError): super(EmptyListenersError, self).__init__( "ValidationError", "Listeners cannot be empty") + + +class InvalidSecurityGroupError(ELBClientError): + + def __init__(self): + super(InvalidSecurityGroupError, self).__init__( + "ValidationError", + "One or more of the specified security groups do not exist.") diff --git a/moto/elb/models.py b/moto/elb/models.py index d09548340..504c68908 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -20,6 +20,7 @@ from .exceptions import ( DuplicateLoadBalancerName, DuplicateListenerError, EmptyListenersError, + InvalidSecurityGroupError, LoadBalancerNotFoundError, TooManyTagsError, ) @@ -63,7 +64,7 @@ class FakeBackend(BaseModel): class FakeLoadBalancer(BaseModel): - def __init__(self, name, zones, ports, scheme='internet-facing', vpc_id=None, subnets=None): + def __init__(self, name, zones, ports, scheme='internet-facing', vpc_id=None, subnets=None, security_groups=None): self.name = name self.health_check = None self.instance_ids = [] @@ -77,6 +78,7 @@ class FakeLoadBalancer(BaseModel): self.policies.other_policies = [] self.policies.app_cookie_stickiness_policies = [] self.policies.lb_cookie_stickiness_policies = [] + self.security_groups = security_groups or [] self.subnets = subnets or [] self.vpc_id = vpc_id or 'vpc-56e10e3d' self.tags = {} @@ -233,7 +235,7 @@ class ELBBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def create_load_balancer(self, name, zones, ports, scheme='internet-facing', subnets=None): + def create_load_balancer(self, name, zones, ports, scheme='internet-facing', subnets=None, security_groups=None): vpc_id = None ec2_backend = ec2_backends[self.region_name] if subnets: @@ -243,8 +245,19 @@ class ELBBackend(BaseBackend): raise 
DuplicateLoadBalancerName(name) if not ports: raise EmptyListenersError() + if not security_groups: + security_groups = [] + for security_group in security_groups: + if ec2_backend.get_security_group_from_id(security_group) is None: + raise InvalidSecurityGroupError() new_load_balancer = FakeLoadBalancer( - name=name, zones=zones, ports=ports, scheme=scheme, subnets=subnets, vpc_id=vpc_id) + name=name, + zones=zones, + ports=ports, + scheme=scheme, + subnets=subnets, + security_groups=security_groups, + vpc_id=vpc_id) self.load_balancers[name] = new_load_balancer return new_load_balancer @@ -302,6 +315,14 @@ class ELBBackend(BaseBackend): def get_load_balancer(self, load_balancer_name): return self.load_balancers.get(load_balancer_name) + def apply_security_groups_to_load_balancer(self, load_balancer_name, security_group_ids): + load_balancer = self.load_balancers.get(load_balancer_name) + ec2_backend = ec2_backends[self.region_name] + for security_group_id in security_group_ids: + if ec2_backend.get_security_group_from_id(security_group_id) is None: + raise InvalidSecurityGroupError() + load_balancer.security_groups = security_group_ids + def configure_health_check(self, load_balancer_name, timeout, healthy_threshold, unhealthy_threshold, interval, target): diff --git a/moto/elb/responses.py b/moto/elb/responses.py index ec20486f0..659c454b1 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -27,6 +27,7 @@ class ELBResponse(BaseResponse): ports = self._get_list_prefix("Listeners.member") scheme = self._get_param('Scheme') subnets = self._get_multi_param("Subnets.member") + security_groups = self._get_multi_param("SecurityGroups.member") load_balancer = self.elb_backend.create_load_balancer( name=load_balancer_name, @@ -34,6 +35,7 @@ class ELBResponse(BaseResponse): ports=ports, scheme=scheme, subnets=subnets, + security_groups=security_groups, ) self._add_tags(load_balancer) template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE) @@ -84,6 
+86,13 @@ class ELBResponse(BaseResponse): template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE) return template.render() + def apply_security_groups_to_load_balancer(self): + load_balancer_name = self._get_param('LoadBalancerName') + security_group_ids = self._get_multi_param("SecurityGroups.member") + self.elb_backend.apply_security_groups_to_load_balancer(load_balancer_name, security_group_ids) + template = self.response_template(APPLY_SECURITY_GROUPS_TEMPLATE) + return template.render(security_group_ids=security_group_ids) + def configure_health_check(self): check = self.elb_backend.configure_health_check( load_balancer_name=self._get_param('LoadBalancerName'), @@ -99,8 +108,7 @@ class ELBResponse(BaseResponse): def register_instances_with_load_balancer(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [value[0] for key, value in self.querystring.items( - ) if "Instances.member" in key] + instance_ids = [param.values()[0] for param in self._get_list_prefix('Instances.member')] template = self.response_template(REGISTER_INSTANCES_TEMPLATE) load_balancer = self.elb_backend.register_instances( load_balancer_name, instance_ids) @@ -119,8 +127,7 @@ class ELBResponse(BaseResponse): def deregister_instances_from_load_balancer(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [value[0] for key, value in self.querystring.items( - ) if "Instances.member" in key] + instance_ids = [param.values()[0] for param in self._get_list_prefix('Instances.member')] template = self.response_template(DEREGISTER_INSTANCES_TEMPLATE) load_balancer = self.elb_backend.deregister_instances( load_balancer_name, instance_ids) @@ -252,8 +259,7 @@ class ELBResponse(BaseResponse): def describe_instance_health(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [value[0] for key, value in self.querystring.items( - ) if "Instances.member" in key] + instance_ids = [param.values()[0] for param in 
self._get_list_prefix('Instances.member')] if len(instance_ids) == 0: instance_ids = self.elb_backend.get_load_balancer( load_balancer_name).instance_ids @@ -400,6 +406,9 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ + + + {% for security_group_id in security_group_ids %} + {{ security_group_id }} + {% endfor %} + + + + f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c + +""" + CONFIGURE_HEALTH_CHECK_TEMPLATE = """ diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 35dddc39e..98ec7d8e6 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -18,17 +18,22 @@ from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated @mock_elb_deprecated +@mock_ec2_deprecated def test_create_load_balancer(): conn = boto.connect_elb() + ec2 = boto.connect_ec2('the_key', 'the_secret') + + security_group = ec2.create_security_group('sg-abc987', 'description') zones = ['us-east-1a', 'us-east-1b'] ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports, scheme='internal') + conn.create_load_balancer('my-lb', zones, ports, scheme='internal', security_groups=[security_group.id]) balancers = conn.get_all_load_balancers() balancer = balancers[0] balancer.name.should.equal("my-lb") balancer.scheme.should.equal("internal") + list(balancer.security_groups).should.equal([security_group.id]) set(balancer.availability_zones).should.equal( set(['us-east-1a', 'us-east-1b'])) listener1 = balancer.listeners[0] @@ -139,9 +144,9 @@ def test_describe_paginated_balancers(): @mock_elb @mock_ec2 -def test_add_and_remove_security_groups(): +def test_apply_security_groups_to_load_balancer(): client = boto3.client('elb', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-west-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') security_group = ec2.create_security_group( @@ -157,7 +162,17 @@ def test_add_and_remove_security_groups(): response = 
client.apply_security_groups_to_load_balancer( LoadBalancerName='my-lb', SecurityGroups=[security_group.id]) + assert response['SecurityGroups'] == [security_group.id] + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + assert balancer['SecurityGroups'] == [security_group.id] + + # Usign a not-real security group raises an error + with assert_raises(ClientError) as error: + response = client.apply_security_groups_to_load_balancer( + LoadBalancerName='my-lb', + SecurityGroups=['not-really-a-security-group']) + assert "One or more of the specified security groups do not exist." in str(error.exception) @mock_elb_deprecated From 45d723044099d19ced81118c2ebc7c87b79d65ed Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 19 Jul 2017 16:01:00 -0700 Subject: [PATCH 087/412] fixing typo --- tests/test_elb/test_elb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 98ec7d8e6..5827e70c7 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -167,7 +167,7 @@ def test_apply_security_groups_to_load_balancer(): balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] assert balancer['SecurityGroups'] == [security_group.id] - # Usign a not-real security group raises an error + # Using a not-real security group raises an error with assert_raises(ClientError) as error: response = client.apply_security_groups_to_load_balancer( LoadBalancerName='my-lb', From 7d00f6e92c1434d55be4d4da6c2b8bc2ad091433 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 19 Jul 2017 16:33:24 -0700 Subject: [PATCH 088/412] python 2 support for dict_values --- moto/elb/responses.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/elb/responses.py b/moto/elb/responses.py index 659c454b1..b1980c9b2 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -108,7 +108,7 @@ class ELBResponse(BaseResponse): def 
register_instances_with_load_balancer(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [param.values()[0] for param in self._get_list_prefix('Instances.member')] + instance_ids = [list(param.values())[0] for param in self._get_list_prefix('Instances.member')] template = self.response_template(REGISTER_INSTANCES_TEMPLATE) load_balancer = self.elb_backend.register_instances( load_balancer_name, instance_ids) @@ -127,7 +127,7 @@ class ELBResponse(BaseResponse): def deregister_instances_from_load_balancer(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [param.values()[0] for param in self._get_list_prefix('Instances.member')] + instance_ids = [list(param.values())[0] for param in self._get_list_prefix('Instances.member')] template = self.response_template(DEREGISTER_INSTANCES_TEMPLATE) load_balancer = self.elb_backend.deregister_instances( load_balancer_name, instance_ids) @@ -259,7 +259,7 @@ class ELBResponse(BaseResponse): def describe_instance_health(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [param.values()[0] for param in self._get_list_prefix('Instances.member')] + instance_ids = [list(param.values())[0] for param in self._get_list_prefix('Instances.member')] if len(instance_ids) == 0: instance_ids = self.elb_backend.get_load_balancer( load_balancer_name).instance_ids From 115b9cee3e823208fa8ff389f6fd955b849ae673 Mon Sep 17 00:00:00 2001 From: Waldemar Hummer Date: Thu, 20 Jul 2017 14:25:46 +1000 Subject: [PATCH 089/412] add CloudFormation model for Kinesis streams --- moto/cloudformation/parsing.py | 2 ++ moto/kinesis/models.py | 7 +++++ .../test_cloudformation_stack_crud.py | 29 ++++++++++++++++++- 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 928cd68e0..923ada058 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -15,6 +15,7 @@ from moto.ec2 
import models as ec2_models from moto.ecs import models as ecs_models from moto.elb import models as elb_models from moto.iam import models as iam_models +from moto.kinesis import models as kinesis_models from moto.kms import models as kms_models from moto.rds import models as rds_models from moto.rds2 import models as rds2_models @@ -31,6 +32,7 @@ MODEL_MAP = { "AWS::AutoScaling::AutoScalingGroup": autoscaling_models.FakeAutoScalingGroup, "AWS::AutoScaling::LaunchConfiguration": autoscaling_models.FakeLaunchConfiguration, "AWS::DynamoDB::Table": dynamodb_models.Table, + "AWS::Kinesis::Stream": kinesis_models.Stream, "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, "AWS::Lambda::Function": lambda_models.LambdaFunction, "AWS::Lambda::Version": lambda_models.LambdaVersion, diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index 13900e6a6..aae94bbbd 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -172,6 +172,13 @@ class Stream(BaseModel): } } + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + region = properties.get('Region', 'us-east-1') + shard_count = properties.get('ShardCount', 1) + return Stream(properties['Name'], shard_count, region) + class FirehoseRecord(BaseModel): diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 0e3634756..801faf8a1 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -569,7 +569,6 @@ def test_describe_stack_events_shows_create_update_and_delete(): @mock_cloudformation_deprecated -@mock_route53_deprecated def test_create_stack_lambda_and_dynamodb(): conn = boto.connect_cloudformation() dummy_template = { @@ -643,3 +642,31 @@ def test_create_stack_lambda_and_dynamodb(): stack = conn.describe_stacks()[0] 
resources = stack.list_resources() assert len(resources) == 4 + + +@mock_cloudformation_deprecated +def test_create_stack_kinesis(): + conn = boto.connect_cloudformation() + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack Kinesis Test 1", + "Parameters": {}, + "Resources": { + "stream1": { + "Type" : "AWS::Kinesis::Stream", + "Properties" : { + "Name": "stream1", + "ShardCount": 2 + } + } + } + } + conn.create_stack( + "test_stack_kinesis_1", + template_body=json.dumps(dummy_template), + parameters={}.items() + ) + + stack = conn.describe_stacks()[0] + resources = stack.list_resources() + assert len(resources) == 1 From 38fa6809c086b0aae80fa9c6746671627b63e239 Mon Sep 17 00:00:00 2001 From: Taro Sato Date: Wed, 19 Jul 2017 17:18:31 -0700 Subject: [PATCH 090/412] Make HEAD bucket throw ClientError instead of NoSuchBucket on boto3 --- moto/s3/responses.py | 11 +++++++++-- tests/test_s3/test_s3.py | 26 +++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index a1d5757c8..ec1361cb8 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -13,7 +13,7 @@ from moto.core.responses import _TemplateEnvironmentMixin from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys -from .exceptions import BucketAlreadyExists, S3ClientError, MissingKey, InvalidPartOrder +from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, FakeTag from .utils import bucket_name_from_url, metadata_from_headers from xml.dom import minidom @@ -155,7 +155,14 @@ class ResponseObject(_TemplateEnvironmentMixin): "Method {0} has not been impelemented in the S3 backend yet".format(method)) def 
_bucket_response_head(self, bucket_name, headers): - self.backend.get_bucket(bucket_name) + try: + self.backend.get_bucket(bucket_name) + except MissingBucket: + # Unless we do this, boto3 does not raise ClientError on + # HEAD (which the real API responds with), and instead + # raises NoSuchBucket, leading to inconsistency in + # error response between real and mocked responses. + return 404, {}, "Not Found" return 200, {}, "" def _bucket_response_get(self, bucket_name, querystring, headers): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 6e6b999ce..26b25dd9a 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1250,6 +1250,31 @@ def test_boto3_head_object(): e.exception.response['Error']['Code'].should.equal('404') +@mock_s3 +def test_boto3_bucket_deletion(): + cli = boto3.client('s3', region_name='us-east-1') + cli.create_bucket(Bucket="foobar") + + cli.put_object(Bucket="foobar", Key="the-key", Body="some value") + + # Try to delete a bucket that still has keys + cli.delete_bucket.when.called_with(Bucket="foobar").should.throw( + cli.exceptions.ClientError, + ('An error occurred (BucketNotEmpty) when calling the DeleteBucket operation: ' + 'The bucket you tried to delete is not empty')) + + cli.delete_object(Bucket="foobar", Key="the-key") + cli.delete_bucket(Bucket="foobar") + + # Get non-existing bucket + cli.head_bucket.when.called_with(Bucket="foobar").should.throw( + cli.exceptions.ClientError, + "An error occurred (404) when calling the HeadBucket operation: Not Found") + + # Delete non-existing bucket + cli.delete_bucket.when.called_with(Bucket="foobar").should.throw(cli.exceptions.NoSuchBucket) + + @mock_s3 def test_boto3_get_object(): s3 = boto3.resource('s3', region_name='us-east-1') @@ -1560,4 +1585,3 @@ TEST_XML = """\ """ - From 025e975e446f0945c53182dbe6aebfe43c07273f Mon Sep 17 00:00:00 2001 From: William Richard Date: Tue, 25 Jul 2017 17:54:05 -0400 Subject: [PATCH 091/412] Add ecr 
get_authorization_token response and tests --- moto/ecr/responses.py | 17 ++++++++++---- tests/test_ecr/test_ecr_boto3.py | 38 ++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 4 deletions(-) diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index 40d8cfb66..6c12a186d 100644 --- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py @@ -1,12 +1,13 @@ from __future__ import unicode_literals import json +from datetime import datetime +import time from moto.core.responses import BaseResponse from .models import ecr_backends class ECRResponse(BaseResponse): - @property def ecr_backend(self): return ecr_backends[self.region] @@ -111,9 +112,17 @@ class ECRResponse(BaseResponse): 'ECR.generate_presigned_url is not yet implemented') def get_authorization_token(self): - if self.is_not_dryrun('GetAuthorizationToken'): - raise NotImplementedError( - 'ECR.get_authorization_token is not yet implemented') + registry_ids = self._get_param('registryIds') + if not registry_ids: + registry_ids = [self.region] + auth_data = [] + for registry_id in registry_ids: + auth_data.append({ + 'authorizationToken': '{}-auth-token'.format(registry_id), + 'expiresAt': time.mktime(datetime(2015, 1, 1).timetuple()), + 'proxyEndpoint': 'https://012345678910.dkr.ecr.{}.amazonaws.com'.format(registry_id) + }) + return json.dumps({'authorizationData': auth_data}) def get_download_url_for_layer(self): if self.is_not_dryrun('GetDownloadUrlForLayer'): diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 647015446..5a10fb778 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -2,11 +2,13 @@ from __future__ import unicode_literals import hashlib import json +from datetime import datetime from random import random import sure # noqa import boto3 +from dateutil.tz import tzlocal from moto import mock_ecr @@ -368,3 +370,39 @@ def test_describe_images_by_digest(): 
image_detail['repositoryName'].should.equal("test_repository") image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) image_detail['imageDigest'].should.equal(digest) + + +@mock_ecr +def test_get_authorization_token_assume_region(): + client = boto3.client('ecr', region_name='us-east-1') + auth_token_response = client.get_authorization_token() + + list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) + auth_token_response['authorizationData'].should.equal([ + { + 'authorizationToken': 'us-east-1-auth-token', + 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', + 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) + }, + ]) + + +@mock_ecr +def test_get_authorization_token_explicit_regions(): + client = boto3.client('ecr', region_name='us-east-1') + auth_token_response = client.get_authorization_token(registryIds=['us-east-1', 'us-west-1']) + + list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) + auth_token_response['authorizationData'].should.equal([ + { + 'authorizationToken': 'us-east-1-auth-token', + 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', + 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()), + }, + { + 'authorizationToken': 'us-west-1-auth-token', + 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-west-1.amazonaws.com', + 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) + + } + ]) From a5089c3d690ad835d3b6034cd7948662e1d753fa Mon Sep 17 00:00:00 2001 From: James Brennan Date: Wed, 26 Jul 2017 11:38:12 +0000 Subject: [PATCH 092/412] Add add, remove, list endpoints for SSM tags --- moto/ssm/models.py | 16 ++++++++++++++++ moto/ssm/responses.py | 25 +++++++++++++++++++++++++ tests/test_ssm/test_ssm_boto3.py | 30 ++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index f1aac336b..63cb3c8ba 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py 
@@ -1,5 +1,7 @@ from __future__ import unicode_literals +from collections import defaultdict + from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends @@ -42,6 +44,7 @@ class SimpleSystemManagerBackend(BaseBackend): def __init__(self): self._parameters = {} + self._resource_tags = defaultdict(lambda: defaultdict(dict)) def delete_parameter(self, name): try: @@ -68,6 +71,19 @@ class SimpleSystemManagerBackend(BaseBackend): self._parameters[name] = Parameter( name, value, type, description, keyid) + def add_tags_to_resource(self, resource_type, resource_id, tags): + for key, value in tags.items(): + self._resource_tags[resource_type][resource_id][key] = value + + def remove_tags_from_resource(self, resource_type, resource_id, keys): + tags = self._resource_tags[resource_type][resource_id] + for key in keys: + if key in tags: + del tags[key] + + def list_tags_for_resource(self, resource_type, resource_id): + return self._resource_tags[resource_type][resource_id] + ssm_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 09fe6d0c2..1fa1a81b2 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -108,3 +108,28 @@ class SimpleSystemManagerResponse(BaseResponse): self.ssm_backend.put_parameter( name, description, value, type_, keyid, overwrite) return json.dumps({}) + + def add_tags_to_resource(self): + resource_id = self._get_param('ResourceId') + resource_type = self._get_param('ResourceType') + tags = {t['Key']: t['Value'] for t in self._get_param('Tags')} + self.ssm_backend.add_tags_to_resource( + resource_id, resource_type, tags) + return json.dumps({}) + + def remove_tags_from_resource(self): + resource_id = self._get_param('ResourceId') + resource_type = self._get_param('ResourceType') + keys = self._get_param('TagKeys') + self.ssm_backend.remove_tags_from_resource( + resource_id, resource_type, keys) + return json.dumps({}) + + def 
list_tags_for_resource(self): + resource_id = self._get_param('ResourceId') + resource_type = self._get_param('ResourceType') + tags = self.ssm_backend.list_tags_for_resource( + resource_id, resource_type) + tag_list = [{'Key': k, 'Value': v} for (k, v) in tags.items()] + response = {'TagList': tag_list} + return json.dumps(response) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 60a027933..418c58708 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -247,3 +247,33 @@ def test_put_parameter_secure_custom_kms(): response['Parameters'][0]['Name'].should.equal('test') response['Parameters'][0]['Value'].should.equal('value') response['Parameters'][0]['Type'].should.equal('SecureString') + +@mock_ssm +def test_add_remove_list_tags_for_resource(): + client = boto3.client('ssm', region_name='us-east-1') + + client.add_tags_to_resource( + ResourceId='test', + ResourceType='Parameter', + Tags=[{'Key': 'test-key', 'Value': 'test-value'}] + ) + + response = client.list_tags_for_resource( + ResourceId='test', + ResourceType='Parameter' + ) + len(response['TagList']).should.equal(1) + response['TagList'][0]['Key'].should.equal('test-key') + response['TagList'][0]['Value'].should.equal('test-value') + + client.remove_tags_from_resource( + ResourceId='test', + ResourceType='Parameter', + TagKeys=['test-key'] + ) + + response = client.list_tags_for_resource( + ResourceId='test', + ResourceType='Parameter' + ) + len(response['TagList']).should.equal(0) From aeefc8056d34dc8830342bfe70326700a9df1b67 Mon Sep 17 00:00:00 2001 From: William Richard Date: Wed, 26 Jul 2017 12:03:20 -0400 Subject: [PATCH 093/412] Boto actually returns a base64 encoded string of : Fix the mock to do the same thing --- moto/ecr/responses.py | 5 ++++- tests/test_ecr/test_ecr_boto3.py | 6 +++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index 6c12a186d..4fa0946b8 100644 
--- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import json +from base64 import b64encode from datetime import datetime import time @@ -117,8 +118,10 @@ class ECRResponse(BaseResponse): registry_ids = [self.region] auth_data = [] for registry_id in registry_ids: + password = '{}-auth-token'.format(registry_id) + auth_token = b64encode("AWS:{}".format(password).encode('ascii')).decode() auth_data.append({ - 'authorizationToken': '{}-auth-token'.format(registry_id), + 'authorizationToken': auth_token, 'expiresAt': time.mktime(datetime(2015, 1, 1).timetuple()), 'proxyEndpoint': 'https://012345678910.dkr.ecr.{}.amazonaws.com'.format(registry_id) }) diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 5a10fb778..581906321 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -380,7 +380,7 @@ def test_get_authorization_token_assume_region(): list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) auth_token_response['authorizationData'].should.equal([ { - 'authorizationToken': 'us-east-1-auth-token', + 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) }, @@ -395,12 +395,12 @@ def test_get_authorization_token_explicit_regions(): list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) auth_token_response['authorizationData'].should.equal([ { - 'authorizationToken': 'us-east-1-auth-token', + 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()), }, { - 'authorizationToken': 'us-west-1-auth-token', + 'authorizationToken': 'QVdTOnVzLXdlc3QtMS1hdXRoLXRva2Vu', 'proxyEndpoint': 
'https://012345678910.dkr.ecr.us-west-1.amazonaws.com', 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) From 3eef3c23b113493027bf1c4de65107719e3e8a74 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 26 Jul 2017 22:57:55 -0700 Subject: [PATCH 094/412] Updating examples in README to latest API --- README.md | 42 +++++++++++++++++++++------------------- tests/test_s3/test_s3.py | 3 +-- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index f07984328..369d430f5 100644 --- a/README.md +++ b/README.md @@ -123,28 +123,29 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L Imagine you have a function that you use to launch new ec2 instances: ```python -import boto +import boto3 + def add_servers(ami_id, count): - conn = boto.connect_ec2('the_key', 'the_secret') - for index in range(count): - conn.run_instances(ami_id) + client = boto3.client('ec2', region_name='us-west-1') + client.run_instances(ImageId=ami_id, MinCount=count, MaxCount=count) ``` To test it: ```python from . 
import add_servers +from moto import mock_ec2 @mock_ec2 def test_add_servers(): add_servers('ami-1234abcd', 2) - conn = boto.connect_ec2('the_key', 'the_secret') - reservations = conn.get_all_instances() - assert len(reservations) == 2 - instance1 = reservations[0].instances[0] - assert instance1.image_id == 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-west-1') + instances = client.describe_instances()['Reservations'][0]['Instances'] + assert len(instances) == 2 + instance1 = instances[0] + assert instance1['ImageId'] == 'ami-1234abcd' ``` ## Usage @@ -156,13 +157,14 @@ All of the services can be used as a decorator, context manager, or in a raw for ```python @mock_s3 def test_my_model_save(): - conn = boto.connect_s3() - conn.create_bucket('mybucket') - + # Create Bucket so that test can run + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() + body = conn.Object('mybucket', 'steve').get()['Body'].read().decode() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + assert body == 'is awesome' ``` ### Context Manager @@ -170,13 +172,13 @@ def test_my_model_save(): ```python def test_my_model_save(): with mock_s3(): - conn = boto.connect_s3() - conn.create_bucket('mybucket') - + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() + body = conn.Object('mybucket', 'steve').get()['Body'].read().decode() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + assert body == 'is awesome' ``` @@ -187,13 +189,13 @@ def test_my_model_save(): mock = mock_s3() mock.start() - conn = boto.connect_s3() - conn.create_bucket('mybucket') + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = 
MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + assert conn.Object('mybucket', 'steve').get()['Body'].read().decode() == 'is awesome' mock.stop() ``` diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 26b25dd9a..619a60302 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -74,8 +74,7 @@ def test_my_model_save(): model_instance = MyModel('steve', 'is awesome') model_instance.save() - body = conn.Object('mybucket', 'steve').get()[ - 'Body'].read().decode("utf-8") + body = conn.Object('mybucket', 'steve').get()['Body'].read().decode() assert body == 'is awesome' From e445c81e83b2b9602fea4956b9f40e4b339c113b Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 23 Jul 2017 22:31:58 -0700 Subject: [PATCH 095/412] Implement IAM {update,get}_login_profile --- moto/iam/models.py | 19 ++++++++++++++ moto/iam/responses.py | 51 +++++++++++++++++++++++++++++++++----- tests/test_iam/test_iam.py | 23 +++++++++++++++++ 3 files changed, 87 insertions(+), 6 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index da11d58b2..1e4b58578 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -256,6 +256,7 @@ class User(BaseModel): self.policies = {} self.access_keys = [] self.password = None + self.password_reset_required = False @property def arn(self): @@ -772,6 +773,24 @@ class IAMBackend(BaseBackend): raise IAMConflictException( "User {0} already has password".format(user_name)) user.password = password + return user + + def get_login_profile(self, user_name): + user = self.get_user(user_name) + if not user.password: + raise IAMNotFoundException( + "Login profile for {0} not found".format(user_name)) + return user + + def update_login_profile(self, user_name, password, password_reset_required): + # This does not currently deal with PasswordPolicyViolation. 
+ user = self.get_user(user_name) + if not user.password: + raise IAMNotFoundException( + "Login profile for {0} not found".format(user_name)) + user.password = password + user.password_reset_required = password_reset_required + return user def delete_login_profile(self, user_name): user = self.get_user(user_name) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 138c08d23..a5e5081c3 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -290,10 +290,27 @@ class IamResponse(BaseResponse): def create_login_profile(self): user_name = self._get_param('UserName') password = self._get_param('Password') - iam_backend.create_login_profile(user_name, password) + password = self._get_param('Password') + user = iam_backend.create_login_profile(user_name, password) template = self.response_template(CREATE_LOGIN_PROFILE_TEMPLATE) - return template.render(user_name=user_name) + return template.render(user=user) + + def get_login_profile(self): + user_name = self._get_param('UserName') + user = iam_backend.get_login_profile(user_name) + + template = self.response_template(GET_LOGIN_PROFILE_TEMPLATE) + return template.render(user=user) + + def update_login_profile(self): + user_name = self._get_param('UserName') + password = self._get_param('Password') + password_reset_required = self._get_param('PasswordResetRequired') + user = iam_backend.update_login_profile(user_name, password, password_reset_required) + + template = self.response_template(UPDATE_LOGIN_PROFILE_TEMPLATE) + return template.render(user=user) def add_user_to_group(self): group_name = self._get_param('GroupName') @@ -918,12 +935,11 @@ LIST_USERS_TEMPLATE = """<{{ action }}UsersResponse> """ -CREATE_LOGIN_PROFILE_TEMPLATE = """ - +CREATE_LOGIN_PROFILE_TEMPLATE = """ - {{ user_name }} - 2011-09-19T23:00:56Z + {{ user.name }} + {{ user.created_iso_8601 }} @@ -932,6 +948,29 @@ CREATE_LOGIN_PROFILE_TEMPLATE = """ """ +GET_LOGIN_PROFILE_TEMPLATE = """ + + + {{ user.name }} + {{ 
user.created_iso_8601 }} + {% if user.password_reset_required %} + true + {% endif %} + + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + +""" + +UPDATE_LOGIN_PROFILE_TEMPLATE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + +""" + GET_USER_POLICY_TEMPLATE = """ {{ user_name }} diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 46b727360..b5968f722 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -114,6 +114,29 @@ def test_remove_role_from_instance_profile(): dict(profile.roles).should.be.empty +@mock_iam() +def test_get_login_profile(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + conn.create_login_profile(UserName='my-user', Password='my-pass') + + response = conn.get_login_profile(UserName='my-user') + response['LoginProfile']['UserName'].should.equal('my-user') + + +@mock_iam() +def test_update_login_profile(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + conn.create_login_profile(UserName='my-user', Password='my-pass') + response = conn.get_login_profile(UserName='my-user') + response['LoginProfile'].get('PasswordResetRequired').should.equal(None) + + conn.update_login_profile(UserName='my-user', Password='new-pass', PasswordResetRequired=True) + response = conn.get_login_profile(UserName='my-user') + response['LoginProfile'].get('PasswordResetRequired').should.equal(True) + + @mock_iam() def test_delete_role(): conn = boto3.client('iam', region_name='us-east-1') From 92eedcf291999629d1b8333cb2c7e15bc08e4b26 Mon Sep 17 00:00:00 2001 From: sodastsai Date: Sun, 30 Jul 2017 20:44:06 +0800 Subject: [PATCH 096/412] Send JSON message to HTTP endpoint of SNS By the documentation from AWS - http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html , SNS would send messages to HTTP/HTTPS endpoint in JSON format. But current implementation of `moto` sends messages in form-data format. 
--- moto/sns/models.py | 2 +- tests/test_sns/test_publishing_boto3.py | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 5289c8bcd..a6b6c3a52 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -84,7 +84,7 @@ class Subscription(BaseModel): sqs_backends[region].send_message(queue_name, message) elif self.protocol in ['http', 'https']: post_data = self.get_post_data(message, message_id) - requests.post(self.endpoint, data=post_data) + requests.post(self.endpoint, json=post_data) def get_post_data(self, message, message_id): return { diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index cda9fed60..00c9ac7e2 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -1,4 +1,7 @@ from __future__ import unicode_literals + +import json + from six.moves.urllib.parse import parse_qs import boto3 @@ -56,9 +59,15 @@ def test_publish_to_sqs_in_different_region(): @freeze_time("2013-01-01") @mock_sns def test_publish_to_http(): - responses.add( + def callback(request): + request.headers["Content-Type"].should.equal("application/json") + json.loads.when.called_with(request.body).should_not.throw(Exception) + return 200, {}, "" + + responses.add_callback( method="POST", url="http://example.com/foobar", + callback=callback, ) conn = boto3.client('sns', region_name='us-east-1') From d76559ee7c8724d86f975a7eb2fb02fb01686dbb Mon Sep 17 00:00:00 2001 From: Peter Us Date: Mon, 31 Jul 2017 13:37:29 +0200 Subject: [PATCH 097/412] SNS delete_topic should also delete subscriptions. 
--- moto/sns/models.py | 9 ++++++- tests/test_sns/test_subscriptions.py | 31 ++++++++++++++++++++++ tests/test_sns/test_subscriptions_boto3.py | 30 +++++++++++++++++++++ 3 files changed, 69 insertions(+), 1 deletion(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 5289c8bcd..6d0833476 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -193,10 +193,17 @@ class SNSBackend(BaseBackend): next_token = None return values, next_token + def _get_topic_subscriptions(self, topic): + return [sub for sub in self.subscriptions.values() if sub.topic == topic] + def list_topics(self, next_token=None): return self._get_values_nexttoken(self.topics, next_token) def delete_topic(self, arn): + topic = self.get_topic(arn) + subscriptions = self._get_topic_subscriptions(topic) + for sub in subscriptions: + self.unsubscribe(sub.arn) self.topics.pop(arn) def get_topic(self, arn): @@ -222,7 +229,7 @@ class SNSBackend(BaseBackend): if topic_arn: topic = self.get_topic(topic_arn) filtered = OrderedDict( - [(k, sub) for k, sub in self.subscriptions.items() if sub.topic == topic]) + [(sub.arn, sub) for sub in self._get_topic_subscriptions(topic)]) return self._get_values_nexttoken(filtered, next_token) else: return self._get_values_nexttoken(self.subscriptions, next_token) diff --git a/tests/test_sns/test_subscriptions.py b/tests/test_sns/test_subscriptions.py index c521bb428..292fd83c0 100644 --- a/tests/test_sns/test_subscriptions.py +++ b/tests/test_sns/test_subscriptions.py @@ -34,6 +34,37 @@ def test_creating_subscription(): "ListSubscriptionsResult"]["Subscriptions"] subscriptions.should.have.length_of(0) +@mock_sns_deprecated +def test_deleting_subscriptions_by_deleting_topic(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + conn.subscribe(topic_arn, "http", "http://example.com/") + + subscriptions = 
conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Now delete the topic + conn.delete_topic(topic_arn) + + # And there should now be 0 topics + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topics.should.have.length_of(0) + + # And there should be zero subscriptions left + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(0) @mock_sns_deprecated def test_getting_subscriptions_by_topic(): diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index 906c483f7..ac325ed20 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -33,6 +33,36 @@ def test_creating_subscription(): subscriptions = conn.list_subscriptions()["Subscriptions"] subscriptions.should.have.length_of(0) +@mock_sns +def test_deleting_subscriptions_by_deleting_topic(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/") + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + 
+ # Now delete the topic + conn.delete_topic(TopicArn=topic_arn) + + # And there should now be 0 topics + topics_json = conn.list_topics() + topics = topics_json["Topics"] + topics.should.have.length_of(0) + + # And there should be zero subscriptions left + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) @mock_sns def test_getting_subscriptions_by_topic(): From 5011cd28b65a3edf145374fb6a9bec1a5c92c257 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 2 Aug 2017 11:45:27 -0700 Subject: [PATCH 098/412] Allow boto3 redshift cluster subnet group creation Boto3 deviates from the AWS docs in the way subnets are described when creating a Redshift cluster subnet group. This entry in botocore nests the SubnetIds under SubnetIdentifier tags: https://github.com/boto/botocore/blob/develop/botocore/data/redshift/2012-12-01/service-2.json#L5423-L5429 referenced here: https://github.com/boto/botocore/blob/develop/botocore/data/redshift/2012-12-01/service-2.json#L2296 And the AWS docs do not nest them that way: https://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateClusterSubnetGroup.html Fixes #1029 --- moto/redshift/responses.py | 4 ++++ tests/test_redshift/test_redshift.py | 31 +++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index ba28b1343..48f113cf2 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -122,6 +122,10 @@ class RedshiftResponse(BaseResponse): cluster_subnet_group_name = self._get_param('ClusterSubnetGroupName') description = self._get_param('Description') subnet_ids = self._get_multi_param('SubnetIds.member') + # There's a bug in boto3 where the subnet ids are not passed + # according to the AWS documentation + if not subnet_ids: + subnet_ids = self._get_multi_param('SubnetIds.SubnetIdentifier') subnet_group = self.redshift_backend.create_cluster_subnet_group( 
cluster_subnet_group_name=cluster_subnet_group_name, diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 045e30246..aff3e8bed 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -11,7 +11,10 @@ from boto.redshift.exceptions import ( ) import sure # noqa -from moto import mock_ec2_deprecated, mock_redshift_deprecated, mock_redshift +from moto import mock_ec2 +from moto import mock_ec2_deprecated +from moto import mock_redshift +from moto import mock_redshift_deprecated @mock_redshift @@ -153,6 +156,32 @@ def test_create_cluster_in_subnet_group(): cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') +@mock_redshift +@mock_ec2 +def test_create_cluster_in_subnet_group_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id] + ) + + client.create_cluster( + ClusterIdentifier="my_cluster", + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSubnetGroupName='my_subnet_group', + ) + + cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") + cluster = cluster_response['Clusters'][0] + cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') + + @mock_redshift_deprecated def test_create_cluster_with_security_group(): conn = boto.redshift.connect_to_region("us-east-1") From 04e623ea144e9759ba7f33574381eff913444dfc Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 20 Jul 2017 15:00:30 -0700 Subject: [PATCH 099/412] Implemented core endpoints of ELBv2 --- docs/index.rst | 1 + moto/__init__.py | 1 + moto/elbv2/__init__.py | 6 + moto/elbv2/exceptions.py | 103 +++++ 
moto/elbv2/models.py | 312 +++++++++++++++ moto/elbv2/responses.py | 649 ++++++++++++++++++++++++++++++++ moto/elbv2/urls.py | 10 + tests/test_elbv2/test_elbv2.py | 447 ++++++++++++++++++++++ tests/test_elbv2/test_server.py | 17 + 9 files changed, 1546 insertions(+) create mode 100644 moto/elbv2/__init__.py create mode 100644 moto/elbv2/exceptions.py create mode 100644 moto/elbv2/models.py create mode 100644 moto/elbv2/responses.py create mode 100644 moto/elbv2/urls.py create mode 100644 tests/test_elbv2/test_elbv2.py create mode 100644 tests/test_elbv2/test_server.py diff --git a/docs/index.rst b/docs/index.rst index 2ce31febd..9a9fa5261 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -43,6 +43,7 @@ Currently implemented Services: | ECS | @mock_ecs | basic endpoints done | +-----------------------+---------------------+-----------------------------------+ | ELB | @mock_elb | core endpoints done | +| | @mock_elbv2 | core endpoints done | +-----------------------+---------------------+-----------------------------------+ | EMR | @mock_emr | core endpoints done | +-----------------------+---------------------+-----------------------------------+ diff --git a/moto/__init__.py b/moto/__init__.py index 304e25cc5..728d8db71 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -17,6 +17,7 @@ from .ec2 import mock_ec2, mock_ec2_deprecated # flake8: noqa from .ecr import mock_ecr, mock_ecr_deprecated # flake8: noqa from .ecs import mock_ecs, mock_ecs_deprecated # flake8: noqa from .elb import mock_elb, mock_elb_deprecated # flake8: noqa +from .elbv2 import mock_elbv2 # flake8: noqa from .emr import mock_emr, mock_emr_deprecated # flake8: noqa from .events import mock_events # flake8: noqa from .glacier import mock_glacier, mock_glacier_deprecated # flake8: noqa diff --git a/moto/elbv2/__init__.py b/moto/elbv2/__init__.py new file mode 100644 index 000000000..21a6d06c6 --- /dev/null +++ b/moto/elbv2/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals 
+from .models import elbv2_backends +from ..core.models import base_decorator + +elb_backend = elbv2_backends['us-east-1'] +mock_elbv2 = base_decorator(elbv2_backends) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py new file mode 100644 index 000000000..397aa115b --- /dev/null +++ b/moto/elbv2/exceptions.py @@ -0,0 +1,103 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + +class ELBClientError(RESTError): + code = 400 + + +class DuplicateTagKeysError(ELBClientError): + + def __init__(self, cidr): + super(DuplicateTagKeysError, self).__init__( + "DuplicateTagKeys", + "Tag key was specified more than once: {0}" + .format(cidr)) + + +class LoadBalancerNotFoundError(ELBClientError): + + def __init__(self): + super(LoadBalancerNotFoundError, self).__init__( + "LoadBalancerNotFound", + "The specified load balancer does not exist.") + + +class ListenerNotFoundError(ELBClientError): + + def __init__(self): + super(ListenerNotFoundError, self).__init__( + "ListenerNotFound", + "The specified listener does not exist.") + + +class SubnetNotFoundError(ELBClientError): + + def __init__(self): + super(SubnetNotFoundError, self).__init__( + "SubnetNotFound", + "The specified subnet does not exist.") + + +class TargetGroupNotFoundError(ELBClientError): + + def __init__(self): + super(TooManyTagsError, self).__init__( + "TargetGroupNotFound", + "The specified target group does not exist.") + + +class TooManyTagsError(ELBClientError): + + def __init__(self): + super(TooManyTagsError, self).__init__( + "TooManyTagsError", + "The quota for the number of tags that can be assigned to a load balancer has been reached") + + +class BadHealthCheckDefinition(ELBClientError): + + def __init__(self): + super(BadHealthCheckDefinition, self).__init__( + "ValidationError", + "HealthCheck Target must begin with one of HTTP, TCP, HTTPS, SSL") + + +class DuplicateListenerError(ELBClientError): + + def __init__(self): + 
super(DuplicateListenerError, self).__init__( + "DuplicateListener", + "A listener with the specified port already exists.") + + +class DuplicateLoadBalancerName(ELBClientError): + + def __init__(self): + super(DuplicateLoadBalancerName, self).__init__( + "DuplicateLoadBalancerName", + "A load balancer with the specified name already exists.") + + +class DuplicateTargetGroupName(ELBClientError): + + def __init__(self): + super(DuplicateTargetGroupName, self).__init__( + "DuplicateTargetGroupName", + "A target group with the specified name already exists.") + + +class InvalidTargetError(ELBClientError): + + def __init__(self): + super(InvalidTargetError, self).__init__( + "InvalidTarget", + "The specified target does not exist or is not in the same VPC as the target group.") + + +class EmptyListenersError(ELBClientError): + + def __init__(self): + super(EmptyListenersError, self).__init__( + "ValidationError", + "Listeners cannot be empty") diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py new file mode 100644 index 000000000..7682ae097 --- /dev/null +++ b/moto/elbv2/models.py @@ -0,0 +1,312 @@ +from __future__ import unicode_literals + +import datetime +from moto.compat import OrderedDict +from moto.core import BaseBackend, BaseModel +from moto.ec2.models import ec2_backends +from .exceptions import ( + DuplicateLoadBalancerName, + DuplicateListenerError, + DuplicateTargetGroupName, + InvalidTargetError, + ListenerNotFoundError, + LoadBalancerNotFoundError, + SubnetNotFoundError, + TargetGroupNotFoundError, + TooManyTagsError, +) + + +class FakeHealthStatus(BaseModel): + + def __init__(self, instance_id, port, health_port, status, reason=None): + self.instance_id = instance_id + self.port = port + self.health_port = health_port + self.status = status + self.reason = reason + + +class FakeTargetGroup(BaseModel): + def __init__(self, + name, + arn, + vpc_id, + protocol, + port, + healthcheck_protocol, + healthcheck_port, + healthcheck_path, + 
healthcheck_interval_seconds, + healthcheck_timeout_seconds, + healthy_threshold_count, + unhealthy_threshold_count): + self.name = name + self.arn = arn + self.vpc_id = vpc_id + self.protocol = protocol + self.port = port + self.healthcheck_protocol = healthcheck_protocol + self.healthcheck_port = healthcheck_port + self.healthcheck_path = healthcheck_path + self.healthcheck_interval_seconds = healthcheck_interval_seconds + self.healthcheck_timeout_seconds = healthcheck_timeout_seconds + self.healthy_threshold_count = healthy_threshold_count + self.unhealthy_threshold_count = unhealthy_threshold_count + self.load_balancer_arns = [] + + self.targets = OrderedDict() + + def register(self, targets): + for target in targets: + self.targets[target['id']] = { + 'id': target['id'], + 'port': target.get('port', self.port), + } + + def deregister(self, targets): + for target in targets: + t = self.targets.pop(target['id']) + if not t: + raise InvalidTargetError() + + def health_for(self, target): + t = self.targets.get(target['id']) + if t is None: + raise InvalidTargetError() + return FakeHealthStatus(t['id'], t['port'], self.healthcheck_port, 'healthy') + + +class FakeListener(BaseModel): + + def __init__(self, load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions): + self.load_balancer_arn = load_balancer_arn + self.arn = arn + self.protocol = protocol.upper() + self.port = port + self.ssl_policy = ssl_policy + self.certificate = certificate + self.default_actions = default_actions + + +class FakeBackend(BaseModel): + + def __init__(self, instance_port): + self.instance_port = instance_port + self.policy_names = [] + + def __repr__(self): + return "FakeBackend(inp: %s, policies: %s)" % (self.instance_port, self.policy_names) + + +class FakeLoadBalancer(BaseModel): + + def __init__(self, name, security_groups, subnets, vpc_id, arn, dns_name, scheme='internet-facing'): + self.name = name + self.created_time = datetime.datetime.now() + 
self.scheme = scheme + self.security_groups = security_groups + self.subnets = subnets or [] + self.vpc_id = vpc_id + self.listeners = OrderedDict() + self.tags = {} + self.arn = arn + self.dns_name = dns_name + + @property + def physical_resource_id(self): + return self.name + + def add_tag(self, key, value): + if len(self.tags) >= 10 and key not in self.tags: + raise TooManyTagsError() + self.tags[key] = value + + def list_tags(self): + return self.tags + + def remove_tag(self, key): + if key in self.tags: + del self.tags[key] + + def delete(self, region): + ''' Not exposed as part of the ELB API - used for CloudFormation. ''' + elbv2_backends[region].delete_load_balancer(self.arn) + + +class ELBv2Backend(BaseBackend): + + def __init__(self, region_name=None): + self.region_name = region_name + self.target_groups = OrderedDict() + self.load_balancers = OrderedDict() + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_load_balancer(self, name, security_groups, subnet_ids, scheme='internet-facing'): + vpc_id = None + ec2_backend = ec2_backends[self.region_name] + subnets = [] + if not subnet_ids: + raise SubnetNotFoundError() + for subnet_id in subnet_ids: + subnet = ec2_backend.get_subnet(subnet_id) + if subnet is None: + raise SubnetNotFoundError() + subnets.append(subnet) + + vpc_id = subnets[0].vpc_id + arn = "arn:aws:elasticloadbalancing:%s:1:loadbalancer/%s/50dc6c495c0c9188" % (self.region_name, name) + dns_name = "%s-1.%s.elb.amazonaws.com" % (name, self.region_name) + + if arn in self.load_balancers: + raise DuplicateLoadBalancerName() + + new_load_balancer = FakeLoadBalancer( + name=name, + security_groups=security_groups, + arn=arn, + scheme=scheme, + subnets=subnets, + vpc_id=vpc_id, + dns_name=dns_name) + self.load_balancers[arn] = new_load_balancer + return new_load_balancer + + def create_target_group(self, name, **kwargs): + for target_group in self.target_groups.values(): + if 
target_group.name == name: + raise DuplicateTargetGroupName() + + arn = "arn:aws:elasticloadbalancing:%s:1:targetgroup/%s/50dc6c495c0c9188" % (self.region_name, name) + target_group = FakeTargetGroup(name, arn, **kwargs) + self.target_groups[target_group.arn] = target_group + return target_group + + def create_listener(self, load_balancer_arn, protocol, port, ssl_policy, certificate, default_actions): + balancer = self.load_balancers.get(load_balancer_arn) + if balancer is None: + raise LoadBalancerNotFoundError() + if port in balancer.listeners: + raise DuplicateListenerError() + + arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self)) + listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions) + balancer.listeners[listener.arn] = listener + return listener + + def describe_load_balancers(self, arns, names): + balancers = self.load_balancers.values() + arns = arns or [] + names = names or [] + if not arns and not names: + return balancers + + matched_balancers = [] + matched_balancer = None + + for arn in arns: + for balancer in balancers: + if balancer.arn == arn: + matched_balancer = balancer + if matched_balancer is None: + raise LoadBalancerNotFoundError() + elif matched_balancer not in matched_balancers: + matched_balancers.append(matched_balancer) + + for name in names: + for balancer in balancers: + if balancer.name == name: + matched_balancer = balancer + if matched_balancer is None: + raise LoadBalancerNotFoundError() + elif matched_balancer not in matched_balancers: + matched_balancers.append(matched_balancer) + + return matched_balancers + + def describe_target_groups(self, load_balancer_arn, target_group_arns, names): + if load_balancer_arn: + if load_balancer_arn not in self.load_balancers: + raise LoadBalancerNotFoundError() + return [tg for tg in self.target_groups.values() + if load_balancer_arn in tg.load_balancer_arns] + + if target_group_arns: + try: + return 
[self.target_groups[arn] for arn in target_group_arns] + except KeyError: + raise TargetGroupNotFoundError() + if names: + matched = [] + for name in names: + found = None + for target_group in self.target_groups: + if target_group.name == name: + found = target_group + if not found: + raise TargetGroupNotFoundError() + matched.append(found) + return matched + + return self.target_groups.values() + + def describe_listeners(self, load_balancer_arn, listener_arns): + if load_balancer_arn: + if load_balancer_arn not in self.load_balancers: + raise LoadBalancerNotFoundError() + return self.load_balancers.get(load_balancer_arn).listeners.values() + + matched = [] + for load_balancer in self.load_balancers.values(): + for listener_arn in listener_arns: + listener = load_balancer.listeners.get(listener_arn) + if not listener: + raise ListenerNotFoundError() + matched.append(listener) + return matched + + def delete_load_balancer(self, arn): + self.load_balancers.pop(arn, None) + + def delete_target_group(self, target_group_arn): + target_group = self.target_groups.pop(target_group_arn) + if target_group: + return target_group + raise TargetGroupNotFoundError() + + def delete_listener(self, listener_arn): + for load_balancer in self.load_balancers.values(): + listener = load_balancer.listeners.pop(listener_arn) + if listener: + return listener + raise ListenerNotFoundError() + + def register_targets(self, target_group_arn, instances): + target_group = self.target_groups.get(target_group_arn) + if target_group is None: + raise TargetGroupNotFoundError() + target_group.register(instances) + + def deregister_targets(self, target_group_arn, instances): + target_group = self.target_groups.get(target_group_arn) + if target_group is None: + raise TargetGroupNotFoundError() + target_group.deregister(instances) + + def describe_target_health(self, target_group_arn, targets): + target_group = self.target_groups.get(target_group_arn) + if target_group is None: + raise 
TargetGroupNotFoundError() + + if not targets: + targets = target_group.targets.values() + return [target_group.health_for(target) for target in targets] + + +elbv2_backends = {} +for region in ec2_backends.keys(): + elbv2_backends[region] = ELBv2Backend(region) diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py new file mode 100644 index 000000000..585a413d4 --- /dev/null +++ b/moto/elbv2/responses.py @@ -0,0 +1,649 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import elbv2_backends +from .exceptions import DuplicateTagKeysError, LoadBalancerNotFoundError + + +class ELBResponse(BaseResponse): + + @property + def elb_backend(self): + return elbv2_backends[self.region] + + def create_load_balancer(self): + load_balancer_name = self._get_param('Name') + subnet_ids = self._get_multi_param("Subnets.member") + security_groups = self._get_multi_param("SecurityGroups.member") + scheme = self._get_param('Scheme') + + load_balancer = self.elb_backend.create_load_balancer( + name=load_balancer_name, + security_groups=security_groups, + subnet_ids=subnet_ids, + scheme=scheme, + ) + self._add_tags(load_balancer) + template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE) + return template.render(load_balancer=load_balancer) + + def create_target_group(self): + name = self._get_param('Name') + vpc_id = self._get_param('VpcId') + protocol = self._get_param('Protocol') + port = self._get_param('Port') + healthcheck_protocol = self._get_param('HealthCheckProtocol', 'HTTP') + healthcheck_port = self._get_param('HealthCheckPort', 'traffic-port') + healthcheck_path = self._get_param('HealthCheckPath', '/') + healthcheck_interval_seconds = self._get_param('HealthCheckIntervalSeconds', '30') + healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds', '5') + healthy_threshold_count = self._get_param('HealthyThresholdCount', '5') + unhealthy_threshold_count = 
self._get_param('UnhealthyThresholdCount', '2') + + target_group = self.elb_backend.create_target_group( + name, + vpc_id=vpc_id, + protocol=protocol, + port=port, + healthcheck_protocol=healthcheck_protocol, + healthcheck_port=healthcheck_port, + healthcheck_path=healthcheck_path, + healthcheck_interval_seconds=healthcheck_interval_seconds, + healthcheck_timeout_seconds=healthcheck_timeout_seconds, + healthy_threshold_count=healthy_threshold_count, + unhealthy_threshold_count=unhealthy_threshold_count, + ) + + template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE) + return template.render(target_group=target_group) + + def create_listener(self): + load_balancer_arn = self._get_param('LoadBalancerArn') + protocol = self._get_param('Protocol') + port = self._get_param('Port') + ssl_policy = self._get_param('SslPolicy', 'ELBSecurityPolicy-2016-08') + certificates = self._get_list_prefix('Certificates.member') + if certificates: + certificate = certificates[0].get('certificate_arn') + else: + certificate = None + default_actions = self._get_list_prefix('DefaultActions.member') + + listener = self.elb_backend.create_listener( + load_balancer_arn=load_balancer_arn, + protocol=protocol, + port=port, + ssl_policy=ssl_policy, + certificate=certificate, + default_actions=default_actions) + + template = self.response_template(CREATE_LISTENER_TEMPLATE) + return template.render(listener=listener) + + def describe_load_balancers(self): + arns = self._get_multi_param("LoadBalancerArns.member") + names = self._get_multi_param("Names.member") + all_load_balancers = list(self.elb_backend.describe_load_balancers(arns, names)) + marker = self._get_param('Marker') + all_names = [balancer.name for balancer in all_load_balancers] + if marker: + start = all_names.index(marker) + 1 + else: + start = 0 + page_size = self._get_param('PageSize', 50) # the default is 400, but using 50 to make testing easier + load_balancers_resp = all_load_balancers[start:start + page_size] + 
next_marker = None + if len(all_load_balancers) > start + page_size: + next_marker = load_balancers_resp[-1].name + + template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) + return template.render(load_balancers=load_balancers_resp, marker=next_marker) + + def describe_target_groups(self): + load_balancer_arn = self._get_param('LoadBalancerArn') + target_group_arns = self._get_multi_param('TargetGroupArns.member') + names = self._get_multi_param('Names.member') + + target_groups = self.elb_backend.describe_target_groups(load_balancer_arn, target_group_arns, names) + template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE) + return template.render(target_groups=target_groups) + + def describe_listeners(self): + load_balancer_arn = self._get_param('LoadBalancerArn') + listener_arns = self._get_multi_param('ListenerArns.member') + if not load_balancer_arn and not listener_arns: + raise LoadBalancerNotFoundError() + + listeners = self.elb_backend.describe_listeners(load_balancer_arn, listener_arns) + template = self.response_template(DESCRIBE_LISTENERS_TEMPLATE) + return template.render(listeners=listeners) + + def delete_load_balancer(self): + arn = self._get_param('LoadBalancerArn') + self.elb_backend.delete_load_balancer(arn) + template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE) + return template.render() + + def delete_target_group(self): + arn = self._get_param('TargetGroupArn') + self.elb_backend.delete_target_group(arn) + template = self.response_template(DELETE_TARGET_GROUP_TEMPLATE) + return template.render() + + def delete_listener(self): + arn = self._get_param('ListenerArn') + self.elb_backend.delete_listener(arn) + template = self.response_template(DELETE_LISTENER_TEMPLATE) + return template.render() + + def register_targets(self): + target_group_arn = self._get_param('TargetGroupArn') + targets = self._get_list_prefix('Targets.member') + self.elb_backend.register_targets(target_group_arn, targets) + + template = 
self.response_template(REGISTER_TARGETS_TEMPLATE) + return template.render() + + def deregister_targets(self): + target_group_arn = self._get_param('TargetGroupArn') + targets = self._get_list_prefix('Targets.member') + self.elb_backend.deregister_targets(target_group_arn, targets) + + template = self.response_template(DEREGISTER_TARGETS_TEMPLATE) + return template.render() + + def describe_target_health(self): + target_group_arn = self._get_param('TargetGroupArn') + targets = self._get_list_prefix('Targets.member') + target_health_descriptions = self.elb_backend.describe_target_health(target_group_arn, targets) + + template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE) + return template.render(target_health_descriptions=target_health_descriptions) + + def add_tags(self): + resource_arns = self._get_multi_param('ResourceArns.member') + + for arn in resource_arns: + load_balancer = self.elb_backend.load_balancers.get(arn) + if not load_balancer: + raise LoadBalancerNotFoundError() + self._add_tags(load_balancer) + + template = self.response_template(ADD_TAGS_TEMPLATE) + return template.render() + + def remove_tags(self): + resource_arns = self._get_multi_param('ResourceArns.member') + tag_keys = self._get_multi_param('TagKeys.member') + + for arn in resource_arns: + load_balancer = self.elb_backend.load_balancers.get(arn) + if not load_balancer: + raise LoadBalancerNotFoundError() + [load_balancer.remove_tag(key) for key in tag_keys] + + template = self.response_template(REMOVE_TAGS_TEMPLATE) + return template.render() + + def describe_tags(self): + elbs = [] + for key, value in self.querystring.items(): + if "ResourceArns.member" in key: + number = key.split('.')[2] + load_balancer_arn = self._get_param( + 'ResourceArns.member.{0}'.format(number)) + elb = self.elb_backend.load_balancers.get(load_balancer_arn) + if not elb: + raise LoadBalancerNotFoundError() + elbs.append(elb) + + template = self.response_template(DESCRIBE_TAGS_TEMPLATE) + return 
template.render(load_balancers=elbs) + + def _add_tags(self, elb): + tag_values = [] + tag_keys = [] + + for t_key, t_val in sorted(self.querystring.items()): + if t_key.startswith('Tags.member.'): + if t_key.split('.')[3] == 'Key': + tag_keys.extend(t_val) + elif t_key.split('.')[3] == 'Value': + tag_values.extend(t_val) + + counts = {} + for i in tag_keys: + counts[i] = tag_keys.count(i) + + counts = sorted(counts.items(), key=lambda i: i[1], reverse=True) + + if counts and counts[0][1] > 1: + # We have dupes... + raise DuplicateTagKeysError(counts[0]) + + for tag_key, tag_value in zip(tag_keys, tag_values): + elb.add_tag(tag_key, tag_value) + + +ADD_TAGS_TEMPLATE = """ + + + 360e81f7-1100-11e4-b6ed-0f30EXAMPLE + +""" + +REMOVE_TAGS_TEMPLATE = """ + + + 360e81f7-1100-11e4-b6ed-0f30EXAMPLE + +""" + +DESCRIBE_TAGS_TEMPLATE = """ + + + {% for load_balancer in load_balancers %} + + {{ load_balancer.arn }} + + {% for key, value in load_balancer.tags.items() %} + + {{ value }} + {{ key }} + + {% endfor %} + + + {% endfor %} + + + + 360e81f7-1100-11e4-b6ed-0f30EXAMPLE + +""" + + +CREATE_LOAD_BALANCER_TEMPLATE = """ + + + + {{ load_balancer.arn }} + {{ load_balancer.scheme }} + {{ load_balancer.name }} + {{ load_balancer.vpc_id }} + Z2P70J7EXAMPLE + {{ load_balancer.created_time }} + + {% for subnet in load_balancer.subnets %} + + {{ subnet.id }} + {{ subnet.availability_zone }} + + {% endfor %} + + + {% for security_group in load_balancer.security_groups %} + {{ security_group }} + {% endfor %} + + {{ load_balancer.dns_name }} + + provisioning + + application + + + + + 32d531b2-f2d0-11e5-9192-3fff33344cfa + +""" + +CREATE_TARGET_GROUP_TEMPLATE = """ + + + + {{ target_group.arn }} + {{ target_group.name }} + {{ target_group.protocol }} + {{ target_group.port }} + {{ target_group.vpc_id }} + {{ target_group.health_check_protocol }} + {{ target_group.healthcheck_port }} + {{ target_group.healthcheck_path }} + {{ target_group.healthcheck_interval_seconds }} + {{ 
target_group.healthcheck_timeout_seconds }} + {{ target_group.healthy_threshold_count }} + {{ target_group.unhealthy_threshold_count }} + + 200 + + + + + + b83fe90e-f2d5-11e5-b95d-3b2c1831fc26 + +""" + +CREATE_LISTENER_TEMPLATE = """ + + + + {{ listener.load_balancer_arn }} + {{ listener.protocol }} + {% if listener.certificate %} + + + {{ listener.certificate }} + + + {% endif %} + {{ listener.port }} + {{ listener.ssl_policy }} + {{ listener.arn }} + + {% for action in listener.default_actions %} + + {{ action.type }} + {{ action.target_group_arn }} + + {% endfor %} + + + + + + 97f1bb38-f390-11e5-b95d-3b2c1831fc26 + +""" + +DELETE_LOAD_BALANCER_TEMPLATE = """ + + + 1549581b-12b7-11e3-895e-1334aEXAMPLE + +""" + +DELETE_TARGET_GROUP_TEMPLATE = """ + + + 1549581b-12b7-11e3-895e-1334aEXAMPLE + +""" + +DELETE_LISTENER_TEMPLATE = """ + + + 1549581b-12b7-11e3-895e-1334aEXAMPLE + +""" + +DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ + + + {% for load_balancer in load_balancers %} + + {{ load_balancer.arn }} + {{ load_balancer.scheme }} + {{ load_balancer.name }} + {{ load_balancer.vpc_id }} + Z2P70J7EXAMPLE + {{ load_balancer.created_time }} + + {% for subnet in load_balancer.subnets %} + + {{ subnet.id }} + {{ subnet.availability_zone }} + + {% endfor %} + + + {% for security_group in load_balancer.security_groups %} + {{ security_group }} + {% endfor %} + + {{ load_balancer.dns_name }} + + provisioning + + application + + {% endfor %} + + {% if marker %} + {{ marker }} + {% endif %} + + + f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c + +""" + + +DESCRIBE_TARGET_GROUPS_TEMPLATE = """ + + + {% for target_group in target_groups %} + + {{ target_group.arn }} + {{ target_group.name }} + {{ target_group.protocol }} + {{ target_group.port }} + {{ target_group.vpc_id }} + {{ target_group.health_check_protocol }} + {{ target_group.healthcheck_port }} + {{ target_group.healthcheck_path }} + {{ target_group.healthcheck_interval_seconds }} + {{ target_group.healthcheck_timeout_seconds }} 
+ {{ target_group.healthy_threshold_count }} + {{ target_group.unhealthy_threshold_count }} + + 200 + + + {% for load_balancer_arn in target_group.load_balancer_arns %} + {{ load_balancer_arn }} + {% endfor %} + + + {% endfor %} + + + + 70092c0e-f3a9-11e5-ae48-cff02092876b + +""" + + +DESCRIBE_LISTENERS_TEMPLATE = """ + + + {% for listener in listeners %} + + {{ listener.load_balancer_arn }} + {{ listener.protocol }} + {% if listener.certificate %} + + + {{ listener.certificate }} + + + {% endif %} + {{ listener.port }} + {{ listener.ssl_policy }} + {{ listener.arn }} + + {% for action in listener.default_actions %} + + {{ action.type }} + {{ action.target_group_arn }} + + {% endfor %} + + + {% endfor %} + + + + 65a3a7ea-f39c-11e5-b543-9f2c3fbb9bee + +""" + +CONFIGURE_HEALTH_CHECK_TEMPLATE = """ + + + {{ check.interval }} + {{ check.target }} + {{ check.healthy_threshold }} + {{ check.timeout }} + {{ check.unhealthy_threshold }} + + + + f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c + +""" + +REGISTER_TARGETS_TEMPLATE = """ + + + + f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c + +""" + +DEREGISTER_TARGETS_TEMPLATE = """ + + + + f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c + +""" + +SET_LOAD_BALANCER_SSL_CERTIFICATE = """ + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + +""" + + +DELETE_LOAD_BALANCER_LISTENERS = """ + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + +""" + +DESCRIBE_ATTRIBUTES_TEMPLATE = """ + + + + {{ attributes.access_log.enabled }} + {% if attributes.access_log.enabled %} + {{ attributes.access_log.s3_bucket_name }} + {{ attributes.access_log.s3_bucket_prefix }} + {{ attributes.access_log.emit_interval }} + {% endif %} + + + {{ attributes.connecting_settings.idle_timeout }} + + + {{ attributes.cross_zone_load_balancing.enabled }} + + + {% if attributes.connection_draining.enabled %} + true + {{ attributes.connection_draining.timeout }} + {% else %} + false + {% endif %} + + + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + + +""" + +MODIFY_ATTRIBUTES_TEMPLATE = 
""" + + {{ load_balancer.name }} + + + {{ attributes.access_log.enabled }} + {% if attributes.access_log.enabled %} + {{ attributes.access_log.s3_bucket_name }} + {{ attributes.access_log.s3_bucket_prefix }} + {{ attributes.access_log.emit_interval }} + {% endif %} + + + {{ attributes.connecting_settings.idle_timeout }} + + + {{ attributes.cross_zone_load_balancing.enabled }} + + + {% if attributes.connection_draining.enabled %} + true + {{ attributes.connection_draining.timeout }} + {% else %} + false + {% endif %} + + + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + + +""" + +CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """ + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + + +""" + +SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """ + + + 07b1ecbc-1100-11e3-acaf-dd7edEXAMPLE + + +""" + +SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE = """ + + + 0eb9b381-dde0-11e2-8d78-6ddbaEXAMPLE + + +""" + +DESCRIBE_TARGET_HEALTH_TEMPLATE = """ + + + {% for target_health in target_health_descriptions %} + + {{ target_health.health_port }} + + {{ target_health.status }} + + + {{ target_health.port }} + {{ target_health.instance_id }} + + + {% endfor %} + + + + c534f810-f389-11e5-9192-3fff33344cfa + +""" diff --git a/moto/elbv2/urls.py b/moto/elbv2/urls.py new file mode 100644 index 000000000..48fcb37ab --- /dev/null +++ b/moto/elbv2/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import ELBResponse + +url_bases = [ + "https?://elasticloadbalancing.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': ELBResponse.dispatch, +} diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py new file mode 100644 index 000000000..c9eb9ea43 --- /dev/null +++ b/tests/test_elbv2/test_elbv2.py @@ -0,0 +1,447 @@ +from __future__ import unicode_literals +import boto3 +import botocore +from botocore.exceptions import ClientError +from nose.tools import assert_raises +import sure # noqa + +from moto import mock_elbv2, mock_ec2 + + +@mock_elbv2 
+@mock_ec2 +def test_create_load_balancer(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + lb = response.get('LoadBalancers')[0] + + lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com") + lb.get('LoadBalancerArn').should.equal('arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188') + lb.get('SecurityGroups').should.equal([security_group.id]) + lb.get('AvailabilityZones').should.equal([ + {'SubnetId': subnet1.id, 'ZoneName': 'us-east-1a'}, + {'SubnetId': subnet2.id, 'ZoneName': 'us-east-1b'}]) + + # Ensure the tags persisted + response = conn.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')]) + tags = {d['Key']: d['Value'] for d in response['TagDescriptions'][0]['Tags']} + tags.should.equal({'key_name': 'a_value'}) + + +@mock_elbv2 +@mock_ec2 +def test_describe_load_balancers(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + 
conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.describe_load_balancers() + + response.get('LoadBalancers').should.have.length_of(1) + lb = response.get('LoadBalancers')[0] + lb.get('LoadBalancerName').should.equal('my-lb') + + response = conn.describe_load_balancers(LoadBalancerArns=[lb.get('LoadBalancerArn')]) + response.get('LoadBalancers')[0].get('LoadBalancerName').should.equal('my-lb') + + response = conn.describe_load_balancers(Names=['my-lb']) + response.get('LoadBalancers')[0].get('LoadBalancerName').should.equal('my-lb') + + with assert_raises(ClientError): + conn.describe_load_balancers(LoadBalancerArns=['not-a/real/arn']) + with assert_raises(ClientError): + conn.describe_load_balancers(Names=['nope']) + + +@mock_elbv2 +@mock_ec2 +def test_add_remove_tags(): + conn = boto3.client('elbv2', region_name='us-east-1') + + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + lbs = conn.describe_load_balancers()['LoadBalancers'] + lbs.should.have.length_of(1) + lb = lbs[0] + + with assert_raises(ClientError): + conn.add_tags(ResourceArns=['missing-arn'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + tags = {d['Key']: 
d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + tags.should.have.key('a').which.should.equal('b') + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }, { + 'Key': 'b', + 'Value': 'b' + }, { + 'Key': 'c', + 'Value': 'b' + }, { + 'Key': 'd', + 'Value': 'b' + }, { + 'Key': 'e', + 'Value': 'b' + }, { + 'Key': 'f', + 'Value': 'b' + }, { + 'Key': 'g', + 'Value': 'b' + }, { + 'Key': 'h', + 'Value': 'b' + }, { + 'Key': 'j', + 'Value': 'b' + }]) + + conn.add_tags.when.called_with(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'k', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'j', + 'Value': 'c' + }]) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + + tags.should.have.key('a').which.should.equal('b') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + tags.shouldnt.have.key('k') + + conn.remove_tags(ResourceArns=[lb.get('LoadBalancerArn')], + TagKeys=['a']) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + + tags.shouldnt.have.key('a') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + 
tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + + +@mock_elbv2 +@mock_ec2 +def test_create_elb_in_multiple_region(): + for region in ['us-west-1', 'us-west-2']: + conn = boto3.client('elbv2', region_name=region) + ec2 = boto3.resource('ec2', region_name=region) + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone=region + 'a') + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone=region + 'b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + list( + boto3.client('elbv2', region_name='us-west-1').describe_load_balancers().get('LoadBalancers') + ).should.have.length_of(1) + list( + boto3.client('elbv2', region_name='us-west-2').describe_load_balancers().get('LoadBalancers') + ).should.have.length_of(1) + + +@mock_elbv2 +@mock_ec2 +def test_create_target_group_and_listeners(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + 
load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # Check it's in the describe_target_groups response + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + + # Plain HTTP listener + response = conn.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(80) + listener.get('Protocol').should.equal('HTTP') + listener.get('DefaultActions').should.equal([{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward'}]) + http_listener_arn = listener.get('ListenerArn') + + # And another with SSL + response = conn.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTPS', + Port=443, + Certificates=[{'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}], + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(443) + listener.get('Protocol').should.equal('HTTPS') + listener.get('Certificates').should.equal([{ + 'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert', + }]) + listener.get('DefaultActions').should.equal([{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward'}]) + + https_listener_arn = listener.get('ListenerArn') + + response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) + 
response.get('Listeners').should.have.length_of(2) + response = conn.describe_listeners(ListenerArns=[https_listener_arn]) + response.get('Listeners').should.have.length_of(1) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(443) + listener.get('Protocol').should.equal('HTTPS') + + response = conn.describe_listeners(ListenerArns=[http_listener_arn, https_listener_arn]) + response.get('Listeners').should.have.length_of(2) + + # Delete one listener + response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) + response.get('Listeners').should.have.length_of(2) + conn.delete_listener(ListenerArn=http_listener_arn) + response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) + response.get('Listeners').should.have.length_of(1) + + # Then delete the load balancer + conn.delete_load_balancer(LoadBalancerArn=load_balancer_arn) + + # It's gone + response = conn.describe_load_balancers() + response.get('LoadBalancers').should.have.length_of(0) + + # And it deleted the remaining listener + response = conn.describe_listeners(ListenerArns=[http_listener_arn, https_listener_arn]) + response.get('Listeners').should.have.length_of(0) + + # But not the target groups + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + + # Which we'll now delete + conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn')) + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(0) + + +@mock_elbv2 +@mock_ec2 +def test_describe_paginated_balancers(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 
= ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + for i in range(51): + conn.create_load_balancer( + Name='my-lb%d' % i, + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + resp = conn.describe_load_balancers() + resp['LoadBalancers'].should.have.length_of(50) + resp['NextMarker'].should.equal(resp['LoadBalancers'][-1]['LoadBalancerName']) + resp2 = conn.describe_load_balancers(Marker=resp['NextMarker']) + resp2['LoadBalancers'].should.have.length_of(1) + assert 'NextToken' not in resp2.keys() + + +@mock_elbv2 +@mock_ec2 +def test_delete_load_balancer(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers').should.have.length_of(1) + lb = response.get('LoadBalancers')[0] + + conn.delete_load_balancer(LoadBalancerArn=lb.get('LoadBalancerArn')) + balancers = conn.describe_load_balancers().get('LoadBalancers') + balancers.should.have.length_of(0) + + +@mock_ec2 +@mock_elbv2 +def test_register_targets(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = 
ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # No targets registered yet + response = conn.describe_target_health(TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(0) + + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + response = conn.register_targets( + TargetGroupArn=target_group.get('TargetGroupArn'), + Targets=[ + { + 'Id': instance_id1, + 'Port': 5060, + }, + { + 'Id': instance_id2, + 'Port': 4030, + }, + ]) + + response = conn.describe_target_health(TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(2) + + response = conn.deregister_targets( + TargetGroupArn=target_group.get('TargetGroupArn'), + Targets=[{'Id': instance_id2}]) + + response = conn.describe_target_health(TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(1) diff --git a/tests/test_elbv2/test_server.py b/tests/test_elbv2/test_server.py new file mode 100644 index 000000000..6dc271920 --- /dev/null +++ 
b/tests/test_elbv2/test_server.py @@ -0,0 +1,17 @@ +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_elb_describe_instances(): + backend = server.create_backend_app("elbv2") + test_client = backend.test_client() + + res = test_client.get('/?Action=DescribeLoadBalancers') + + res.data.should.contain(b'DescribeLoadBalancersResponse') From ee6d2537004a2591ec3fd5466a41aa1ff30eb737 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 21 Jul 2017 16:28:56 -0700 Subject: [PATCH 100/412] updating reference in server test --- tests/test_elbv2/test_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_elbv2/test_server.py b/tests/test_elbv2/test_server.py index 6dc271920..05786104d 100644 --- a/tests/test_elbv2/test_server.py +++ b/tests/test_elbv2/test_server.py @@ -8,7 +8,7 @@ Test the different server responses ''' -def test_elb_describe_instances(): +def test_elbv2_describe_instances(): backend = server.create_backend_app("elbv2") test_client = backend.test_client() From 5cd1e2450d50917202c6648fb5befb34ef47a944 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 23 Jul 2017 22:06:55 -0700 Subject: [PATCH 101/412] adding elbv2 backend --- moto/backends.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/backends.py b/moto/backends.py index 0af4ae2e2..b452b45fd 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -13,6 +13,7 @@ from moto.ec2 import ec2_backends from moto.ecr import ecr_backends from moto.ecs import ecs_backends from moto.elb import elb_backends +from moto.elbv2 import elbv2_backends from moto.emr import emr_backends from moto.events import events_backends from moto.glacier import glacier_backends @@ -43,6 +44,7 @@ BACKENDS = { 'ecr': ecr_backends, 'ecs': ecs_backends, 'elb': elb_backends, + 'elbv2': elbv2_backends, 'events': events_backends, 'emr': emr_backends, 'glacier': glacier_backends, From 
ce392fab79ba8a160f34c9ad90025ab3ccaefe98 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 1 Aug 2017 18:12:36 -0700 Subject: [PATCH 102/412] Properly dispatch by api version in server mode I'm not happy with this solution. Please think of a fix if you're reading this. --- moto/elb/urls.py | 30 +++++++++++++++++++++++++++++- moto/elbv2/responses.py | 34 +++++++++++++++++----------------- moto/elbv2/urls.py | 10 +++------- 3 files changed, 49 insertions(+), 25 deletions(-) diff --git a/moto/elb/urls.py b/moto/elb/urls.py index 48fcb37ab..a81ebc3e0 100644 --- a/moto/elb/urls.py +++ b/moto/elb/urls.py @@ -1,10 +1,38 @@ from __future__ import unicode_literals from .responses import ELBResponse +from moto.elbv2.responses import ELBV2Response + + +def api_version_elb_backend(*args, **kwargs): + """ + ELB and ELBV2 (Classic and Application load balancers) use the same + hostname and url space. To differentiate them we must read the + `Version` parameter out of the url-encoded request body. TODO: There + has _got_ to be a better way to do this. Please help us think of + one. 
+ """ + request = args[0] + + if hasattr(request, 'values'): + # boto3 + version = request.values.get('Version') + else: + # boto + request.parse_request() + version = request.querystring.get('Version')[0] + + if '2012-06-01' == version: + return ELBResponse.dispatch(*args, **kwargs) + elif '2015-12-01' == version: + return ELBV2Response.dispatch(*args, **kwargs) + else: + raise Exception("Unknown ELB API version: {}".format(version)) + url_bases = [ "https?://elasticloadbalancing.(.+).amazonaws.com", ] url_paths = { - '{0}/$': ELBResponse.dispatch, + '{0}/$': api_version_elb_backend, } diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 585a413d4..c000dd0c9 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -4,10 +4,10 @@ from .models import elbv2_backends from .exceptions import DuplicateTagKeysError, LoadBalancerNotFoundError -class ELBResponse(BaseResponse): +class ELBV2Response(BaseResponse): @property - def elb_backend(self): + def elbv2_backend(self): return elbv2_backends[self.region] def create_load_balancer(self): @@ -16,7 +16,7 @@ class ELBResponse(BaseResponse): security_groups = self._get_multi_param("SecurityGroups.member") scheme = self._get_param('Scheme') - load_balancer = self.elb_backend.create_load_balancer( + load_balancer = self.elbv2_backend.create_load_balancer( name=load_balancer_name, security_groups=security_groups, subnet_ids=subnet_ids, @@ -39,7 +39,7 @@ class ELBResponse(BaseResponse): healthy_threshold_count = self._get_param('HealthyThresholdCount', '5') unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount', '2') - target_group = self.elb_backend.create_target_group( + target_group = self.elbv2_backend.create_target_group( name, vpc_id=vpc_id, protocol=protocol, @@ -68,7 +68,7 @@ class ELBResponse(BaseResponse): certificate = None default_actions = self._get_list_prefix('DefaultActions.member') - listener = self.elb_backend.create_listener( + listener = 
self.elbv2_backend.create_listener( load_balancer_arn=load_balancer_arn, protocol=protocol, port=port, @@ -82,7 +82,7 @@ class ELBResponse(BaseResponse): def describe_load_balancers(self): arns = self._get_multi_param("LoadBalancerArns.member") names = self._get_multi_param("Names.member") - all_load_balancers = list(self.elb_backend.describe_load_balancers(arns, names)) + all_load_balancers = list(self.elbv2_backend.describe_load_balancers(arns, names)) marker = self._get_param('Marker') all_names = [balancer.name for balancer in all_load_balancers] if marker: @@ -103,7 +103,7 @@ class ELBResponse(BaseResponse): target_group_arns = self._get_multi_param('TargetGroupArns.member') names = self._get_multi_param('Names.member') - target_groups = self.elb_backend.describe_target_groups(load_balancer_arn, target_group_arns, names) + target_groups = self.elbv2_backend.describe_target_groups(load_balancer_arn, target_group_arns, names) template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE) return template.render(target_groups=target_groups) @@ -113,32 +113,32 @@ class ELBResponse(BaseResponse): if not load_balancer_arn and not listener_arns: raise LoadBalancerNotFoundError() - listeners = self.elb_backend.describe_listeners(load_balancer_arn, listener_arns) + listeners = self.elbv2_backend.describe_listeners(load_balancer_arn, listener_arns) template = self.response_template(DESCRIBE_LISTENERS_TEMPLATE) return template.render(listeners=listeners) def delete_load_balancer(self): arn = self._get_param('LoadBalancerArn') - self.elb_backend.delete_load_balancer(arn) + self.elbv2_backend.delete_load_balancer(arn) template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE) return template.render() def delete_target_group(self): arn = self._get_param('TargetGroupArn') - self.elb_backend.delete_target_group(arn) + self.elbv2_backend.delete_target_group(arn) template = self.response_template(DELETE_TARGET_GROUP_TEMPLATE) return template.render() def 
delete_listener(self): arn = self._get_param('ListenerArn') - self.elb_backend.delete_listener(arn) + self.elbv2_backend.delete_listener(arn) template = self.response_template(DELETE_LISTENER_TEMPLATE) return template.render() def register_targets(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') - self.elb_backend.register_targets(target_group_arn, targets) + self.elbv2_backend.register_targets(target_group_arn, targets) template = self.response_template(REGISTER_TARGETS_TEMPLATE) return template.render() @@ -146,7 +146,7 @@ class ELBResponse(BaseResponse): def deregister_targets(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') - self.elb_backend.deregister_targets(target_group_arn, targets) + self.elbv2_backend.deregister_targets(target_group_arn, targets) template = self.response_template(DEREGISTER_TARGETS_TEMPLATE) return template.render() @@ -154,7 +154,7 @@ class ELBResponse(BaseResponse): def describe_target_health(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') - target_health_descriptions = self.elb_backend.describe_target_health(target_group_arn, targets) + target_health_descriptions = self.elbv2_backend.describe_target_health(target_group_arn, targets) template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE) return template.render(target_health_descriptions=target_health_descriptions) @@ -163,7 +163,7 @@ class ELBResponse(BaseResponse): resource_arns = self._get_multi_param('ResourceArns.member') for arn in resource_arns: - load_balancer = self.elb_backend.load_balancers.get(arn) + load_balancer = self.elbv2_backend.load_balancers.get(arn) if not load_balancer: raise LoadBalancerNotFoundError() self._add_tags(load_balancer) @@ -176,7 +176,7 @@ class ELBResponse(BaseResponse): tag_keys = self._get_multi_param('TagKeys.member') for arn in resource_arns: - 
load_balancer = self.elb_backend.load_balancers.get(arn) + load_balancer = self.elbv2_backend.load_balancers.get(arn) if not load_balancer: raise LoadBalancerNotFoundError() [load_balancer.remove_tag(key) for key in tag_keys] @@ -191,7 +191,7 @@ class ELBResponse(BaseResponse): number = key.split('.')[2] load_balancer_arn = self._get_param( 'ResourceArns.member.{0}'.format(number)) - elb = self.elb_backend.load_balancers.get(load_balancer_arn) + elb = self.elbv2_backend.load_balancers.get(load_balancer_arn) if not elb: raise LoadBalancerNotFoundError() elbs.append(elb) diff --git a/moto/elbv2/urls.py b/moto/elbv2/urls.py index 48fcb37ab..ff72e3605 100644 --- a/moto/elbv2/urls.py +++ b/moto/elbv2/urls.py @@ -1,10 +1,6 @@ from __future__ import unicode_literals -from .responses import ELBResponse +from .responses import ELBV2Response -url_bases = [ - "https?://elasticloadbalancing.(.+).amazonaws.com", -] +url_bases = [] -url_paths = { - '{0}/$': ELBResponse.dispatch, -} +url_paths = {} From d56c30932f8e2c3a50b4a01f9381887f727fc9c8 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 1 Aug 2017 18:19:26 -0700 Subject: [PATCH 103/412] this is handled in moto/elb/urls.py --- moto/elbv2/urls.py | 1 - 1 file changed, 1 deletion(-) diff --git a/moto/elbv2/urls.py b/moto/elbv2/urls.py index ff72e3605..13e04a224 100644 --- a/moto/elbv2/urls.py +++ b/moto/elbv2/urls.py @@ -1,5 +1,4 @@ from __future__ import unicode_literals -from .responses import ELBV2Response url_bases = [] From 8188fea0ced1e75610b835745d78c6a18532fd02 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 1 Aug 2017 18:26:38 -0700 Subject: [PATCH 104/412] This is required for the server test to work --- moto/elbv2/urls.py | 9 +++++++-- tests/test_elbv2/test_server.py | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/moto/elbv2/urls.py b/moto/elbv2/urls.py index 13e04a224..13a8e056f 100644 --- a/moto/elbv2/urls.py +++ b/moto/elbv2/urls.py @@ -1,5 +1,10 @@ from __future__ import 
unicode_literals +from .responses import ELBV2Response -url_bases = [] +url_bases = [ + "https?://elasticloadbalancing.(.+).amazonaws.com", +] -url_paths = {} +url_paths = { + '{0}/$': ELBV2Response.dispatch, +} diff --git a/tests/test_elbv2/test_server.py b/tests/test_elbv2/test_server.py index 05786104d..5acad4051 100644 --- a/tests/test_elbv2/test_server.py +++ b/tests/test_elbv2/test_server.py @@ -8,7 +8,7 @@ Test the different server responses ''' -def test_elbv2_describe_instances(): +def test_elbv2_describe_load_balancers(): backend = server.create_backend_app("elbv2") test_client = backend.test_client() From 2f05f6c9eaec6eeae5b6d47c5ede92a7c70f8723 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 2 Aug 2017 13:29:14 -0700 Subject: [PATCH 105/412] Adding version string to server tests --- tests/test_elb/test_server.py | 2 +- tests/test_elbv2/test_server.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_elb/test_server.py b/tests/test_elb/test_server.py index 04b12524e..0033284d7 100644 --- a/tests/test_elb/test_server.py +++ b/tests/test_elb/test_server.py @@ -12,6 +12,6 @@ def test_elb_describe_instances(): backend = server.create_backend_app("elb") test_client = backend.test_client() - res = test_client.get('/?Action=DescribeLoadBalancers') + res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') res.data.should.contain(b'DescribeLoadBalancersResponse') diff --git a/tests/test_elbv2/test_server.py b/tests/test_elbv2/test_server.py index 5acad4051..ddd40a02d 100644 --- a/tests/test_elbv2/test_server.py +++ b/tests/test_elbv2/test_server.py @@ -12,6 +12,6 @@ def test_elbv2_describe_load_balancers(): backend = server.create_backend_app("elbv2") test_client = backend.test_client() - res = test_client.get('/?Action=DescribeLoadBalancers') + res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') res.data.should.contain(b'DescribeLoadBalancersResponse') From 
08a932f5f104a9174d2118fafc0c95e812161936 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 2 Aug 2017 13:29:26 -0700 Subject: [PATCH 106/412] handling AWSPreparedRequest instances in dispatch --- moto/elb/urls.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/moto/elb/urls.py b/moto/elb/urls.py index a81ebc3e0..6b754fcce 100644 --- a/moto/elb/urls.py +++ b/moto/elb/urls.py @@ -1,5 +1,8 @@ from __future__ import unicode_literals -from .responses import ELBResponse +from six.moves.urllib.parse import parse_qs +from botocore.awsrequest import AWSPreparedRequest + +from moto.elb.responses import ELBResponse from moto.elbv2.responses import ELBV2Response @@ -16,6 +19,9 @@ def api_version_elb_backend(*args, **kwargs): if hasattr(request, 'values'): # boto3 version = request.values.get('Version') + elif isinstance(request, AWSPreparedRequest): + # botocore + version = parse_qs(request.body).get('Version')[0] else: # boto request.parse_request() From 161a187ee595ff8cf2061b02c4e9951a9301f23d Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 2 Aug 2017 13:30:07 -0700 Subject: [PATCH 107/412] updating explanation of boto client usage --- moto/elb/urls.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/elb/urls.py b/moto/elb/urls.py index 6b754fcce..3d96e1892 100644 --- a/moto/elb/urls.py +++ b/moto/elb/urls.py @@ -20,10 +20,10 @@ def api_version_elb_backend(*args, **kwargs): # boto3 version = request.values.get('Version') elif isinstance(request, AWSPreparedRequest): - # botocore + # boto in-memory version = parse_qs(request.body).get('Version')[0] else: - # boto + # boto in server mode request.parse_request() version = request.querystring.get('Version')[0] From 543e5fb07730c8116cab33da75ef3d492a21c48e Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 2 Aug 2017 15:57:15 -0700 Subject: [PATCH 108/412] Implementing ELBV2 target group attributes --- moto/elbv2/models.py | 5 +++ moto/elbv2/responses.py | 58 
++++++++++++++++++++++++++- tests/test_elbv2/test_elbv2.py | 73 ++++++++++++++++++++++++++++++++++ 3 files changed, 135 insertions(+), 1 deletion(-) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 7682ae097..10d9ad220 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -55,6 +55,11 @@ class FakeTargetGroup(BaseModel): self.unhealthy_threshold_count = unhealthy_threshold_count self.load_balancer_arns = [] + self.attributes = { + 'deregistration_delay.timeout_seconds': 300, + 'stickiness.enabled': 'false', + } + self.targets = OrderedDict() def register(self, targets): diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index c000dd0c9..751652901 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -1,7 +1,9 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse from .models import elbv2_backends -from .exceptions import DuplicateTagKeysError, LoadBalancerNotFoundError +from .exceptions import DuplicateTagKeysError +from .exceptions import LoadBalancerNotFoundError +from .exceptions import TargetGroupNotFoundError class ELBV2Response(BaseResponse): @@ -107,6 +109,14 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE) return template.render(target_groups=target_groups) + def describe_target_group_attributes(self): + target_group_arn = self._get_param('TargetGroupArn') + target_group = self.elbv2_backend.target_groups.get(target_group_arn) + if not target_group: + raise TargetGroupNotFoundError() + template = self.response_template(DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE) + return template.render(attributes=target_group.attributes) + def describe_listeners(self): load_balancer_arn = self._get_param('LoadBalancerArn') listener_arns = self._get_multi_param('ListenerArns.member') @@ -135,6 +145,19 @@ class ELBV2Response(BaseResponse): template = self.response_template(DELETE_LISTENER_TEMPLATE) return template.render() + def 
modify_target_group_attributes(self): + target_group_arn = self._get_param('TargetGroupArn') + target_group = self.elbv2_backend.target_groups.get(target_group_arn) + attributes = { + attr['key']: attr['value'] + for attr in self._get_list_prefix('Attributes.member') + } + target_group.attributes.update(attributes) + if not target_group: + raise TargetGroupNotFoundError() + template = self.response_template(MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE) + return template.render(attributes=attributes) + def register_targets(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') @@ -455,6 +478,23 @@ DESCRIBE_TARGET_GROUPS_TEMPLATE = """ + + + {% for key, value in attributes.items() %} + + {{ key }} + {{ value }} + + {% endfor %} + + + + 70092c0e-f3a9-11e5-ae48-cff02092876b + +""" + + DESCRIBE_LISTENERS_TEMPLATE = """ @@ -504,6 +544,22 @@ CONFIGURE_HEALTH_CHECK_TEMPLATE = """ + + + {% for key, value in attributes.items() %} + + {{ key }} + {{ value }} + + {% endfor %} + + + + 70092c0e-f3a9-11e5-ae48-cff02092876b + +""" + REGISTER_TARGETS_TEMPLATE = """ diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index c9eb9ea43..6bfe2ca4f 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -445,3 +445,76 @@ def test_register_targets(): response = conn.describe_target_health(TargetGroupArn=target_group.get('TargetGroupArn')) response.get('TargetHealthDescriptions').should.have.length_of(1) + + +@mock_ec2 +@mock_elbv2 +def test_target_group_attributes(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 = 
ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # Check it's in the describe_target_groups response + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + target_group_arn = target_group['TargetGroupArn'] + + # The attributes should start with the two defaults + response = conn.describe_target_group_attributes(TargetGroupArn=target_group_arn) + response['Attributes'].should.have.length_of(2) + attributes = {attr['Key']: attr['Value'] for attr in response['Attributes']} + attributes['deregistration_delay.timeout_seconds'].should.equal('300') + attributes['stickiness.enabled'].should.equal('false') + + # add cookie stickiness + response = conn.modify_target_group_attributes( + TargetGroupArn=target_group_arn, + Attributes=[ + { + 'Key': 'stickiness.enabled', + 'Value': 'true', + }, + { + 'Key': 'stickiness.type', + 'Value': 'lb_cookie', + }, + ]) + + # the response should have only the keys updated + response['Attributes'].should.have.length_of(2) + attributes = {attr['Key']: attr['Value'] for attr in response['Attributes']} + attributes['stickiness.type'].should.equal('lb_cookie') + attributes['stickiness.enabled'].should.equal('true') + + # These new values should be in the full attribute list + response = conn.describe_target_group_attributes(TargetGroupArn=target_group_arn) + 
response['Attributes'].should.have.length_of(3) + attributes = {attr['Key']: attr['Value'] for attr in response['Attributes']} + attributes['stickiness.type'].should.equal('lb_cookie') + attributes['stickiness.enabled'].should.equal('true') From 7cff4067789e59fa68372a6ef0bbdb06415c435c Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 2 Aug 2017 15:58:32 -0700 Subject: [PATCH 109/412] fixing case of comments --- tests/test_elbv2/test_elbv2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 6bfe2ca4f..ece17571d 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -492,7 +492,7 @@ def test_target_group_attributes(): attributes['deregistration_delay.timeout_seconds'].should.equal('300') attributes['stickiness.enabled'].should.equal('false') - # add cookie stickiness + # Add cookie stickiness response = conn.modify_target_group_attributes( TargetGroupArn=target_group_arn, Attributes=[ @@ -506,7 +506,7 @@ def test_target_group_attributes(): }, ]) - # the response should have only the keys updated + # The response should have only the keys updated response['Attributes'].should.have.length_of(2) attributes = {attr['Key']: attr['Value'] for attr in response['Attributes']} attributes['stickiness.type'].should.equal('lb_cookie') From 0bceaabc40ba779846b3c8e45976aebff7116bde Mon Sep 17 00:00:00 2001 From: Andrew Hill Date: Fri, 4 Aug 2017 11:57:48 +1000 Subject: [PATCH 110/412] Fix SWF name in docs --- README.md | 2 +- docs/_build/html/_sources/index.rst.txt | 2 +- docs/index.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 369d430f5..5c4cdc259 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. 
L |------------------------------------------------------------------------------| | STS | @mock_sts | core endpoints done | |------------------------------------------------------------------------------| -| SWF | @mock_sfw | basic endpoints done | +| SWF | @mock_swf | basic endpoints done | |------------------------------------------------------------------------------| ``` diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt index 2ce31febd..0c4133048 100644 --- a/docs/_build/html/_sources/index.rst.txt +++ b/docs/_build/html/_sources/index.rst.txt @@ -74,7 +74,7 @@ Currently implemented Services: +-----------------------+---------------------+-----------------------------------+ | STS | @mock_sts | core endpoints done | +-----------------------+---------------------+-----------------------------------+ -| SWF | @mock_sfw | basic endpoints done | +| SWF | @mock_swf | basic endpoints done | +-----------------------+---------------------+-----------------------------------+ diff --git a/docs/index.rst b/docs/index.rst index 9a9fa5261..321342401 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -75,7 +75,7 @@ Currently implemented Services: +-----------------------+---------------------+-----------------------------------+ | STS | @mock_sts | core endpoints done | +-----------------------+---------------------+-----------------------------------+ -| SWF | @mock_sfw | basic endpoints done | +| SWF | @mock_swf | basic endpoints done | +-----------------------+---------------------+-----------------------------------+ From ce2f3e6e2b30d5bd412064922acca468e71a6aea Mon Sep 17 00:00:00 2001 From: Waldemar Hummer Date: Sat, 5 Aug 2017 15:47:40 +1000 Subject: [PATCH 111/412] fix receiving of messages from queues with a dot character in their name --- moto/sqs/urls.py | 2 +- tests/test_sqs/test_server.py | 31 +++++++++++++++++++------------ 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/moto/sqs/urls.py 
b/moto/sqs/urls.py index 0780615ab..9ec014a80 100644 --- a/moto/sqs/urls.py +++ b/moto/sqs/urls.py @@ -9,5 +9,5 @@ dispatch = SQSResponse().dispatch url_paths = { '{0}/$': dispatch, - '{0}/(?P\d+)/(?P[a-zA-Z0-9\-_]+)': dispatch, + '{0}/(?P\d+)/(?P[a-zA-Z0-9\-_\.]+)': dispatch, } diff --git a/tests/test_sqs/test_server.py b/tests/test_sqs/test_server.py index b7a43ab90..e7f745fd2 100644 --- a/tests/test_sqs/test_server.py +++ b/tests/test_sqs/test_server.py @@ -19,22 +19,29 @@ def test_sqs_list_identities(): res = test_client.get('/?Action=ListQueues') res.data.should.contain(b"ListQueuesResponse") - res = test_client.put('/?Action=CreateQueue&QueueName=testqueue') - res = test_client.put('/?Action=CreateQueue&QueueName=otherqueue') + # Make sure that we can receive messages from queues whose name contains dots (".") + # The AWS API mandates that the names of FIFO queues use the suffix ".fifo" + # See: https://github.com/spulec/moto/issues/866 + + for queue_name in ('testqueue', 'otherqueue.fifo'): + + res = test_client.put('/?Action=CreateQueue&QueueName=%s' % queue_name) + + + res = test_client.put( + '/123/%s?MessageBody=test-message&Action=SendMessage' % queue_name) + + res = test_client.get( + '/123/%s?Action=ReceiveMessage&MaxNumberOfMessages=1' % queue_name) + + message = re.search("(.*?)", + res.data.decode('utf-8')).groups()[0] + message.should.equal('test-message') res = test_client.get('/?Action=ListQueues&QueueNamePrefix=other') + res.data.should.contain(b'otherqueue.fifo') res.data.should_not.contain(b'testqueue') - res = test_client.put( - '/123/testqueue?MessageBody=test-message&Action=SendMessage') - - res = test_client.get( - '/123/testqueue?Action=ReceiveMessage&MaxNumberOfMessages=1') - - message = re.search("(.*?)", - res.data.decode('utf-8')).groups()[0] - message.should.equal('test-message') - def test_messages_polling(): backend = server.create_backend_app("sqs") From 24d1562d2fff60963b3497792634e33c08acf1d8 Mon Sep 17 00:00:00 2001 From: 
Waldemar Hummer Date: Sat, 5 Aug 2017 20:29:40 +1000 Subject: [PATCH 112/412] allow non-ascii characters in request URLs --- moto/server.py | 8 ++++++++ tests/test_s3/test_server.py | 17 +++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/moto/server.py b/moto/server.py index be41f1ed0..8d0103cc2 100644 --- a/moto/server.py +++ b/moto/server.py @@ -3,6 +3,7 @@ import json import re import sys import argparse +import six from six.moves.urllib.parse import urlencode @@ -47,6 +48,13 @@ class DomainDispatcherApplication(object): def get_application(self, environ): path_info = environ.get('PATH_INFO', '') + + # The URL path might contain non-ASCII text, for instance unicode S3 bucket names + if six.PY2 and isinstance(path_info, str): + path_info = six.u(path_info) + if six.PY3 and isinstance(path_info, six.binary_type): + path_info = path_info.decode('utf-8') + if path_info.startswith("/moto-api") or path_info == "/favicon.ico": host = "moto_api" elif path_info.startswith("/latest/meta-data/"): diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 5353ec209..c3ca3c3ff 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -1,3 +1,5 @@ +# coding=utf-8 + from __future__ import unicode_literals import sure # noqa @@ -78,3 +80,18 @@ def test_s3_server_post_without_content_length(): res = test_client.post('/', "https://tester.localhost:5000/", environ_overrides={'CONTENT_LENGTH': ''}) res.status_code.should.equal(411) + + +def test_s3_server_post_unicode_bucket_key(): + # Make sure that we can deal with non-ascii characters in request URLs (e.g., S3 object names) + dispatcher = server.DomainDispatcherApplication(server.create_backend_app) + backend_app = dispatcher.get_application({ + 'HTTP_HOST': 's3.amazonaws.com', + 'PATH_INFO': '/test-bucket/test-object-てすと' + }) + assert backend_app + backend_app = dispatcher.get_application({ + 'HTTP_HOST': 's3.amazonaws.com', + 'PATH_INFO': 
'/test-bucket/test-object-てすと'.encode('utf-8') + }) + assert backend_app From 5ed546d59cd66251d3c25b1759a1eb59132d47a5 Mon Sep 17 00:00:00 2001 From: ygrosu Date: Wed, 9 Aug 2017 10:56:15 +0300 Subject: [PATCH 113/412] updating documentation to describe support for boto2 --- README.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/README.md b/README.md index 5c4cdc259..cabf6b45f 100644 --- a/README.md +++ b/README.md @@ -148,6 +148,28 @@ def test_add_servers(): assert instance1['ImageId'] == 'ami-1234abcd' ``` +#### Using moto 1.0.X with boto2 +moto 1.0.X mock docorators are defined for boto3 and do not work with boto2. Use the @mock_AWSSVC_deprecated to work with boto2. + +Using moto with boto2 +```python +from moto import mock_ec2_deprecated +import boto + +@mock_ec2_deprecated +def test_something_with_ec2(): + ec2_conn = boto.ec2.connect_to_region('us-east-1') + ec2_conn.get_only_instances(instance_ids='i-123456') + +``` + +When using both boto2 and boto3, one can do this to avoid confusion: +```python +from moto import mock_ec2_deprecated as mock_ec2_b2 +from moto import mock_ec2 + +``` + ## Usage All of the services can be used as a decorator, context manager, or in a raw form. 
From 0a03a7237e50c9867bbe2c7f7bc19565ec75065d Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 9 Aug 2017 18:43:21 -0700 Subject: [PATCH 114/412] Redshift Updates - Implement create_cluster_snapshot endpoint - Implement describe_cluster_snapshots endpoint - Implement delete_cluster_snapshot endpoint - Implement restore_from_cluster_snapshot endpoint - Implement limited support for describe_tags endpoint - Correctly serialize errors to json (for boto) or xml (for boto3) - Simulate cluster spin up by returning initial status as 'creating' and subsequent statuses as 'available' - Fix issue with modify_cluster endpoint where cluster values get set to None when omitted from request - Add 'Endpoint' key to describe_clusters response syntax --- moto/redshift/exceptions.py | 15 +++ moto/redshift/models.py | 102 ++++++++++++++- moto/redshift/responses.py | 180 +++++++++++++++++++++++++- tests/test_redshift/test_redshift.py | 185 ++++++++++++++++++++++++++- 4 files changed, 474 insertions(+), 8 deletions(-) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index 8bcca807e..877e850e4 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -56,3 +56,18 @@ class InvalidSubnetError(RedshiftClientError): super(InvalidSubnetError, self).__init__( 'InvalidSubnet', "Subnet {0} not found.".format(subnet_identifier)) + + +class ClusterSnapshotNotFoundError(RedshiftClientError): + def __init__(self, snapshot_identifier): + super(ClusterSnapshotNotFoundError, self).__init__( + 'ClusterSnapshotNotFound', + "Snapshot {0} not found.".format(snapshot_identifier)) + + +class ClusterSnapshotAlreadyExistsError(RedshiftClientError): + def __init__(self, snapshot_identifier): + super(ClusterSnapshotAlreadyExistsError, self).__init__( + 'ClusterSnapshotAlreadyExists', + "Cannot create the snapshot because a snapshot with the " + "identifier {0} already exists".format(snapshot_identifier)) diff --git a/moto/redshift/models.py 
b/moto/redshift/models.py index 5e64f7a16..29c802fb0 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -1,12 +1,19 @@ from __future__ import unicode_literals +import copy +import datetime + import boto.redshift +from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel +from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2 import ec2_backends from .exceptions import ( ClusterNotFoundError, ClusterParameterGroupNotFoundError, ClusterSecurityGroupNotFoundError, + ClusterSnapshotAlreadyExistsError, + ClusterSnapshotNotFoundError, ClusterSubnetGroupNotFoundError, InvalidSubnetError, ) @@ -23,6 +30,7 @@ class Cluster(BaseModel): encrypted, region): self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier + self.status = 'available' self.node_type = node_type self.master_username = master_username self.master_user_password = master_user_password @@ -152,7 +160,7 @@ class Cluster(BaseModel): } for group in self.vpc_security_groups], "ClusterSubnetGroupName": self.cluster_subnet_group_name, "AvailabilityZone": self.availability_zone, - "ClusterStatus": "creating", + "ClusterStatus": self.status, "NumberOfNodes": self.number_of_nodes, "AutomatedSnapshotRetentionPeriod": self.automated_snapshot_retention_period, "PubliclyAccessible": self.publicly_accessible, @@ -171,6 +179,13 @@ class Cluster(BaseModel): "NodeType": self.node_type, "ClusterIdentifier": self.cluster_identifier, "AllowVersionUpgrade": self.allow_version_upgrade, + "Endpoint": { + "Address": '{}.{}.redshift.amazonaws.com'.format( + self.cluster_identifier, + self.region), + "Port": self.port + }, + "PendingModifiedValues": [] } @@ -262,6 +277,42 @@ class ParameterGroup(BaseModel): } +class Snapshot(BaseModel): + + def __init__(self, cluster, snapshot_identifier, tags=None): + self.cluster = copy.copy(cluster) + self.snapshot_identifier = snapshot_identifier + self.snapshot_type = 'manual' + self.status = 'available' 
+ self.tags = tags or [] + self.create_time = iso_8601_datetime_with_milliseconds( + datetime.datetime.now()) + + @property + def arn(self): + return "arn:aws:redshift:{0}:1234567890:snapshot:{1}/{2}".format( + self.cluster.region, + self.cluster.cluster_identifier, + self.snapshot_identifier) + + def to_json(self): + return { + 'SnapshotIdentifier': self.snapshot_identifier, + 'ClusterIdentifier': self.cluster.cluster_identifier, + 'SnapshotCreateTime': self.create_time, + 'Status': self.status, + 'Port': self.cluster.port, + 'AvailabilityZone': self.cluster.availability_zone, + 'MasterUsername': self.cluster.master_username, + 'ClusterVersion': self.cluster.cluster_version, + 'SnapshotType': self.snapshot_type, + 'NodeType': self.cluster.node_type, + 'NumberOfNodes': self.cluster.number_of_nodes, + 'DBName': self.cluster.db_name, + 'Tags': self.tags + } + + class RedshiftBackend(BaseBackend): def __init__(self, ec2_backend): @@ -278,6 +329,7 @@ class RedshiftBackend(BaseBackend): ) } self.ec2_backend = ec2_backend + self.snapshots = OrderedDict() def reset(self): ec2_backend = self.ec2_backend @@ -383,6 +435,54 @@ class RedshiftBackend(BaseBackend): return self.parameter_groups.pop(parameter_group_name) raise ClusterParameterGroupNotFoundError(parameter_group_name) + def create_snapshot(self, cluster_identifier, snapshot_identifier, tags): + cluster = self.clusters.get(cluster_identifier) + if not cluster: + raise ClusterNotFoundError(cluster_identifier) + if self.snapshots.get(snapshot_identifier) is not None: + raise ClusterSnapshotAlreadyExistsError(snapshot_identifier) + snapshot = Snapshot(cluster, snapshot_identifier, tags) + self.snapshots[snapshot_identifier] = snapshot + return snapshot + + def describe_snapshots(self, cluster_identifier, snapshot_identifier): + if cluster_identifier: + for snapshot in self.snapshots.values(): + if snapshot.cluster.cluster_identifier == cluster_identifier: + return [snapshot] + raise 
ClusterNotFoundError(cluster_identifier) + + if snapshot_identifier: + if snapshot_identifier in self.snapshots: + return [self.snapshots[snapshot_identifier]] + raise ClusterSnapshotNotFoundError(snapshot_identifier) + + return self.snapshots.values() + + def delete_snapshot(self, snapshot_identifier): + if snapshot_identifier not in self.snapshots: + raise ClusterSnapshotNotFoundError(snapshot_identifier) + + deleted_snapshot = self.snapshots.pop(snapshot_identifier) + deleted_snapshot.status = 'deleted' + return deleted_snapshot + + def describe_tags_for_resource_type(self, resource_type): + tagged_resources = [] + if resource_type == 'Snapshot': + for snapshot in self.snapshots.values(): + for tag in snapshot.tags: + data = { + 'ResourceName': snapshot.arn, + 'ResourceType': 'snapshot', + 'Tag': { + 'Key': tag['Key'], + 'Value': tag['Value'] + } + } + tagged_resources.append(data) + return tagged_resources + redshift_backends = {} for region in boto.redshift.regions(): diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index 48f113cf2..411569d01 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -1,12 +1,31 @@ from __future__ import unicode_literals import json + import dicttoxml +from jinja2 import Template +from six import iteritems from moto.core.responses import BaseResponse from .models import redshift_backends +def convert_json_error_to_xml(json_error): + error = json.loads(json_error) + code = error['Error']['Code'] + message = error['Error']['Message'] + template = Template(""" + + + {{ code }} + {{ message }} + Sender + + 6876f774-7273-11e4-85dc-39e55ca848d1 + """) + return template.render(code=code, message=message) + + class RedshiftResponse(BaseResponse): @property @@ -20,6 +39,24 @@ class RedshiftResponse(BaseResponse): xml = dicttoxml.dicttoxml(response, attr_type=False, root=False) return xml.decode("utf-8") + def call_action(self): + status, headers, body = super(RedshiftResponse, self).call_action() + 
if status >= 400 and not self.request_json: + body = convert_json_error_to_xml(body) + return status, headers, body + + def unpack_complex_list_params(self, label, names): + unpacked_list = list() + count = 1 + while self._get_param('{0}.{1}.{2}'.format(label, count, names[0])): + param = dict() + for i in range(len(names)): + param[names[i]] = self._get_param( + '{0}.{1}.{2}'.format(label, count, names[i])) + unpacked_list.append(param) + count += 1 + return unpacked_list + def create_cluster(self): cluster_kwargs = { "cluster_identifier": self._get_param('ClusterIdentifier'), @@ -43,12 +80,66 @@ class RedshiftResponse(BaseResponse): "encrypted": self._get_param("Encrypted"), "region": self.region, } - cluster = self.redshift_backend.create_cluster(**cluster_kwargs) - + cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json() + cluster['ClusterStatus'] = 'creating' return self.get_response({ "CreateClusterResponse": { "CreateClusterResult": { - "Cluster": cluster.to_json(), + "Cluster": cluster, + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def restore_from_cluster_snapshot(self): + snapshot_identifier = self._get_param('SnapshotIdentifier') + snapshots = self.redshift_backend.describe_snapshots( + None, + snapshot_identifier) + snapshot = snapshots[0] + kwargs_from_snapshot = { + "node_type": snapshot.cluster.node_type, + "master_username": snapshot.cluster.master_username, + "master_user_password": snapshot.cluster.master_user_password, + "db_name": snapshot.cluster.db_name, + "cluster_type": 'multi-node' if snapshot.cluster.number_of_nodes > 1 else 'single-node', + "availability_zone": snapshot.cluster.availability_zone, + "port": snapshot.cluster.port, + "cluster_version": snapshot.cluster.cluster_version, + "number_of_nodes": snapshot.cluster.number_of_nodes, + } + kwargs_from_request = { + "cluster_identifier": self._get_param('ClusterIdentifier'), + "port": self._get_int_param('Port'), 
+ "availability_zone": self._get_param('AvailabilityZone'), + "allow_version_upgrade": self._get_bool_param( + 'AllowVersionUpgrade'), + "cluster_subnet_group_name": self._get_param( + 'ClusterSubnetGroupName'), + "publicly_accessible": self._get_param("PubliclyAccessible"), + "cluster_parameter_group_name": self._get_param( + 'ClusterParameterGroupName'), + "cluster_security_groups": self._get_multi_param( + 'ClusterSecurityGroups.member'), + "vpc_security_group_ids": self._get_multi_param( + 'VpcSecurityGroupIds.member'), + "preferred_maintenance_window": self._get_param( + 'PreferredMaintenanceWindow'), + "automated_snapshot_retention_period": self._get_int_param( + 'AutomatedSnapshotRetentionPeriod'), + "region": self.region, + "encrypted": False, + } + kwargs_from_snapshot.update(kwargs_from_request) + cluster_kwargs = kwargs_from_snapshot + cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json() + cluster['ClusterStatus'] = 'creating' + return self.get_response({ + "RestoreFromClusterSnapshotResponse": { + "RestoreFromClusterSnapshotResult": { + "Cluster": cluster, }, "ResponseMetadata": { "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", @@ -72,7 +163,7 @@ class RedshiftResponse(BaseResponse): }) def modify_cluster(self): - cluster_kwargs = { + request_kwargs = { "cluster_identifier": self._get_param('ClusterIdentifier'), "new_cluster_identifier": self._get_param('NewClusterIdentifier'), "node_type": self._get_param('NodeType'), @@ -90,6 +181,19 @@ class RedshiftResponse(BaseResponse): "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), } + # There's a bug in boto3 where the security group ids are not passed + # according to the AWS documentation + if not request_kwargs['vpc_security_group_ids']: + request_kwargs['vpc_security_group_ids'] = self._get_multi_param( + 'VpcSecurityGroupIds.VpcSecurityGroupId') + + cluster_kwargs = {} + # We only want parameters that were actually passed 
in, otherwise + # we'll stomp all over our cluster metadata with None values. + for (key, value) in iteritems(request_kwargs): + if value is not None and value != []: + cluster_kwargs[key] = value + cluster = self.redshift_backend.modify_cluster(**cluster_kwargs) return self.get_response({ @@ -273,3 +377,71 @@ class RedshiftResponse(BaseResponse): } } }) + + def create_cluster_snapshot(self): + cluster_identifier = self._get_param('ClusterIdentifier') + snapshot_identifier = self._get_param('SnapshotIdentifier') + tags = self.unpack_complex_list_params( + 'Tags.Tag', ('Key', 'Value')) + snapshot = self.redshift_backend.create_snapshot(cluster_identifier, + snapshot_identifier, + tags) + return self.get_response({ + 'CreateClusterSnapshotResponse': { + "CreateClusterSnapshotResult": { + "Snapshot": snapshot.to_json(), + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def describe_cluster_snapshots(self): + cluster_identifier = self._get_param('ClusterIdentifier') + snapshot_identifier = self._get_param('DBSnapshotIdentifier') + snapshots = self.redshift_backend.describe_snapshots(cluster_identifier, + snapshot_identifier) + return self.get_response({ + "DescribeClusterSnapshotsResponse": { + "DescribeClusterSnapshotsResult": { + "Snapshots": [snapshot.to_json() for snapshot in snapshots] + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def delete_cluster_snapshot(self): + snapshot_identifier = self._get_param('SnapshotIdentifier') + snapshot = self.redshift_backend.delete_snapshot(snapshot_identifier) + + return self.get_response({ + "DeleteClusterSnapshotResponse": { + "DeleteClusterSnapshotResult": { + "Snapshot": snapshot.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def describe_tags(self): + resource_type = self._get_param('ResourceType') + if resource_type != 'Snapshot': + raise 
NotImplementedError( + "The describe_tags action has not been fully implemented.") + tagged_resources = \ + self.redshift_backend.describe_tags_for_resource_type(resource_type) + return self.get_response({ + "DescribeTagsResponse": { + "DescribeTagsResult": { + "TaggedResources": tagged_resources + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index aff3e8bed..1df503de2 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -9,6 +9,9 @@ from boto.redshift.exceptions import ( ClusterSubnetGroupNotFound, InvalidSubnet, ) +from botocore.exceptions import ( + ClientError +) import sure # noqa from moto import mock_ec2 @@ -36,7 +39,7 @@ def test_create_cluster(): conn = boto.redshift.connect_to_region("us-east-1") cluster_identifier = 'my_cluster' - conn.create_cluster( + cluster_response = conn.create_cluster( cluster_identifier, node_type="dw.hs1.xlarge", master_username="username", @@ -51,6 +54,8 @@ def test_create_cluster(): allow_version_upgrade=True, number_of_nodes=3, ) + cluster_response['CreateClusterResponse']['CreateClusterResult'][ + 'Cluster']['ClusterStatus'].should.equal('creating') cluster_response = conn.describe_clusters(cluster_identifier) cluster = cluster_response['DescribeClustersResponse'][ @@ -320,7 +325,6 @@ def test_modify_cluster(): cluster_identifier, cluster_type="multi-node", node_type="dw.hs1.xlarge", - number_of_nodes=2, cluster_security_groups="security_group", master_user_password="new_password", cluster_parameter_group_name="my_parameter_group", @@ -343,7 +347,8 @@ def test_modify_cluster(): 'ParameterGroupName'].should.equal("my_parameter_group") cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7) cluster['AllowVersionUpgrade'].should.equal(False) - cluster['NumberOfNodes'].should.equal(2) + # This one should remain unmodified. 
+ cluster['NumberOfNodes'].should.equal(1) @mock_redshift_deprecated @@ -523,3 +528,177 @@ def test_delete_cluster_parameter_group(): # Delete invalid id conn.delete_cluster_parameter_group.when.called_with( "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + + +@mock_redshift +def test_create_cluster_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + cluster_response = client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + cluster_response['Cluster']['NodeType'].should.equal('ds2.xlarge') + + snapshot_response = client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': 'test-tag-key', + 'Value': 'test-tag-value'}] + ) + snapshot = snapshot_response['Snapshot'] + snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) + snapshot['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot['NumberOfNodes'].should.equal(1) + snapshot['NodeType'].should.equal('ds2.xlarge') + snapshot['MasterUsername'].should.equal('username') + + +@mock_redshift +def test_delete_cluster_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ) + + snapshots = client.describe_cluster_snapshots()['Snapshots'] + list(snapshots).should.have.length_of(1) + + client.delete_cluster_snapshot(SnapshotIdentifier=snapshot_identifier)[ + 
'Snapshot']['Status'].should.equal('deleted') + + snapshots = client.describe_cluster_snapshots()['Snapshots'] + list(snapshots).should.have.length_of(0) + + # Delete invalid id + client.delete_cluster_snapshot.when.called_with( + SnapshotIdentifier="not-a-snapshot").should.throw(ClientError) + + +@mock_redshift +def test_cluster_snapshot_already_exists(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ) + + client.create_cluster_snapshot.when.called_with( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ).should.throw(ClientError) + + +@mock_redshift +def test_create_cluster_from_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + original_cluster_identifier = 'original-cluster' + original_snapshot_identifier = 'original-snapshot' + new_cluster_identifier = 'new-cluster' + + client.create_cluster( + ClusterIdentifier=original_cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=original_snapshot_identifier, + ClusterIdentifier=original_cluster_identifier + ) + response = client.restore_from_cluster_snapshot( + ClusterIdentifier=new_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + Port=1234 + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + response = client.describe_clusters( + ClusterIdentifier=new_cluster_identifier + ) + new_cluster = response['Clusters'][0] + new_cluster['NodeType'].should.equal('ds2.xlarge') + 
new_cluster['MasterUsername'].should.equal('username') + new_cluster['Endpoint']['Port'].should.equal(1234) + + +@mock_redshift +def test_create_cluster_status_update(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'test-cluster' + + response = client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + response = client.describe_clusters( + ClusterIdentifier=cluster_identifier + ) + response['Clusters'][0]['ClusterStatus'].should.equal('available') + + +@mock_redshift +def test_describe_snapshot_tags(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + tag_key = 'test-tag-key' + tag_value = 'teat-tag-value' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + + tags_response = client.describe_tags(ResourceType='Snapshot') + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) From e4da4f6cd58d402a46ba10abfa7a602cac3ced7c Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Thu, 10 Aug 2017 19:33:38 -0400 Subject: [PATCH 115/412] Add more error handling to the ECR backend The error messages were copied from `botocore`. New exceptions: RepositoryNotFoundException & ImageNotFoundException. 
--- moto/ecr/exceptions.py | 22 +++++++++++ moto/ecr/models.py | 55 +++++++++++++++++++--------- moto/ecr/responses.py | 3 +- requirements-dev.txt | 2 +- tests/test_ecr/test_ecr_boto3.py | 63 +++++++++++++++++++++++++------- 5 files changed, 112 insertions(+), 33 deletions(-) create mode 100644 moto/ecr/exceptions.py diff --git a/moto/ecr/exceptions.py b/moto/ecr/exceptions.py new file mode 100644 index 000000000..f7b951b53 --- /dev/null +++ b/moto/ecr/exceptions.py @@ -0,0 +1,22 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + +class RepositoryNotFoundException(RESTError): + code = 400 + + def __init__(self, repository_name, registry_id): + super(RepositoryNotFoundException, self).__init__( + error_type="RepositoryNotFoundException", + message="The repository with name '{0}' does not exist in the registry " + "with id '{1}'".format(repository_name, registry_id)) + + +class ImageNotFoundException(RESTError): + code = 400 + + def __init__(self, image_id, repository_name, registry_id): + super(ImageNotFoundException, self).__init__( + error_type="ImageNotFoundException", + message="The image with imageId {0} does not exist within the repository with name '{1}' " + "in the registry with id '{2}'".format(image_id, repository_name, registry_id)) diff --git a/moto/ecr/models.py b/moto/ecr/models.py index b90700ff4..f5b6f24e4 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -7,6 +7,11 @@ from moto.ec2 import ec2_backends from copy import copy import hashlib +from moto.ecr.exceptions import ImageNotFoundException, RepositoryNotFoundException + + +DEFAULT_REGISTRY_ID = '012345678910' + class BaseObject(BaseModel): @@ -35,14 +40,13 @@ class BaseObject(BaseModel): class Repository(BaseObject): def __init__(self, repository_name): - self.arn = 'arn:aws:ecr:us-east-1:012345678910:repository/{0}'.format( - repository_name) + self.registry_id = DEFAULT_REGISTRY_ID + self.arn = 'arn:aws:ecr:us-east-1:{0}:repository/{1}'.format( 
+ self.registry_id, repository_name) self.name = repository_name # self.created = datetime.utcnow() - self.uri = '012345678910.dkr.ecr.us-east-1.amazonaws.com/{0}'.format( - repository_name - ) - self.registry_id = '012345678910' + self.uri = '{0}.dkr.ecr.us-east-1.amazonaws.com/{1}'.format( + self.registry_id, repository_name) self.images = [] @property @@ -93,7 +97,7 @@ class Repository(BaseObject): class Image(BaseObject): - def __init__(self, tag, manifest, repository, registry_id="012345678910"): + def __init__(self, tag, manifest, repository, registry_id=DEFAULT_REGISTRY_ID): self.image_tag = tag self.image_manifest = manifest self.image_size_in_bytes = 50 * 1024 * 1024 @@ -151,6 +155,11 @@ class ECRBackend(BaseBackend): """ maxResults and nextToken not implemented """ + if repository_names: + for repository_name in repository_names: + if repository_name not in self.repositories: + raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) + repositories = [] for repository in self.repositories.values(): # If a registry_id was supplied, ensure this repository matches @@ -170,11 +179,11 @@ class ECRBackend(BaseBackend): self.repositories[repository_name] = repository return repository - def delete_repository(self, respository_name, registry_id=None): - if respository_name in self.repositories: - return self.repositories.pop(respository_name) + def delete_repository(self, repository_name, registry_id=None): + if repository_name in self.repositories: + return self.repositories.pop(repository_name) else: - raise Exception("{0} is not a repository".format(respository_name)) + raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) def list_images(self, repository_name, registry_id=None): """ @@ -198,17 +207,27 @@ class ECRBackend(BaseBackend): if repository_name in self.repositories: repository = self.repositories[repository_name] else: - raise Exception("{0} is not a repository".format(repository_name)) + 
raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) if image_ids: response = set() for image_id in image_ids: - if 'imageDigest' in image_id: - desired_digest = image_id['imageDigest'] - response.update([i for i in repository.images if i.get_image_digest() == desired_digest]) - if 'imageTag' in image_id: - desired_tag = image_id['imageTag'] - response.update([i for i in repository.images if i.image_tag == desired_tag]) + found = False + for image in repository.images: + if (('imageDigest' in image_id and image.get_image_digest() == image_id['imageDigest']) or + ('imageTag' in image_id and image.image_tag == image_id['imageTag'])): + found = True + response.add(image) + if not found: + image_id_representation = "{imageDigest:'%s', imageTag:'%s'}" % ( + image_id.get('imageDigest', 'null'), + image_id.get('imageTag', 'null'), + ) + raise ImageNotFoundException( + image_id=image_id_representation, + repository_name=repository_name, + registry_id=registry_id or DEFAULT_REGISTRY_ID) + else: response = [] for image in repository.images: diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index 4fa0946b8..6207de4eb 100644 --- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py @@ -45,7 +45,8 @@ class ECRResponse(BaseResponse): def delete_repository(self): repository_str = self._get_param('repositoryName') - repository = self.ecr_backend.delete_repository(repository_str) + registry_id = self._get_param('registryId') + repository = self.ecr_backend.delete_repository(repository_str, registry_id) return json.dumps({ 'repository': repository.response_object }) diff --git a/requirements-dev.txt b/requirements-dev.txt index 52def6ed0..e2f379a59 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -7,5 +7,5 @@ flake8 freezegun flask boto3>=1.4.4 -botocore>=1.4.28 +botocore>=1.5.77 six diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 581906321..67d1a2cab 100644 --- 
a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -5,9 +5,11 @@ import json from datetime import datetime from random import random +import re import sure # noqa import boto3 +from botocore.exceptions import ClientError from dateutil.tz import tzlocal from moto import mock_ecr @@ -141,19 +143,6 @@ def test_describe_repositories_3(): response['repositories'][0]['repositoryUri'].should.equal(respository_uri) -@mock_ecr -def test_describe_repositories_4(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository1' - ) - _ = client.create_repository( - repositoryName='test_repository0' - ) - response = client.describe_repositories(repositoryNames=['not_a_valid_name']) - len(response['repositories']).should.equal(0) - - @mock_ecr def test_describe_repositories_with_image(): client = boto3.client('ecr', region_name='us-east-1') @@ -344,6 +333,54 @@ def test_describe_images_by_tag(): image_detail['imageDigest'].should.equal(put_response['imageId']['imageDigest']) +@mock_ecr +def test_describe_repository_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + + error_msg = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + client.describe_repositories.when.called_with( + repositoryNames=['repo-that-doesnt-exist'], + registryId='123', + ).should.throw(ClientError, error_msg) + +@mock_ecr +def test_describe_image_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository(repositoryName='test_repository') + + error_msg1 = re.compile( + r".*The image with imageId {imageDigest:'null', imageTag:'testtag'} does not exist within " + r"the repository with name 'test_repository' in the registry with id '123'.*", + re.MULTILINE) + + client.describe_images.when.called_with( + repositoryName='test_repository', imageIds=[{'imageTag': 'testtag'}], 
registryId='123', + ).should.throw(ClientError, error_msg1) + + error_msg2 = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + client.describe_images.when.called_with( + repositoryName='repo-that-doesnt-exist', imageIds=[{'imageTag': 'testtag'}], registryId='123', + ).should.throw(ClientError, error_msg2) + + +@mock_ecr +def test_delete_repository_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + + error_msg = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + + client.delete_repository.when.called_with( + repositoryName='repo-that-doesnt-exist', + registryId='123').should.throw( + ClientError, error_msg) + + @mock_ecr def test_describe_images_by_digest(): client = boto3.client('ecr', region_name='us-east-1') From 973264d9403a3512048ce0239a15c6c96a00dcf0 Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Thu, 10 Aug 2017 19:54:00 -0400 Subject: [PATCH 116/412] Convert struct argument to bytestring to avoid errors with Python 2.7.6 --- moto/sqs/models.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index f6657269c..e6209b4ba 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -59,6 +59,7 @@ class Message(BaseModel): return str.encode('utf-8') return str md5 = hashlib.md5() + struct_format = "!I".encode('ascii') # ensure it's a bytestring for name in sorted(self.message_attributes.keys()): attr = self.message_attributes[name] data_type = attr['data_type'] @@ -67,10 +68,10 @@ class Message(BaseModel): # Each part of each attribute is encoded right after it's # own length is packed into a 4-byte integer # 'timestamp' -> b'\x00\x00\x00\t' - encoded += struct.pack("!I", len(utf8(name))) + utf8(name) + encoded += struct.pack(struct_format, len(utf8(name))) + utf8(name) # The datatype is 
additionally given a final byte # representing which type it is - encoded += struct.pack("!I", len(data_type)) + utf8(data_type) + encoded += struct.pack(struct_format, len(data_type)) + utf8(data_type) encoded += TRANSPORT_TYPE_ENCODINGS[data_type] if data_type == 'String' or data_type == 'Number': @@ -86,7 +87,7 @@ class Message(BaseModel): # MD5 so as not to break client softwre return('deadbeefdeadbeefdeadbeefdeadbeef') - encoded += struct.pack("!I", len(utf8(value))) + utf8(value) + encoded += struct.pack(struct_format, len(utf8(value))) + utf8(value) md5.update(encoded) return md5.hexdigest() From 2447725e98bc790ea2aa55761a53ffbaf46decc9 Mon Sep 17 00:00:00 2001 From: Alexander Mohr Date: Thu, 10 Aug 2017 17:14:31 -0700 Subject: [PATCH 117/412] fix list-object-versions with prefix (#1045) fix list-object-versions with prefix --- moto/s3/models.py | 5 +++-- moto/s3/responses.py | 5 +++-- tests/test_s3/test_s3.py | 8 ++++++++ 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index c1a4fb04d..4ea33adb6 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -422,14 +422,15 @@ class S3Backend(BaseBackend): encoding_type=None, key_marker=None, max_keys=None, - version_id_marker=None): + version_id_marker=None, + prefix=''): bucket = self.get_bucket(bucket_name) if any((delimiter, encoding_type, key_marker, version_id_marker)): raise NotImplementedError( "Called get_bucket_versions with some of delimiter, encoding_type, key_marker, version_id_marker") - return itertools.chain(*(l for _, l in bucket.keys.iterlists())) + return itertools.chain(*(l for key, l in bucket.keys.iterlists() if key.startswith(prefix))) def get_bucket_policy(self, bucket_name): return self.get_bucket(bucket_name).policy diff --git a/moto/s3/responses.py b/moto/s3/responses.py index ec1361cb8..dea80518d 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -215,7 +215,7 @@ class ResponseObject(_TemplateEnvironmentMixin): 
encoding_type = querystring.get('encoding-type', [None])[0] key_marker = querystring.get('key-marker', [None])[0] max_keys = querystring.get('max-keys', [None])[0] - prefix = querystring.get('prefix', [None])[0] + prefix = querystring.get('prefix', [''])[0] version_id_marker = querystring.get('version-id-marker', [None])[0] bucket = self.backend.get_bucket(bucket_name) @@ -225,7 +225,8 @@ class ResponseObject(_TemplateEnvironmentMixin): encoding_type=encoding_type, key_marker=key_marker, max_keys=max_keys, - version_id_marker=version_id_marker + version_id_marker=version_id_marker, + prefix=prefix ) latest_versions = self.backend.get_bucket_latest_versions( bucket_name=bucket_name diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 619a60302..3832026eb 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -770,6 +770,14 @@ def test_list_versions(): versions[1].version_id.should.equal('1') versions[1].get_contents_as_string().should.equal(b"Version 2") + key = Key(bucket, 'the2-key') + key.set_contents_from_string("Version 1") + + keys = list(bucket.list()) + keys.should.have.length_of(2) + versions = list(bucket.list_versions(prefix='the2-')) + versions.should.have.length_of(1) + @mock_s3_deprecated def test_acl_setting(): From d8b0df2bef12787058a4b6a2c984f8f3a9e3ad44 Mon Sep 17 00:00:00 2001 From: Ali Rizwan Date: Fri, 11 Aug 2017 15:19:36 +0200 Subject: [PATCH 118/412] Added support for SSM delete_parameters Signed-off-by: Ali Rizwan --- moto/ssm/models.py | 10 ++++++++++ moto/ssm/responses.py | 16 ++++++++++++++++ tests/test_ssm/test_ssm_boto3.py | 19 +++++++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 63cb3c8ba..bbb84ad91 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -52,6 +52,16 @@ class SimpleSystemManagerBackend(BaseBackend): except KeyError: pass + def delete_parameters(self, names): + result = [] + for name in names: + try: + del 
self._parameters[name] + result.append(name) + except KeyError: + pass + return result + def get_all_parameters(self): result = [] for k, _ in self._parameters.items(): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 1fa1a81b2..73c29484a 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -26,6 +26,22 @@ class SimpleSystemManagerResponse(BaseResponse): self.ssm_backend.delete_parameter(name) return json.dumps({}) + def delete_parameters(self): + names = self._get_param('Names') + result = self.ssm_backend.delete_parameters(names) + + response = { + 'DeletedParameters': [], + 'InvalidParameters': [] + } + + for name in names: + if name in result: + response['DeletedParameters'].append(name) + else: + response['InvalidParameters'].append(name) + return json.dumps(response) + def get_parameters(self): names = self._get_param('Names') with_decryption = self._get_param('WithDecryption') diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 418c58708..38e266445 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -24,6 +24,25 @@ def test_delete_parameter(): response = client.get_parameters(Names=['test']) len(response['Parameters']).should.equal(0) +@mock_ssm +def test_delete_parameters(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(1) + + result = client.delete_parameters(Names=['test', 'invalid']) + len(result['DeletedParameters']).should.equal(1) + len(result['InvalidParameters']).should.equal(1) + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(0) @mock_ssm def test_put_parameter(): From ecf77d64cdf62eaee6ce108ff2e4e3ef3b0892d4 Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Sun, 13 Aug 2017 11:52:27 +1000 
Subject: [PATCH 119/412] add rds stop-start --- moto/rds2/models.py | 31 +++++++++++++++++++++++++++++++ moto/rds2/responses.py | 35 +++++++++++++++++++++++++++++++++-- 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 5abd2ed1b..6ae0c98a0 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -733,6 +733,37 @@ class RDS2Backend(BaseBackend): database = self.describe_databases(db_instance_identifier)[0] return database + def _check_can_stop_rds_instance_(self, database=None): + # todo: certain rds types not allowed to be stopped at this time. + if database: + if database.is_replica or database.multi_az: + # should be 400 error + return RDSClientError('InvalidDBClusterStateFault', 'Invalid DB type, when trying to perform StopDBInstance. See AWS RDS documentation on rds.stop_db_instance') + return True + + def stop_database(self, db_instance_identifier, snapshot_name=None): + database = self.describe_databases(db_instance_identifier)[0] + self._check_can_stop_rds_instance_(database) + if database.status != 'available': + return RDSClientError('InvalidDBInstanceState', 'when calling the StopDBInstance operation: Instance testdb is not in available state') + self.create_rds_snapshot(db_instance_identifier, db_instance_identifier) + database.status = 'shutdown' + return database + + def start_database(self, db_instance_identifier): + database = self.describe_databases(db_instance_identifier)[0] + if database.status != 'shutdown': + # should be 400 error + return RDSClientError('InvalidDBInstanceState', 'when calling the StartDBInstance operation: Instance {} is not stopped, it cannot be started.' 
% db_instance_identifier) + database.status = 'available' + return + + def create_rds_snapshot(self, db_instance_identifier, db_snapshot_identifier): + # todo + # DBSnapshotAlreadyExists + # SnapshotQuotaExceeded + return None + def find_db_from_id(self, db_id): if self.arn_regex.match(db_id): arn_breakdown = db_id.split(':') diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index ef02bfbf1..d8108c7f8 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -23,6 +23,7 @@ class RDS2Response(BaseResponse): "db_instance_identifier": self._get_param('DBInstanceIdentifier'), "db_name": self._get_param("DBName"), "db_parameter_group_name": self._get_param("DBParameterGroupName"), + "db_snapshot_identifier": self._get_param('DBSnapshotIdentifier'), "db_subnet_group_name": self._get_param("DBSubnetGroupName"), "engine": self._get_param("Engine"), "engine_version": self._get_param("EngineVersion"), @@ -193,6 +194,19 @@ class RDS2Response(BaseResponse): template = self.response_template(REMOVE_TAGS_FROM_RESOURCE_TEMPLATE) return template.render() + def stop_db_instance(self): + db_instance_identifier = self._get_param('DBInstanceIdentifier') + db_snapshot_identifier = self._get_param('DBInstanceIdentifier') + database = self.backend.stop_database(db_instance_identifier, db_snapshot_identifier) + template = self.response_template(STOP_DATABASE_TEMPLATE) + return template.render(database=database) + + def start_db_instance(self): + db_instance_identifier = self._get_param('DBInstanceIdentifier') + database = self.backend.start_database(db_instance_identifier) + template = self.response_template(START_DATABASE_TEMPLATE) + return template.render(database=database) + def create_db_security_group(self): group_name = self._get_param('DBSecurityGroupName') description = self._get_param('DBSecurityGroupDescription') @@ -410,8 +424,25 @@ REBOOT_DATABASE_TEMPLATE = """ + + {{ database.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab9 + +""" 
-DELETE_DATABASE_TEMPLATE = """ +STOP_DATABASE_TEMPLATE = """ + + {{ database.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab8 + +""" + +DELETE_DATABASE_TEMPLATE = """ {{ database.to_xml() }} @@ -420,7 +451,7 @@ DELETE_DATABASE_TEMPLATE = """ +CREATE_SNAPSHOT_TEMPLATE = """ {{ snapshot.to_xml() }} From 7afd3532c6b39f8eacc6ba0e1ad69472da9609ee Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Sun, 13 Aug 2017 12:00:21 +1000 Subject: [PATCH 120/412] fixup typos and cleanup code a little --- moto/rds2/models.py | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 6ae0c98a0..d9c1483ca 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -733,32 +733,27 @@ class RDS2Backend(BaseBackend): database = self.describe_databases(db_instance_identifier)[0] return database - def _check_can_stop_rds_instance_(self, database=None): + def stop_database(self, db_instance_identifier, db_snapshot_identifier=None): + database = self.describe_databases(db_instance_identifier)[0] # todo: certain rds types not allowed to be stopped at this time. - if database: - if database.is_replica or database.multi_az: + if database.is_replica or database.multi_az: # should be 400 error return RDSClientError('InvalidDBClusterStateFault', 'Invalid DB type, when trying to perform StopDBInstance. 
See AWS RDS documentation on rds.stop_db_instance') - return True - - def stop_database(self, db_instance_identifier, snapshot_name=None): - database = self.describe_databases(db_instance_identifier)[0] - self._check_can_stop_rds_instance_(database) if database.status != 'available': - return RDSClientError('InvalidDBInstanceState', 'when calling the StopDBInstance operation: Instance testdb is not in available state') - self.create_rds_snapshot(db_instance_identifier, db_instance_identifier) + return RDSClientError('InvalidDBInstanceState', 'when calling the StopDBInstance operation: Instance %s is not in available state' % db_instance_identifier) + self.create_rds_snapshot(db_instance_identifier, db_snapshot_identifier) database.status = 'shutdown' return database def start_database(self, db_instance_identifier): database = self.describe_databases(db_instance_identifier)[0] - if database.status != 'shutdown': - # should be 400 error - return RDSClientError('InvalidDBInstanceState', 'when calling the StartDBInstance operation: Instance {} is not stopped, it cannot be started.' % db_instance_identifier) + if database.status != 'shutdown': # should be 400 error + return RDSClientError('InvalidDBInstanceState', 'when calling the StartDBInstance operation: Instance %s is not stopped, it cannot be started.' 
% db_instance_identifier) database.status = 'available' return def create_rds_snapshot(self, db_instance_identifier, db_snapshot_identifier): + database = self.describe_databases(db_instance_identifier)[0] # todo # DBSnapshotAlreadyExists # SnapshotQuotaExceeded From 6c3c6623bfb666300dc998e51165667c236c140c Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Sun, 13 Aug 2017 12:02:49 +1000 Subject: [PATCH 121/412] should only call create_snapshot if db_snapshot_identifier is filled --- moto/rds2/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index d9c1483ca..edb46a157 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -741,7 +741,8 @@ class RDS2Backend(BaseBackend): return RDSClientError('InvalidDBClusterStateFault', 'Invalid DB type, when trying to perform StopDBInstance. See AWS RDS documentation on rds.stop_db_instance') if database.status != 'available': return RDSClientError('InvalidDBInstanceState', 'when calling the StopDBInstance operation: Instance %s is not in available state' % db_instance_identifier) - self.create_rds_snapshot(db_instance_identifier, db_snapshot_identifier) + if db_snapshot_identifier: + self.create_rds_snapshot(db_instance_identifier, db_snapshot_identifier) database.status = 'shutdown' return database From 9687b6e03e3985aa2137d5e7c115d084d60752c5 Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Sun, 13 Aug 2017 12:05:22 +1000 Subject: [PATCH 122/412] get appropritate parameters - doh --- moto/rds2/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index d8108c7f8..e88e4c603 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -196,7 +196,7 @@ class RDS2Response(BaseResponse): def stop_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') - db_snapshot_identifier = self._get_param('DBInstanceIdentifier') + db_snapshot_identifier = 
self._get_param('DBSnapshotIdentifier') database = self.backend.stop_database(db_instance_identifier, db_snapshot_identifier) template = self.response_template(STOP_DATABASE_TEMPLATE) return template.render(database=database) From ed39c2ea4e5e51611791ced8461498c2768c5a66 Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Mon, 14 Aug 2017 00:27:15 +1000 Subject: [PATCH 123/412] fix up some issues found in unittests - comment out snapshotting until later --- moto/rds2/exceptions.py | 15 ++++++ moto/rds2/models.py | 34 +++++++------ tests/test_rds2/test_rds2.py | 98 ++++++++++++++++++++++++++++++++++-- 3 files changed, 129 insertions(+), 18 deletions(-) diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py index 057a13ba2..5e4b38ef7 100644 --- a/moto/rds2/exceptions.py +++ b/moto/rds2/exceptions.py @@ -58,3 +58,18 @@ class DBParameterGroupNotFoundError(RDSClientError): super(DBParameterGroupNotFoundError, self).__init__( 'DBParameterGroupNotFound', 'DB Parameter Group {0} not found.'.format(db_parameter_group_name)) + +class InvalidDBClusterStateFaultError(RDSClientError): + + def __init__(self, database_identifier): + super(InvalidDBClusterStateFaultError, self).__init__( + 'InvalidDBClusterStateFault', + 'Invalid DB type, when trying to perform StopDBInstance on {0}e. 
See AWS RDS documentation on rds.stop_db_instance'.format(database_identifier)) + +class InvalidDBInstanceStateError(RDSClientError): + + def __init__(self, database_identifier, istate): + estate = "in available state" if istate == 'stop' else "stopped, it cannot be started" + super(InvalidDBInstanceStateError, self).__init__( + 'InvalidDBInstanceState', + 'when calling the {}DBInstance operation: Instance {} is not {}.'.format(istate.title(), database_identifier, estate)) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index edb46a157..9fbb79ed9 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -18,7 +18,9 @@ from .exceptions import (RDSClientError, DBSnapshotNotFoundError, DBSecurityGroupNotFoundError, DBSubnetGroupNotFoundError, - DBParameterGroupNotFoundError) + DBParameterGroupNotFoundError, + InvalidDBClusterStateFaultError, + InvalidDBInstanceStateError) class Database(BaseModel): @@ -737,28 +739,30 @@ class RDS2Backend(BaseBackend): database = self.describe_databases(db_instance_identifier)[0] # todo: certain rds types not allowed to be stopped at this time. if database.is_replica or database.multi_az: - # should be 400 error - return RDSClientError('InvalidDBClusterStateFault', 'Invalid DB type, when trying to perform StopDBInstance. 
See AWS RDS documentation on rds.stop_db_instance') + # todo: more db types not supported by stop/start instance api + raise InvalidDBClusterStateFaultError(db_instance_identifier) if database.status != 'available': - return RDSClientError('InvalidDBInstanceState', 'when calling the StopDBInstance operation: Instance %s is not in available state' % db_instance_identifier) - if db_snapshot_identifier: - self.create_rds_snapshot(db_instance_identifier, db_snapshot_identifier) + raise InvalidDBInstanceStateError(db_instance_identifier, 'stop') + # todo: create rds snapshots + # if db_snapshot_identifier: + # self.create_rds_snapshot(db_instance_identifier, db_snapshot_identifier) database.status = 'shutdown' return database def start_database(self, db_instance_identifier): database = self.describe_databases(db_instance_identifier)[0] - if database.status != 'shutdown': # should be 400 error - return RDSClientError('InvalidDBInstanceState', 'when calling the StartDBInstance operation: Instance %s is not stopped, it cannot be started.' 
% db_instance_identifier) + # todo: bunch of different error messages to be generated from this api call + if database.status != 'shutdown': + raise InvalidDBInstanceStateError(db_instance_identifier, 'start') database.status = 'available' - return + return database - def create_rds_snapshot(self, db_instance_identifier, db_snapshot_identifier): - database = self.describe_databases(db_instance_identifier)[0] - # todo - # DBSnapshotAlreadyExists - # SnapshotQuotaExceeded - return None + # def create_rds_snapshot(self, db_instance_identifier, db_snapshot_identifier): + # database = self.describe_databases(db_instance_identifier)[0] + # # todo + # # DBSnapshotAlreadyExists + # # SnapshotQuotaExceeded + # return None def find_db_from_id(self, db_id): if self.arn_regex.match(db_id): diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index a50f99868..40e35c9c1 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -19,9 +19,6 @@ def test_create_database(): MasterUserPassword='hunter2', Port=1234, DBSecurityGroups=["my_sg"]) - database['DBInstance']['DBInstanceStatus'].should.equal('available') - database['DBInstance']['DBName'].should.equal('staging-postgres') - database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") database['DBInstance']['AllocatedStorage'].should.equal(10) database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") database['DBInstance']['LicenseModel'].should.equal("license-included") @@ -30,7 +27,102 @@ def test_create_database(): 'DBSecurityGroupName'].should.equal('my_sg') database['DBInstance']['DBInstanceArn'].should.equal( 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') + database['DBInstance']['DBInstanceStatus'].should.equal('available') + database['DBInstance']['DBName'].should.equal('staging-postgres') + database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") +@mock_rds2 +def test_stop_database(): + conn = boto3.client('rds', 
region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # test stopping database + response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + # test rdsclient error when trying to stop an already stopped database + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + +@mock_rds2 +def test_start_database(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # test trying to start an already started database + conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # stop and test start - should go from shutdown to available + response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + response = 
conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('available') + +@mock_rds2 +def test_fail_to_stop_multi_az(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + MultiAZ=True) + + mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # multi-az databases arent allowed to be shutdown at this time. + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # multi-az databases arent allowed to be started up at this time. 
+ conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + +@mock_rds2 +def test_fail_to_stop_readreplica(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", + SourceDBInstanceIdentifier="db-master-1", + DBInstanceClass="db.m1.small") + + mydb = conn.describe_db_instances(DBInstanceIdentifier=replica['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # read-replicas are not allowed to be stopped at this time. + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # read-replicas are not allowed to be started at this time. 
+ conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) @mock_rds2 def test_get_databases(): From 85dd2440f42b88644b4f9320c0814a06303ec265 Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Mon, 14 Aug 2017 00:32:08 +1000 Subject: [PATCH 124/412] oopsie on api version dates for delete and create snapshot_templates, should be on stop/start rds instances --- moto/rds2/responses.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index e88e4c603..bf76660aa 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -424,7 +424,7 @@ REBOOT_DATABASE_TEMPLATE = """ +START_DATABASE_TEMPLATE = """ {{ database.to_xml() }} @@ -433,7 +433,7 @@ START_DATABASE_TEMPLATE = """ +STOP_DATABASE_TEMPLATE = """ {{ database.to_xml() }} @@ -442,7 +442,7 @@ STOP_DATABASE_TEMPLATE = """ +DELETE_DATABASE_TEMPLATE = """ {{ database.to_xml() }} @@ -451,7 +451,7 @@ DELETE_DATABASE_TEMPLATE = """ +CREATE_SNAPSHOT_TEMPLATE = """ {{ snapshot.to_xml() }} From f2cc60b999f994bca5bf003bd70b4feca3259ca1 Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Mon, 14 Aug 2017 00:41:26 +1000 Subject: [PATCH 125/412] satisfy the flake tests - doesnt like my flakey code :( --- moto/rds2/exceptions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py index 5e4b38ef7..9c0c0144a 100644 --- a/moto/rds2/exceptions.py +++ b/moto/rds2/exceptions.py @@ -59,6 +59,7 @@ class DBParameterGroupNotFoundError(RDSClientError): 'DBParameterGroupNotFound', 'DB Parameter Group {0} not found.'.format(db_parameter_group_name)) + class InvalidDBClusterStateFaultError(RDSClientError): def __init__(self, database_identifier): @@ -66,6 +67,7 @@ class InvalidDBClusterStateFaultError(RDSClientError): 'InvalidDBClusterStateFault', 'Invalid DB type, when trying to perform StopDBInstance on {0}e. 
See AWS RDS documentation on rds.stop_db_instance'.format(database_identifier)) + class InvalidDBInstanceStateError(RDSClientError): def __init__(self, database_identifier, istate): From 672604d3e7df19b81d30f8896a6baafa8789d4a2 Mon Sep 17 00:00:00 2001 From: David Cuthbert Date: Sun, 13 Aug 2017 21:58:11 -0700 Subject: [PATCH 126/412] Add support for iam:DetachRolePolicy and iam:DeleteRolePolicy. (#1052) * Add support for iam:DetachRolePolicy and iam:DeleteRolePolicy. * Raise proper exceptions for iam:DetachRolePolicy and iam:DeleteRolePolicy when the policy doesn't exist. --- moto/iam/models.py | 23 +++++++++++++++++++++++ moto/iam/responses.py | 20 ++++++++++++++++++++ tests/test_iam/test_iam.py | 38 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+) diff --git a/moto/iam/models.py b/moto/iam/models.py index 1e4b58578..e30ad09d4 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -76,6 +76,10 @@ class ManagedPolicy(Policy): self.attachment_count += 1 role.managed_policies[self.name] = self + def detach_from_role(self, role): + self.attachment_count -= 1 + del role.managed_policies[self.name] + class AWSManagedPolicy(ManagedPolicy): """AWS-managed policy.""" @@ -120,6 +124,13 @@ class Role(BaseModel): def put_policy(self, policy_name, policy_json): self.policies[policy_name] = policy_json + def delete_policy(self, policy_name): + try: + del self.policies[policy_name] + except KeyError: + raise IAMNotFoundException( + "The role policy with name {0} cannot be found.".format(policy_name)) + @property def physical_resource_id(self): return self.id @@ -497,6 +508,14 @@ class IAMBackend(BaseBackend): policy = arns[policy_arn] policy.attach_to_role(self.get_role(role_name)) + def detach_role_policy(self, policy_arn, role_name): + arns = dict((p.arn, p) for p in self.managed_policies.values()) + try: + policy = arns[policy_arn] + policy.detach_from_role(self.get_role(role_name)) + except KeyError: + raise IAMNotFoundException("Policy {0} was 
not found.".format(policy_arn)) + def create_policy(self, description, path, policy_document, policy_name): policy = ManagedPolicy( policy_name, @@ -584,6 +603,10 @@ class IAMBackend(BaseBackend): role = self.get_role(role_name) role.put_policy(policy_name, policy_json) + def delete_role_policy(self, role_name, policy_name): + role = self.get_role(role_name) + role.delete_policy(policy_name) + def get_role_policy(self, role_name, policy_name): role = self.get_role(role_name) for p, d in role.policies.items(): diff --git a/moto/iam/responses.py b/moto/iam/responses.py index a5e5081c3..5929a2005 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -13,6 +13,13 @@ class IamResponse(BaseResponse): template = self.response_template(ATTACH_ROLE_POLICY_TEMPLATE) return template.render() + def detach_role_policy(self): + role_name = self._get_param('RoleName') + policy_arn = self._get_param('PolicyArn') + iam_backend.detach_role_policy(policy_arn, role_name) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name="DetachRolePolicyResponse") + def create_policy(self): description = self._get_param('Description') path = self._get_param('Path') @@ -82,6 +89,13 @@ class IamResponse(BaseResponse): template = self.response_template(GENERIC_EMPTY_TEMPLATE) return template.render(name="PutRolePolicyResponse") + def delete_role_policy(self): + role_name = self._get_param('RoleName') + policy_name = self._get_param('PolicyName') + iam_backend.delete_role_policy(role_name, policy_name) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name="DeleteRolePolicyResponse") + def get_role_policy(self): role_name = self._get_param('RoleName') policy_name = self._get_param('PolicyName') @@ -446,6 +460,12 @@ ATTACH_ROLE_POLICY_TEMPLATE = """ """ +DETACH_ROLE_POLICY_TEMPLATE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + CREATE_POLICY_TEMPLATE = """ diff --git a/tests/test_iam/test_iam.py 
b/tests/test_iam/test_iam.py index b5968f722..335b458ea 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -213,8 +213,21 @@ def test_list_role_policies(): conn.create_role("my-role") conn.put_role_policy("my-role", "test policy", "my policy") role = conn.list_role_policies("my-role") + role.policy_names.should.have.length_of(1) role.policy_names[0].should.equal("test policy") + conn.put_role_policy("my-role", "test policy 2", "another policy") + role = conn.list_role_policies("my-role") + role.policy_names.should.have.length_of(2) + + conn.delete_role_policy("my-role", "test policy") + role = conn.list_role_policies("my-role") + role.policy_names.should.have.length_of(1) + role.policy_names[0].should.equal("test policy 2") + + with assert_raises(BotoServerError): + conn.delete_role_policy("my-role", "test policy") + @mock_iam_deprecated() def test_put_role_policy(): @@ -548,6 +561,31 @@ def test_managed_policy(): resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ 'attached_policies'].should.have.length_of(2) + conn.detach_role_policy( + "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", + role_name) + rows = conn.list_policies(only_attached=True)['list_policies_response'][ + 'list_policies_result']['policies'] + rows.should.have.length_of(1) + for x in rows: + int(x['attachment_count']).should.be.greater_than(0) + + # boto has not implemented this end point but accessible this way + resp = conn.get_response('ListAttachedRolePolicies', + {'RoleName': role_name}, + list_marker='AttachedPolicies') + resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ + 'attached_policies'].should.have.length_of(1) + + with assert_raises(BotoServerError): + conn.detach_role_policy( + "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", + role_name) + + with assert_raises(BotoServerError): + conn.detach_role_policy( + "arn:aws:iam::aws:policy/Nonexistent", role_name) 
+ @mock_iam def test_boto3_create_login_profile(): From c84e8c86f0c471eab7ce5107508ec835a86ab1e2 Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Tue, 15 Aug 2017 00:55:09 +1000 Subject: [PATCH 127/412] modify to use create_snapshot, add extra tests for certain error conditions --- moto/rds2/exceptions.py | 18 +++++++++- moto/rds2/models.py | 24 ++++++------- tests/test_rds2/test_rds2.py | 70 +++++++++++++++++++++++++++++++++--- 3 files changed, 95 insertions(+), 17 deletions(-) diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py index 9c0c0144a..0e716310e 100644 --- a/moto/rds2/exceptions.py +++ b/moto/rds2/exceptions.py @@ -74,4 +74,20 @@ class InvalidDBInstanceStateError(RDSClientError): estate = "in available state" if istate == 'stop' else "stopped, it cannot be started" super(InvalidDBInstanceStateError, self).__init__( 'InvalidDBInstanceState', - 'when calling the {}DBInstance operation: Instance {} is not {}.'.format(istate.title(), database_identifier, estate)) + 'Instance {} is not {}.'.format(database_identifier, estate)) + + +class SnapshotQuotaExceededError(RDSClientError): + + def __init__(self): + super(SnapshotQuotaExceededError, self).__init__( + 'SnapshotQuotaExceeded', + 'The request cannot be processed because it would exceed the maximum number of snapshots.') + + +class DBSnapshotAlreadyExistsError(RDSClientError): + + def __init__(self, database_snapshot_identifier): + super(DBSnapshotAlreadyExistsError, self).__init__( + 'DBSnapshotAlreadyExists', + 'Cannot create the snapshot because a snapshot with the identifier {} already exists.'.format(database_snapshot_identifier)) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 9fbb79ed9..d52cb916d 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import copy import datetime +import os from collections import defaultdict import boto.rds2 @@ -20,7 +21,9 @@ from .exceptions import (RDSClientError, 
DBSubnetGroupNotFoundError, DBParameterGroupNotFoundError, InvalidDBClusterStateFaultError, - InvalidDBInstanceStateError) + InvalidDBInstanceStateError, + SnapshotQuotaExceededError, + DBSnapshotAlreadyExistsError) class Database(BaseModel): @@ -410,6 +413,7 @@ class Snapshot(BaseModel): self.tags = tags or [] self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) + @property def snapshot_arn(self): return "arn:aws:rds:{0}:1234567890:snapshot:{1}".format(self.database.region, self.snapshot_id) @@ -676,10 +680,14 @@ class RDS2Backend(BaseBackend): self.databases[database_id] = database return database - def create_snapshot(self, db_instance_identifier, db_snapshot_identifier, tags): + def create_snapshot(self, db_instance_identifier, db_snapshot_identifier, tags=None): database = self.databases.get(db_instance_identifier) if not database: raise DBInstanceNotFoundError(db_instance_identifier) + if db_snapshot_identifier in self.snapshots: + raise DBSnapshotAlreadyExistsError(db_snapshot_identifier) + if len(self.snapshots) >= int(os.environ.get('MOTO_RDS_SNAPSHOT_LIMIT', '100')): + raise SnapshotQuotaExceededError() snapshot = Snapshot(database, db_snapshot_identifier, tags) self.snapshots[db_snapshot_identifier] = snapshot return snapshot @@ -743,9 +751,8 @@ class RDS2Backend(BaseBackend): raise InvalidDBClusterStateFaultError(db_instance_identifier) if database.status != 'available': raise InvalidDBInstanceStateError(db_instance_identifier, 'stop') - # todo: create rds snapshots - # if db_snapshot_identifier: - # self.create_rds_snapshot(db_instance_identifier, db_snapshot_identifier) + if db_snapshot_identifier: + self.create_snapshot(db_instance_identifier, db_snapshot_identifier) database.status = 'shutdown' return database @@ -757,13 +764,6 @@ class RDS2Backend(BaseBackend): database.status = 'available' return database - # def create_rds_snapshot(self, db_instance_identifier, db_snapshot_identifier): - # database = 
self.describe_databases(db_instance_identifier)[0] - # # todo - # # DBSnapshotAlreadyExists - # # SnapshotQuotaExceeded - # return None - def find_db_from_id(self, db_id): if self.arn_regex.match(db_id): arn_breakdown = db_id.split(':') diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 40e35c9c1..97914e7e0 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -31,6 +31,7 @@ def test_create_database(): database['DBInstance']['DBName'].should.equal('staging-postgres') database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") + @mock_rds2 def test_stop_database(): conn = boto3.client('rds', region_name='us-west-2') @@ -46,12 +47,17 @@ def test_stop_database(): DBSecurityGroups=["my_sg"]) mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] mydb['DBInstanceStatus'].should.equal('available') - # test stopping database + # test stopping database should shutdown response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') # test rdsclient error when trying to stop an already stopped database conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # test stopping a stopped database with snapshot should error and no snapshot should exist for that call + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) + response = conn.describe_db_snapshots() + response['DBSnapshots'].should.equal([]) + @mock_rds2 def test_start_database(): @@ -68,14 +74,27 @@ def test_start_database(): DBSecurityGroups=["my_sg"]) mydb = 
conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] mydb['DBInstanceStatus'].should.equal('available') - # test trying to start an already started database + # test starting an already started database should error conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # stop and test start - should go from shutdown to available - response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + # stop and test start - should go from shutdown to available, create snapshot and check snapshot + response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap') + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + response = conn.describe_db_snapshots() + response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) response['DBInstance']['DBInstanceStatus'].should.equal('available') + # starting database should not remove snapshot + response = conn.describe_db_snapshots() + response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') + # test stopping database, create snapshot with existing snapshot already created should throw error + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) + # test stopping database not invoking snapshot should succeed. 
+ response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + @mock_rds2 def test_fail_to_stop_multi_az(): @@ -99,6 +118,7 @@ def test_fail_to_stop_multi_az(): # multi-az databases arent allowed to be started up at this time. conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + @mock_rds2 def test_fail_to_stop_readreplica(): conn = boto3.client('rds', region_name='us-west-2') @@ -124,6 +144,48 @@ def test_fail_to_stop_readreplica(): # read-replicas are not allowed to be started at this time. conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + +@mock_rds2 +def test_snapshotquota_exceeded(): + import os + conn = boto3.client('rds', region_name='us-west-2') + database1 = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + database2 = conn.create_db_instance(DBInstanceIdentifier='db-master-2', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + database3 = conn.create_db_instance(DBInstanceIdentifier='db-master-3', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + conn.stop_db_instance(DBInstanceIdentifier=database1['DBInstance']['DBInstanceIdentifier'], 
DBSnapshotIdentifier='rocky4570-rds-snap1') + conn.stop_db_instance(DBInstanceIdentifier=database2['DBInstance']['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap2') + os.environ['MOTO_RDS_SNAPSHOT_LIMIT'] = '2' + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=database3['DBInstance']['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap3').should.throw(ClientError) + os.unsetenv('MOTO_RDS_SNAPSHOT_LIMIT') + + @mock_rds2 def test_get_databases(): conn = boto3.client('rds', region_name='us-west-2') From 1c1ef9dc25afec99452c8e63f7df2e97d9a85810 Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Tue, 15 Aug 2017 00:58:33 +1000 Subject: [PATCH 128/412] the linter is too linty --- moto/rds2/models.py | 1 - 1 file changed, 1 deletion(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index d52cb916d..bb66ead57 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -413,7 +413,6 @@ class Snapshot(BaseModel): self.tags = tags or [] self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) - @property def snapshot_arn(self): return "arn:aws:rds:{0}:1234567890:snapshot:{1}".format(self.database.region, self.snapshot_id) From bca50472021cb97c8a7d6063f68cc4dc86161119 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Fri, 11 Aug 2017 17:57:06 -0700 Subject: [PATCH 129/412] Implement additional IAM endpoints - attach_user_policy - detach_user_policy - list_attached_user_policies --- moto/iam/models.py | 37 ++++++++++++++++++++++++ moto/iam/responses.py | 59 ++++++++++++++++++++++++++++++++++++++ tests/test_iam/test_iam.py | 27 +++++++++++++++++ 3 files changed, 123 insertions(+) diff --git a/moto/iam/models.py b/moto/iam/models.py index e30ad09d4..e6f8bae63 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -80,6 +80,14 @@ class ManagedPolicy(Policy): self.attachment_count -= 1 del role.managed_policies[self.name] + def attach_to_user(self, user): + self.attachment_count += 1 + 
user.managed_policies[self.name] = self + + def detach_from_user(self, user): + self.attachment_count -= 1 + del user.managed_policies[self.name] + class AWSManagedPolicy(ManagedPolicy): """AWS-managed policy.""" @@ -265,6 +273,7 @@ class User(BaseModel): self.created = datetime.utcnow() self.mfa_devices = {} self.policies = {} + self.managed_policies = {} self.access_keys = [] self.password = None self.password_reset_required = False @@ -516,6 +525,16 @@ class IAMBackend(BaseBackend): except KeyError: raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) + def attach_user_policy(self, policy_arn, user_name): + arns = dict((p.arn, p) for p in self.managed_policies.values()) + policy = arns[policy_arn] + policy.attach_to_user(self.get_user(user_name)) + + def detach_user_policy(self, policy_arn, user_name): + arns = dict((p.arn, p) for p in self.managed_policies.values()) + policy = arns[policy_arn] + policy.detach_from_user(self.get_user(user_name)) + def create_policy(self, description, path, policy_document, policy_name): policy = ManagedPolicy( policy_name, @@ -547,6 +566,24 @@ class IAMBackend(BaseBackend): return policies, marker + def list_attached_user_policies(self, user_name, marker=None, max_items=100, path_prefix='/'): + policies = self.get_user(user_name).managed_policies.values() + + if path_prefix: + policies = [p for p in policies if p.path.startswith(path_prefix)] + + policies = sorted(policies, key=lambda policy: policy.name) + start_idx = int(marker) if marker else 0 + + policies = policies[start_idx:start_idx + max_items] + + if len(policies) < max_items: + marker = None + else: + marker = str(start_idx + max_items) + + return policies, marker + def list_policies(self, marker, max_items, only_attached, path_prefix, scope): policies = self.managed_policies.values() diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 5929a2005..e79d8bc80 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -20,6 +20,20 
@@ class IamResponse(BaseResponse): template = self.response_template(GENERIC_EMPTY_TEMPLATE) return template.render(name="DetachRolePolicyResponse") + def attach_user_policy(self): + policy_arn = self._get_param('PolicyArn') + user_name = self._get_param('UserName') + iam_backend.attach_user_policy(policy_arn, user_name) + template = self.response_template(ATTACH_USER_POLICY_TEMPLATE) + return template.render() + + def detach_user_policy(self): + policy_arn = self._get_param('PolicyArn') + user_name = self._get_param('UserName') + iam_backend.detach_user_policy(policy_arn, user_name) + template = self.response_template(DETACH_USER_POLICY_TEMPLATE) + return template.render() + def create_policy(self): description = self._get_param('Description') path = self._get_param('Path') @@ -40,6 +54,17 @@ class IamResponse(BaseResponse): template = self.response_template(LIST_ATTACHED_ROLE_POLICIES_TEMPLATE) return template.render(policies=policies, marker=marker) + def list_attached_user_policies(self): + marker = self._get_param('Marker') + max_items = self._get_int_param('MaxItems', 100) + path_prefix = self._get_param('PathPrefix', '/') + user_name = self._get_param('UserName') + policies, marker = iam_backend.list_attached_user_policies( + user_name, marker=marker, max_items=max_items, + path_prefix=path_prefix) + template = self.response_template(LIST_ATTACHED_USER_POLICIES_TEMPLATE) + return template.render(policies=policies, marker=marker) + def list_policies(self): marker = self._get_param('Marker') max_items = self._get_int_param('MaxItems', 100) @@ -466,6 +491,18 @@ DETACH_ROLE_POLICY_TEMPLATE = """ """ +ATTACH_USER_POLICY_TEMPLATE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + +DETACH_USER_POLICY_TEMPLATE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + CREATE_POLICY_TEMPLATE = """ @@ -506,6 +543,28 @@ LIST_ATTACHED_ROLE_POLICIES_TEMPLATE = """ """ +LIST_ATTACHED_USER_POLICIES_TEMPLATE = """ + + {% if marker is none %} + false + {% else %} + true + 
{{ marker }} + {% endif %} + + {% for policy in policies %} + + {{ policy.name }} + {{ policy.arn }} + + {% endfor %} + + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + LIST_POLICIES_TEMPLATE = """ {% if marker is none %} diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 335b458ea..3c567136c 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -599,3 +599,30 @@ def test_boto3_create_login_profile(): with assert_raises(ClientError): conn.create_login_profile(UserName='my-user', Password='Password') + + +@mock_iam() +def test_attach_detach_user_policy(): + iam = boto3.resource('iam', region_name='us-east-1') + client = boto3.client('iam', region_name='us-east-1') + + user = iam.create_user(UserName='test-user') + + policy_name = 'UserAttachedPolicy' + policy = iam.create_policy(PolicyName=policy_name, + PolicyDocument='{"mypolicy": "test"}', + Path='/mypolicy/', + Description='my user attached policy') + + client.attach_user_policy(UserName=user.name, PolicyArn=policy.arn) + + resp = client.list_attached_user_policies(UserName=user.name) + resp['AttachedPolicies'].should.have.length_of(1) + attached_policy = resp['AttachedPolicies'][0] + attached_policy['PolicyArn'].should.equal(policy.arn) + attached_policy['PolicyName'].should.equal(policy_name) + + client.detach_user_policy(UserName=user.name, PolicyArn=policy.arn) + + resp = client.list_attached_user_policies(UserName=user.name) + resp['AttachedPolicies'].should.have.length_of(0) From 7b36f6b790b45e9ab0102c1313d1276698da0fd6 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 14 Jul 2017 10:54:10 -0700 Subject: [PATCH 130/412] Adding implementation_coverage.py script --- implementation_coverage.py | 68 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 implementation_coverage.py diff --git a/implementation_coverage.py b/implementation_coverage.py new file mode 100644 index 000000000..d56e442a8 --- /dev/null +++ 
b/implementation_coverage.py @@ -0,0 +1,68 @@ +import moto +from botocore import xform_name +from botocore.session import Session +import boto3 + + +def get_moto_implementation(service_name): + if not hasattr(moto, service_name): + return None + module = getattr(moto, service_name) + if module is None: + return None + mock = getattr(module, "mock_{}".format(service_name)) + if mock is None: + return None + backends = list(mock().backends.values()) + if backends: + return backends[0] + + +def calculate_implementation_coverage(): + service_names = Session().get_available_services() + coverage = {} + for service_name in service_names: + moto_client = get_moto_implementation(service_name) + real_client = boto3.client(service_name, region_name='us-east-1') + implemented = [] + not_implemented = [] + + operation_names = [xform_name(op) for op in real_client.meta.service_model.operation_names] + for op in operation_names: + if moto_client and op in dir(moto_client): + implemented.append(op) + else: + not_implemented.append(op) + + coverage[service_name] = { + 'implemented': implemented, + 'not_implemented': not_implemented, + } + return coverage + + +def print_implementation_coverage(): + coverage = calculate_implementation_coverage() + for service_name in coverage: + implemented = coverage.get(service_name)['implemented'] + not_implemented = coverage.get(service_name)['not_implemented'] + operations = sorted(implemented + not_implemented) + + if implemented and not_implemented: + percentage_implemented = int(100.0 * len(implemented) / (len(implemented) + len(not_implemented))) + elif implemented: + percentage_implemented = 100 + else: + percentage_implemented = 0 + + print("-----------------------") + print("{} - {}% implemented".format(service_name, percentage_implemented)) + print("-----------------------") + for op in operations: + if op in implemented: + print("[X] {}".format(op)) + else: + print("[ ] {}".format(op)) + +if __name__ == '__main__': + 
print_implementation_coverage() From 7a3f6e6e47779d7d4918dfb66c165fe1da77a178 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 14 Aug 2017 14:51:50 -0700 Subject: [PATCH 131/412] making script executable --- implementation_coverage.py | 1 + 1 file changed, 1 insertion(+) mode change 100644 => 100755 implementation_coverage.py diff --git a/implementation_coverage.py b/implementation_coverage.py old mode 100644 new mode 100755 index d56e442a8..f0d22fc95 --- a/implementation_coverage.py +++ b/implementation_coverage.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python import moto from botocore import xform_name from botocore.session import Session From 592bf868ff01b487465ed4a6969f7068ef74c8ce Mon Sep 17 00:00:00 2001 From: rocky4570fft Date: Tue, 15 Aug 2017 10:34:10 +1000 Subject: [PATCH 132/412] remove test for rds snapshot quota exceed as moto_server doesnt support it --- tests/test_rds2/test_rds2.py | 41 ------------------------------------ 1 file changed, 41 deletions(-) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 97914e7e0..4ab7dbc60 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -145,47 +145,6 @@ def test_fail_to_stop_readreplica(): conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) -@mock_rds2 -def test_snapshotquota_exceeded(): - import os - conn = boto3.client('rds', region_name='us-west-2') - database1 = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - database2 = conn.create_db_instance(DBInstanceIdentifier='db-master-2', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - 
MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - database3 = conn.create_db_instance(DBInstanceIdentifier='db-master-3', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - conn.stop_db_instance(DBInstanceIdentifier=database1['DBInstance']['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap1') - conn.stop_db_instance(DBInstanceIdentifier=database2['DBInstance']['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap2') - os.environ['MOTO_RDS_SNAPSHOT_LIMIT'] = '2' - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=database3['DBInstance']['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap3').should.throw(ClientError) - os.unsetenv('MOTO_RDS_SNAPSHOT_LIMIT') - - @mock_rds2 def test_get_databases(): conn = boto3.client('rds', region_name='us-west-2') From b4013f0e60119f8eac926f8924b642c621dcc8d3 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Wed, 16 Aug 2017 16:41:58 +0900 Subject: [PATCH 133/412] Adds ELBv2 support in the README --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index cabf6b45f..251c732eb 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. 
L |------------------------------------------------------------------------------| | ELB | @mock_elb | core endpoints done | |------------------------------------------------------------------------------| +| ELBv2 | @mock_elbv2 | core endpoints done | +|------------------------------------------------------------------------------| | EMR | @mock_emr | core endpoints done | |------------------------------------------------------------------------------| | Glacier | @mock_glacier | core endpoints done | From 1c96335895357d79549094de55f722b5fe3b1be2 Mon Sep 17 00:00:00 2001 From: Ali Rizwan Date: Wed, 16 Aug 2017 12:49:03 +0200 Subject: [PATCH 134/412] get_parameters should list InvalidParameters Signed-off-by: Ali Rizwan --- moto/ssm/responses.py | 4 ++++ tests/test_ssm/test_ssm_boto3.py | 16 ++++++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 73c29484a..7c31a6dd9 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -57,6 +57,10 @@ class SimpleSystemManagerResponse(BaseResponse): param_data = parameter.response_object(with_decryption) response['Parameters'].append(param_data) + param_names = [param.name for param in result] + for name in names: + if name not in param_names: + response['InvalidParameters'].append(name) return json.dumps(response) def describe_parameters(self): diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 38e266445..7a783299b 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -161,7 +161,6 @@ def test_describe_parameters_filter_type(): p['KeyId'] = 'a key' client.put_parameter(**p) - response = client.describe_parameters(Filters=[ { 'Key': 'Type', @@ -188,7 +187,6 @@ def test_describe_parameters_filter_keyid(): p['KeyId'] = "key:%d" % i client.put_parameter(**p) - response = client.describe_parameters(Filters=[ { 'Key': 'KeyId', @@ -201,6 +199,20 @@ def 
test_describe_parameters_filter_keyid(): ''.should.equal(response.get('NextToken', '')) +@mock_ssm +def test_get_parameter_invalid(): + client = client = boto3.client('ssm', region_name='us-east-1') + response = client.get_parameters( + Names=[ + 'invalid' + ], + WithDecryption=False) + + len(response['Parameters']).should.equal(0) + len(response['InvalidParameters']).should.equal(1) + response['InvalidParameters'][0].should.equal('invalid') + + @mock_ssm def test_put_parameter_secure_default_kms(): client = boto3.client('ssm', region_name='us-east-1') From 05a2715f4b7f273f6f698d21a9127c068dd2c5df Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Wed, 16 Aug 2017 21:09:14 +0900 Subject: [PATCH 135/412] add create_rule to elbv2 --- moto/elbv2/exceptions.py | 41 +++++++ moto/elbv2/models.py | 80 +++++++++++++ moto/elbv2/responses.py | 61 ++++++++++ tests/test_elbv2/test_elbv2.py | 204 +++++++++++++++++++++++++++++++++ 4 files changed, 386 insertions(+) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index 397aa115b..a03cf9a98 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -101,3 +101,44 @@ class EmptyListenersError(ELBClientError): super(EmptyListenersError, self).__init__( "ValidationError", "Listeners cannot be empty") + + +class PriorityInUseError(ELBClientError): + + def __init__(self): + super(PriorityInUseError, self).__init__( + "PriorityInUse", + "The specified priority is in use.") + + +class InvalidConditionFieldError(ELBClientError): + + def __init__(self, invalid_name): + super(InvalidConditionFieldError, self).__init__( + "ValidationError", + "Condition field '%s' must be one of '[path-pattern, host-header]" % (invalid_name)) + + +class InvalidConditionValueError(ELBClientError): + + def __init__(self, msg): + super(InvalidConditionValueError, self).__init__( + "ValidationError", msg) + + +class InvalidActionTypeError(ELBClientError): + + def __init__(self, invalid_name, index): + 
super(InvalidActionTypeError, self).__init__( + "ValidationError", + "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward]" % (invalid_name, index) + ) + + +class ActionTargetGroupNotFoundError(ELBClientError): + + def __init__(self, arn): + super(ActionTargetGroupNotFoundError, self).__init__( + "TargetGroupNotFound", + "Target group '%s' not found" % arn + ) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 10d9ad220..a31e07927 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -14,6 +14,11 @@ from .exceptions import ( SubnetNotFoundError, TargetGroupNotFoundError, TooManyTagsError, + PriorityInUseError, + InvalidConditionFieldError, + InvalidConditionValueError, + InvalidActionTypeError, + ActionTargetGroupNotFoundError, ) @@ -92,6 +97,34 @@ class FakeListener(BaseModel): self.ssl_policy = ssl_policy self.certificate = certificate self.default_actions = default_actions + self._non_default_rules = [] + self._default_rule = FakeRule( + listener_arn=self.arn, + conditions=[], + priority='default', + actions=default_actions, + is_default=True + ) + + @property + def rules(self): + return self._non_default_rules + [self._default_rule] + + + def register(self, rule): + self._non_default_rules.append(rule) + self._non_default_rules = sorted(self._non_default_rules, key=lambda x: x.priority) + + +class FakeRule(BaseModel): + + def __init__(self, listener_arn, conditions, priority, actions, is_default): + self.listener_arn = listener_arn + self.arn = listener_arn.replace(':listener/', ':listener-rule/') + "/%s" % (id(self)) + self.conditions = conditions + self.priority = priority # int or 'default' + self.actions = actions + self.is_default = is_default class FakeBackend(BaseModel): @@ -181,6 +214,53 @@ class ELBv2Backend(BaseBackend): self.load_balancers[arn] = new_load_balancer return new_load_balancer + def create_rule(self, listener_arn, conditions, 
priority, actions): + listeners = self.describe_listeners(None, [listener_arn]) + if not listeners: + raise ListenerNotFound() + listener = listeners[0] + + # validate conditions + for condition in conditions: + field = condition['field'] + if field not in ['path-pattern', 'host-header']: + raise InvalidConditionFieldError(field) + + values = condition['values'] + if len(values) == 0: + raise InvalidConditionValueError('A condition value must be specified') + if len(values) > 1: + raise InvalidConditionValueError( + "The '%s' field contains too many values; the limit is '1'" % field + ) + + # TODO: check pattern of value for 'host-header' + # TODO: check pattern of value for 'path-pattern' + + # validate Priority + for rule in listener.rules: + if rule.priority == priority: + raise PriorityInUseError() + + # validate Actions + target_group_arns = [target_group.arn for target_group in self.target_groups.values()] + for i, action in enumerate(actions): + index = i + 1 + action_type = action['type'] + if action_type not in ['forward']: + raise InvalidActionTypeError(action_type, index) + action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) + + # TODO: check for error 'TooManyRegistrationsForTargetId' + # TODO: check for error 'TooManyRules' + + # create rule + rule = FakeRule(listener.arn, conditions, priority, actions, is_default=False) + listener.register(rule) + return listener.rules + def create_target_group(self, name, **kwargs): for target_group in self.target_groups.values(): if target_group.name == name: diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 751652901..fe4518b25 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -28,6 +28,30 @@ class ELBV2Response(BaseResponse): template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE) return template.render(load_balancer=load_balancer) + def 
create_rule(self): + lister_arn = self._get_param('ListenerArn') + _conditions = self._get_list_prefix('Conditions.member') + conditions = [] + for _condition in _conditions: + condition = {} + condition['field'] = _condition['field'] + values = sorted( + [e for e in _condition.items() if e[0].startswith('values.member')], + key=lambda x: x[0] + ) + condition['values'] = [e[1] for e in values] + conditions.append(condition) + priority = self._get_int_param('Priority') + actions = self._get_list_prefix('Actions.member') + rules = self.elbv2_backend.create_rule( + listener_arn=lister_arn, + conditions=conditions, + priority=priority, + actions=actions + ) + template = self.response_template(CREATE_RULE_TEMPLATE) + return template.render(rules=rules) + def create_target_group(self): name = self._get_param('Name') vpc_id = self._get_param('VpcId') @@ -321,6 +345,43 @@ CREATE_LOAD_BALANCER_TEMPLATE = """ + + + {% for rule in rules %} + + {{ "true" if rule.is_default else "false" }} + + {% for condition in rule.conditions %} + + {{ condition["field"] }} + + {% for value in condition["values"] %} + {{ value }} + {% endfor %} + + + {% endfor %} + + {{ rule.priority }} + + {% for action in rule.actions %} + + {{ action["type"] }} + {{ action["target_group_arn"] }} + + {% endfor %} + + {{ rule.arn }} + + {% endfor %} + + + + c5478c83-f397-11e5-bb98-57195a6eb84a + +""" + CREATE_TARGET_GROUP_TEMPLATE = """ diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index ece17571d..580cb6308 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -518,3 +518,207 @@ def test_target_group_attributes(): attributes = {attr['Key']: attr['Value'] for attr in response['Attributes']} attributes['stickiness.type'].should.equal('lb_cookie') attributes['stickiness.enabled'].should.equal('true') + + +@mock_elbv2 +@mock_ec2 +def test_create_listener_rules(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', 
region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # Plain HTTP listener + response = conn.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(80) + listener.get('Protocol').should.equal('HTTP') + listener.get('DefaultActions').should.equal([{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward'}]) + http_listener_arn = listener.get('ListenerArn') + + # create first rule + priority = 100 + host = 'xxx.example.com' + path_pattern = 'foobar' + rules = conn.create_rule( + ListenerArn=http_listener_arn, + Priority=priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [ host ] + }, + { + 'Field': 'path-pattern', + 'Values': [ path_pattern ] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 
'forward' + }] + ) + rules['Rules'][0].get('Priority').should.equal('100') + + # check if rules is sorted by priority + priority = 50 + host = 'yyy.example.com' + path_pattern = 'foobar' + rules = conn.create_rule( + ListenerArn=http_listener_arn, + Priority=priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [ host ] + }, + { + 'Field': 'path-pattern', + 'Values': [ path_pattern ] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + ) + priorities = [rule['Priority'] for rule in rules['Rules']] + priorities.should.equal(['50', '100', 'default']) + + # test for invalid action type + safe_priority = 2 + with assert_raises(ClientError): + r = conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [ host ] + }, + { + 'Field': 'path-pattern', + 'Values': [ path_pattern ] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward2' + }] + ) + + # test for invalid action type + safe_priority = 2 + invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x' + with assert_raises(ClientError): + r = conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [ host ] + }, + { + 'Field': 'path-pattern', + 'Values': [ path_pattern ] + }], + Actions=[{ + 'TargetGroupArn': invalid_target_group_arn, + 'Type': 'forward' + }] + ) + + # test for PriorityInUse + host2 = 'yyy.example.com' + with assert_raises(ClientError): + r = conn.create_rule( + ListenerArn=http_listener_arn, + Priority=priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [ host ] + }, + { + 'Field': 'path-pattern', + 'Values': [ path_pattern ] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + ) + + # test for invalid condition field_name + safe_priority = 2 + with assert_raises(ClientError): + r = 
conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'xxxxxxx', + 'Values': [ host ] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + ) + + # test for emptry condition value + safe_priority = 2 + with assert_raises(ClientError): + r = conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + ) + + # test for multiple condition value + safe_priority = 2 + with assert_raises(ClientError): + r = conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [host, host] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + ) From 3ac10945c12f3b7ca8fa7bed4015c4d49ca28798 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 00:35:45 +0900 Subject: [PATCH 136/412] add delete_rule to elbv2 --- moto/elbv2/models.py | 12 ++++++++++++ moto/elbv2/responses.py | 13 +++++++++++++ tests/test_elbv2/test_elbv2.py | 4 ++++ 3 files changed, 29 insertions(+) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index a31e07927..9182c28d5 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -357,6 +357,18 @@ class ELBv2Backend(BaseBackend): def delete_load_balancer(self, arn): self.load_balancers.pop(arn, None) + def delete_rule(self, arn): + for load_balancer_arn in self.load_balancers: + listeners = self.load_balancers.get(load_balancer_arn).listeners.values() + for listener in listeners: + for rule in listener.rules: + if rule.arn == arn: + listener.rules.remove(rule) + return + + # should raise RuleNotFound Error according to the AWS API doc + # however, boto3 does't raise error even if rule is not found + def delete_target_group(self, target_group_arn): 
target_group = self.target_groups.pop(target_group_arn) if target_group: diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index fe4518b25..05e21effe 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -157,6 +157,12 @@ class ELBV2Response(BaseResponse): template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE) return template.render() + def delete_rule(self): + arn = self._get_param('RuleArn') + self.elbv2_backend.delete_rule(arn) + template = self.response_template(DELETE_RULE_TEMPLATE) + return template.render() + def delete_target_group(self): arn = self._get_param('TargetGroupArn') self.elbv2_backend.delete_target_group(arn) @@ -448,6 +454,13 @@ DELETE_LOAD_BALANCER_TEMPLATE = """ + + + 1549581b-12b7-11e3-895e-1334aEXAMPLE + +""" + DELETE_TARGET_GROUP_TEMPLATE = """ diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 580cb6308..81fda21b4 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -614,6 +614,10 @@ def test_create_listener_rules(): priorities = [rule['Priority'] for rule in rules['Rules']] priorities.should.equal(['50', '100', 'default']) + arn = rules['Rules'][0]['RuleArn'] + conn.delete_rule(RuleArn=arn) + # TODO: describe rule and ensure rule is removed + # test for invalid action type safe_priority = 2 with assert_raises(ClientError): From a73fa64043b6174518fbbee1a793893e99881968 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 01:57:02 +0900 Subject: [PATCH 137/412] add describe_rules to elbv2 --- moto/elbv2/exceptions.py | 8 +++++ moto/elbv2/models.py | 24 ++++++++++++++ moto/elbv2/responses.py | 60 ++++++++++++++++++++++++++++++++++ tests/test_elbv2/test_elbv2.py | 27 ++++++++++++++- 4 files changed, 118 insertions(+), 1 deletion(-) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index a03cf9a98..a547c4f88 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -142,3 +142,11 @@ class 
ActionTargetGroupNotFoundError(ELBClientError): "TargetGroupNotFound", "Target group '%s' not found" % arn ) + + +class InvalidDescribeRulesRequest(ELBClientError): + + def __init__(self, msg): + super(InvalidDescribeRulesRequest, self).__init__( + "ValidationError", msg + ) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 9182c28d5..25803ea79 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -19,6 +19,7 @@ from .exceptions import ( InvalidConditionValueError, InvalidActionTypeError, ActionTargetGroupNotFoundError, + InvalidDescribeRulesRequest ) @@ -313,6 +314,29 @@ class ELBv2Backend(BaseBackend): return matched_balancers + def describe_rules(self, listener_arn, rule_arns): + if listener_arn is None and not rule_arns: + raise InvalidDescribeRulesRequest( + "You must specify either listener rule ARNs or a listener ARN" + ) + if listener_arn is not None and rule_arns is not None: + raise InvalidDescribeRulesRequest( + 'Listener rule ARNs and a listener ARN cannot be specified at the same time' + ) + if listener_arn: + listener = self.describe_listeners(None, [listener_arn])[0] + return listener.rules + + # search for rule arns + matched_rules = [] + for load_balancer_arn in self.load_balancers: + listeners = self.load_balancers.get(load_balancer_arn).listeners.values() + for listener in listeners: + for rule in listener.rules: + if rule.arn in rule_arns: + matched_rules.append(rule) + return matched_rules + def describe_target_groups(self, load_balancer_arn, target_group_arns, names): if load_balancer_arn: if load_balancer_arn not in self.load_balancers: diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 05e21effe..16b170c6a 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +import base64 from moto.core.responses import BaseResponse from .models import elbv2_backends from .exceptions import DuplicateTagKeysError @@ -124,6 +125,26 @@ class 
ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) return template.render(load_balancers=load_balancers_resp, marker=next_marker) + def describe_rules(self): + listener_arn = self._get_param('ListenerArn') + rule_arns = self._get_multi_param('RuleArns.member') if any(k for k in list(self.querystring.keys()) if k.startswith('RuleArns.member')) else None + all_rules = self.elbv2_backend.describe_rules(listener_arn, rule_arns) + all_arns = [rule.arn for rule in all_rules] + all_arns = [base64.urlsafe_b64encode(bytes(rule.arn, 'UTF-8')) for rule in all_rules] + page_size = self._get_int_param('PageSize', 50) # set 50 for temporary + + marker = self._get_param('Marker') + if marker: + start = all_arns.index(marker) + 1 + else: + start = 0 + rules_resp = all_rules[start:start + page_size] + next_marker = None + if len(all_rules) > start + page_size: + next_marker = base64.urlsafe_b64encode(bytes(rules_resp[-1].arn, 'UTF-8')) + template = self.response_template(DESCRIBE_RULES_TEMPLATE) + return template.render(rules=rules_resp, marker=next_marker) + def describe_target_groups(self): load_balancer_arn = self._get_param('LoadBalancerArn') target_group_arns = self._get_multi_param('TargetGroupArns.member') @@ -516,6 +537,45 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ + + + {% for rule in rules %} + + {{ "true" if rule.is_default else "false" }} + + {% for condition in rule.conditions %} + + {{ condition["field"] }} + + {% for value in condition["values"] %} + {{ value }} + {% endfor %} + + + {% endfor %} + + {{ rule.priority }} + + {% for action in rule.actions %} + + {{ action["type"] }} + {{ action["target_group_arn"] }} + + {% endfor %} + + {{ rule.arn }} + + {% endfor %} + + {% if marker %} + {{ marker }} + {% endif %} + + + 74926cf3-f3a3-11e5-b543-9f2c3fbb9bee + +""" DESCRIBE_TARGET_GROUPS_TEMPLATE = """ diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 81fda21b4..43847f510 100644 --- 
a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -614,7 +614,32 @@ def test_create_listener_rules(): priorities = [rule['Priority'] for rule in rules['Rules']] priorities.should.equal(['50', '100', 'default']) - arn = rules['Rules'][0]['RuleArn'] + # test for describe listeners + first_rule = rules['Rules'][0] + obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn) + obtained_rules['Rules'].should.equal(rules['Rules']) + + obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']]) + obtained_rules['Rules'].should.equal([first_rule]) + + # test for pagination + obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn, PageSize=1) + len(obtained_rules['Rules']).should.equal(1) + obtained_rules.should.have.key('NextMarker') + + # test for invalid describe rule request + with assert_raises(ClientError): + conn.describe_rules() + with assert_raises(ClientError): + conn.describe_rules(RuleArns=[]) + with assert_raises(ClientError): + conn.describe_rules( + ListenerArn=http_listener_arn, + RuleArns=[first_rule['RuleArn']] + ) + + # delete + arn = first_rule['RuleArn'] conn.delete_rule(RuleArn=arn) # TODO: describe rule and ensure rule is removed From 9bc67794852f688348eded74aa9965a48a8acd32 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 02:25:39 +0900 Subject: [PATCH 138/412] add modify_rules to elbv2 --- moto/elbv2/exceptions.py | 8 +++++ moto/elbv2/models.py | 46 +++++++++++++++++++++++++- moto/elbv2/responses.py | 59 ++++++++++++++++++++++++++++++++++ tests/test_elbv2/test_elbv2.py | 24 +++++++++++++- 4 files changed, 135 insertions(+), 2 deletions(-) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index a547c4f88..705aa9622 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -150,3 +150,11 @@ class InvalidDescribeRulesRequest(ELBClientError): super(InvalidDescribeRulesRequest, self).__init__( "ValidationError", msg ) + + +class 
RuleNotFoundError(ELBClientError): + + def __init__(self): + super(RuleNotFoundError, self).__init__( + "RuleNotFound", + "The specified rule does not exist.") diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 25803ea79..664d2a40e 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -19,7 +19,8 @@ from .exceptions import ( InvalidConditionValueError, InvalidActionTypeError, ActionTargetGroupNotFoundError, - InvalidDescribeRulesRequest + InvalidDescribeRulesRequest, + RuleNotFoundError ) @@ -406,6 +407,49 @@ class ELBv2Backend(BaseBackend): return listener raise ListenerNotFoundError() + def modify_rule(self, rule_arn, conditions, actions): + rules = self.describe_rules(listener_arn=None, rule_arns=[rule_arn]) + if not rules: + raise RuleNotFoundError() + rule = rules[0] + + # validate conditions + for condition in conditions: + field = condition['field'] + if field not in ['path-pattern', 'host-header']: + raise InvalidConditionFieldError(field) + + values = condition['values'] + if len(values) == 0: + raise InvalidConditionValueError('A condition value must be specified') + if len(values) > 1: + raise InvalidConditionValueError( + "The '%s' field contains too many values; the limit is '1'" % field + ) + + # TODO: check pattern of value for 'host-header' + # TODO: check pattern of value for 'path-pattern' + + # validate Actions + target_group_arns = [target_group.arn for target_group in self.target_groups.values()] + for i, action in enumerate(actions): + index = i + 1 + action_type = action['type'] + if action_type not in ['forward']: + raise InvalidActionTypeError(action_type, index) + action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) + + # TODO: check for error 'TooManyRegistrationsForTargetId' + # TODO: check for error 'TooManyRules' + + # modify rule + rule.conditions = conditions + rule.actions = actions + return 
[rule] + + def register_targets(self, target_group_arn, instances): target_group = self.target_groups.get(target_group_arn) if target_group is None: diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 16b170c6a..2e5f1e299 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -196,6 +196,28 @@ class ELBV2Response(BaseResponse): template = self.response_template(DELETE_LISTENER_TEMPLATE) return template.render() + def modify_rule(self): + rule_arn = self._get_param('RuleArn') + _conditions = self._get_list_prefix('Conditions.member') + conditions = [] + for _condition in _conditions: + condition = {} + condition['field'] = _condition['field'] + values = sorted( + [e for e in _condition.items() if e[0].startswith('values.member')], + key=lambda x: x[0] + ) + condition['values'] = [e[1] for e in values] + conditions.append(condition) + actions = self._get_list_prefix('Actions.member') + rules = self.elbv2_backend.modify_rule( + rule_arn=rule_arn, + conditions=conditions, + actions=actions + ) + template = self.response_template(MODIFY_RULE_TEMPLATE) + return template.render(rules=rules) + def modify_target_group_attributes(self): target_group_arn = self._get_param('TargetGroupArn') target_group = self.elbv2_backend.target_groups.get(target_group_arn) @@ -678,6 +700,43 @@ CONFIGURE_HEALTH_CHECK_TEMPLATE = """ + + + {% for rule in rules %} + + {{ "true" if rule.is_default else "false" }} + + {% for condition in rule.conditions %} + + {{ condition["field"] }} + + {% for value in condition["values"] %} + {{ value }} + {% endfor %} + + + {% endfor %} + + {{ rule.priority }} + + {% for action in rule.actions %} + + {{ action["type"] }} + {{ action["target_group_arn"] }} + + {% endfor %} + + {{ rule.arn }} + + {% endfor %} + + + + c5478c83-f397-11e5-bb98-57195a6eb84a + +""" + MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """ diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 43847f510..270dfaafc 100644 --- 
a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -615,10 +615,10 @@ def test_create_listener_rules(): priorities.should.equal(['50', '100', 'default']) # test for describe listeners - first_rule = rules['Rules'][0] obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn) obtained_rules['Rules'].should.equal(rules['Rules']) + first_rule = obtained_rules['Rules'][0] obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']]) obtained_rules['Rules'].should.equal([first_rule]) @@ -638,6 +638,28 @@ def test_create_listener_rules(): RuleArns=[first_rule['RuleArn']] ) + # modify + new_host = 'new.example.com' + new_path_pattern = 'new_path' + modified_rule = conn.modify_rule( + RuleArn=first_rule['RuleArn'], + Conditions=[{ + 'Field': 'host-header', + 'Values': [ new_host ] + }, + { + 'Field': 'path-pattern', + 'Values': [ new_path_pattern ] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + + )['Rules'][0] + rules = conn.describe_rules(ListenerArn=http_listener_arn) + modified_rule.should.equal(rules['Rules'][0]) + # delete arn = first_rule['RuleArn'] conn.delete_rule(RuleArn=arn) From 0aaa624205c324776a8f26b2d02116673d216430 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 02:29:49 +0900 Subject: [PATCH 139/412] Fix respose number of rules of create_rule --- moto/elbv2/models.py | 2 +- tests/test_elbv2/test_elbv2.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 664d2a40e..c51b6a561 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -261,7 +261,7 @@ class ELBv2Backend(BaseBackend): # create rule rule = FakeRule(listener.arn, conditions, priority, actions, is_default=False) listener.register(rule) - return listener.rules + return [rule] def create_target_group(self, name, **kwargs): for target_group in self.target_groups.values(): diff --git 
a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 270dfaafc..0911ff4a1 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -573,7 +573,7 @@ def test_create_listener_rules(): priority = 100 host = 'xxx.example.com' path_pattern = 'foobar' - rules = conn.create_rule( + created_rule = conn.create_rule( ListenerArn=http_listener_arn, Priority=priority, Conditions=[{ @@ -588,8 +588,8 @@ def test_create_listener_rules(): 'TargetGroupArn': target_group.get('TargetGroupArn'), 'Type': 'forward' }] - ) - rules['Rules'][0].get('Priority').should.equal('100') + )['Rules'][0] + created_rule['Priority'].should.equal('100') # check if rules is sorted by priority priority = 50 @@ -611,12 +611,12 @@ def test_create_listener_rules(): 'Type': 'forward' }] ) - priorities = [rule['Priority'] for rule in rules['Rules']] - priorities.should.equal(['50', '100', 'default']) # test for describe listeners obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn) - obtained_rules['Rules'].should.equal(rules['Rules']) + len(obtained_rules['Rules']).should.equal(3) + priorities = [rule['Priority'] for rule in obtained_rules['Rules']] + priorities.should.equal(['50', '100', 'default']) first_rule = obtained_rules['Rules'][0] obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']]) From e07bce003c1b5ea8b9bf785bbef53052dbe14042 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 03:10:26 +0900 Subject: [PATCH 140/412] add set_rule_priorities to elbv2 --- moto/elbv2/exceptions.py | 8 +++++ moto/elbv2/models.py | 36 ++++++++++++++++++++- moto/elbv2/responses.py | 46 ++++++++++++++++++++++++++ tests/test_elbv2/test_elbv2.py | 59 +++++++++++++++++++++------------- 4 files changed, 126 insertions(+), 23 deletions(-) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index 705aa9622..569fa7eed 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -158,3 +158,11 @@ class 
RuleNotFoundError(ELBClientError): super(RuleNotFoundError, self).__init__( "RuleNotFound", "The specified rule does not exist.") + + +class DuplicatePriorityError(ELBClientError): + + def __init__(self, invalid_value): + super(DuplicatePriorityError, self).__init__( + "ValidationError", + "Priority '%s' was provided multiple times" % invalid_value) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index c51b6a561..398a63481 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -20,7 +20,8 @@ from .exceptions import ( InvalidActionTypeError, ActionTargetGroupNotFoundError, InvalidDescribeRulesRequest, - RuleNotFoundError + RuleNotFoundError, + DuplicatePriorityError ) @@ -471,6 +472,39 @@ class ELBv2Backend(BaseBackend): targets = target_group.targets.values() return [target_group.health_for(target) for target in targets] + def set_rule_priorities(self, rule_priorities): + # validate + priorities = [rule_priority['priority'] for rule_priority in rule_priorities] + for priority in set(priorities): + if priorities.count(priority) > 1: + raise DuplicatePriorityError(priority) + + # validate + for rule_priority in rule_priorities: + given_rule_arn = rule_priority['rule_arn'] + priority = rule_priority['priority'] + _given_rules = self.describe_rules(listener_arn=None, rule_arns=[given_rule_arn]) + if not _given_rules: + raise RuleNotFoundError() + given_rule = _given_rules[0] + listeners = self.describe_listeners(None, [given_rule.listener_arn]) + listener = listeners[0] + for rule_in_listener in listener.rules: + if rule_in_listener.priority == priority: + raise PriorityInUseError() + # modify + modified_rules = [] + for rule_priority in rule_priorities: + given_rule_arn = rule_priority['rule_arn'] + priority = rule_priority['priority'] + _given_rules = self.describe_rules(listener_arn=None, rule_arns=[given_rule_arn]) + if not _given_rules: + raise RuleNotFoundError() + given_rule = _given_rules[0] + given_rule.priority = priority + 
modified_rules.append(given_rule) + return modified_rules + elbv2_backends = {} for region in ec2_backends.keys(): diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 2e5f1e299..a798d1e41 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -255,6 +255,14 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE) return template.render(target_health_descriptions=target_health_descriptions) + def set_rule_priorities(self): + rule_priorities = self._get_list_prefix('RulePriorities.member') + for rule_priority in rule_priorities: + rule_priority['priority'] = int(rule_priority['priority']) + rules = self.elbv2_backend.set_rule_priorities(rule_priorities) + template = self.response_template(SET_RULE_PRIORITIES_TEMPLATE) + return template.render(rules=rules) + def add_tags(self): resource_arns = self._get_multi_param('ResourceArns.member') @@ -896,3 +904,41 @@ DESCRIBE_TARGET_HEALTH_TEMPLATE = """ + + + {% for rule in rules %} + + {{ "true" if rule.is_default else "false" }} + + {% for condition in rule.conditions %} + + {{ condition["field"] }} + + {% for value in condition["values"] %} + {{ value }} + {% endfor %} + + + {% endfor %} + + {{ rule.priority }} + + {% for action in rule.actions %} + + {{ action["type"] }} + {{ action["target_group_arn"] }} + + {% endfor %} + + {{ rule.arn }} + + {% endfor %} + + + + 4d7a8036-f3a7-11e5-9c02-8fd20490d5a6 + +""" diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 0911ff4a1..d4220d4c8 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -612,6 +612,27 @@ def test_create_listener_rules(): }] ) + # test for PriorityInUse + host2 = 'yyy.example.com' + with assert_raises(ClientError): + r = conn.create_rule( + ListenerArn=http_listener_arn, + Priority=priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [ host ] + }, + { + 'Field': 'path-pattern', + 'Values': [ path_pattern ] + 
}], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + ) + + # test for describe listeners obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn) len(obtained_rules['Rules']).should.equal(3) @@ -619,6 +640,7 @@ def test_create_listener_rules(): priorities.should.equal(['50', '100', 'default']) first_rule = obtained_rules['Rules'][0] + second_rule = obtained_rules['Rules'][1] obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']]) obtained_rules['Rules'].should.equal([first_rule]) @@ -638,7 +660,7 @@ def test_create_listener_rules(): RuleArns=[first_rule['RuleArn']] ) - # modify + # modify rule new_host = 'new.example.com' new_path_pattern = 'new_path' modified_rule = conn.modify_rule( @@ -660,10 +682,23 @@ def test_create_listener_rules(): rules = conn.describe_rules(ListenerArn=http_listener_arn) modified_rule.should.equal(rules['Rules'][0]) + # modify priority + conn.set_rule_priorities( + RulePriorities=[ + {'RuleArn': first_rule['RuleArn'], 'Priority': int(first_rule['Priority']) - 1} + ] + ) + with assert_raises(ClientError): + conn.set_rule_priorities( + RulePriorities=[ + {'RuleArn': first_rule['RuleArn'], 'Priority': 999}, + {'RuleArn': second_rule['RuleArn'], 'Priority': 999} + ] + ) + # delete arn = first_rule['RuleArn'] conn.delete_rule(RuleArn=arn) - # TODO: describe rule and ensure rule is removed # test for invalid action type safe_priority = 2 @@ -706,26 +741,6 @@ def test_create_listener_rules(): }] ) - # test for PriorityInUse - host2 = 'yyy.example.com' - with assert_raises(ClientError): - r = conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [ host ] - }, - { - 'Field': 'path-pattern', - 'Values': [ path_pattern ] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - # test for invalid condition field_name safe_priority = 2 with 
assert_raises(ClientError): From ea2a973813de24ff674bc809dd8d6334ec1bf87b Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 03:22:40 +0900 Subject: [PATCH 141/412] fix syntax --- moto/elbv2/models.py | 6 ++---- moto/elbv2/responses.py | 5 ++--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 398a63481..b30ab5764 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -113,7 +113,6 @@ class FakeListener(BaseModel): def rules(self): return self._non_default_rules + [self._default_rule] - def register(self, rule): self._non_default_rules.append(rule) self._non_default_rules = sorted(self._non_default_rules, key=lambda x: x.priority) @@ -125,7 +124,7 @@ class FakeRule(BaseModel): self.listener_arn = listener_arn self.arn = listener_arn.replace(':listener/', ':listener-rule/') + "/%s" % (id(self)) self.conditions = conditions - self.priority = priority # int or 'default' + self.priority = priority # int or 'default' self.actions = actions self.is_default = is_default @@ -220,7 +219,7 @@ class ELBv2Backend(BaseBackend): def create_rule(self, listener_arn, conditions, priority, actions): listeners = self.describe_listeners(None, [listener_arn]) if not listeners: - raise ListenerNotFound() + raise ListenerNotFoundError() listener = listeners[0] # validate conditions @@ -450,7 +449,6 @@ class ELBv2Backend(BaseBackend): rule.actions = actions return [rule] - def register_targets(self, target_group_arn, instances): target_group = self.target_groups.get(target_group_arn) if target_group is None: diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index a798d1e41..5dcc78a75 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -131,7 +131,7 @@ class ELBV2Response(BaseResponse): all_rules = self.elbv2_backend.describe_rules(listener_arn, rule_arns) all_arns = [rule.arn for rule in all_rules] all_arns = [base64.urlsafe_b64encode(bytes(rule.arn, 'UTF-8')) for 
rule in all_rules] - page_size = self._get_int_param('PageSize', 50) # set 50 for temporary + page_size = self._get_int_param('PageSize', 50) # set 50 for temporary marker = self._get_param('Marker') if marker: @@ -905,8 +905,7 @@ DESCRIBE_TARGET_HEALTH_TEMPLATE = """ +SET_RULE_PRIORITIES_TEMPLATE = """ {% for rule in rules %} From edc2e70fcf8a6840e12b0f83087b1a5943c67f11 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 03:56:01 +0900 Subject: [PATCH 142/412] make b64encode work both on python2 and python3 --- moto/elbv2/responses.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 5dcc78a75..ec26922ec 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +import sys import base64 from moto.core.responses import BaseResponse from .models import elbv2_backends @@ -125,12 +126,17 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) return template.render(load_balancers=load_balancers_resp, marker=next_marker) + def _b64_encode(self, s): + if sys.version_info >= (3, 0): + return base64.urlsafe_b64encode(bytes(s, 'UTF-8')) + return s + def describe_rules(self): listener_arn = self._get_param('ListenerArn') rule_arns = self._get_multi_param('RuleArns.member') if any(k for k in list(self.querystring.keys()) if k.startswith('RuleArns.member')) else None all_rules = self.elbv2_backend.describe_rules(listener_arn, rule_arns) all_arns = [rule.arn for rule in all_rules] - all_arns = [base64.urlsafe_b64encode(bytes(rule.arn, 'UTF-8')) for rule in all_rules] + all_arns = [self._b64_encode(rule.arn) for rule in all_rules] page_size = self._get_int_param('PageSize', 50) # set 50 for temporary marker = self._get_param('Marker') @@ -141,7 +147,7 @@ class ELBV2Response(BaseResponse): rules_resp = all_rules[start:start + page_size] next_marker = None if 
len(all_rules) > start + page_size: - next_marker = base64.urlsafe_b64encode(bytes(rules_resp[-1].arn, 'UTF-8')) + next_marker = rules_resp[-1].arn template = self.response_template(DESCRIBE_RULES_TEMPLATE) return template.render(rules=rules_resp, marker=next_marker) From 5c0d5e920ac3a9dba8a3377e5640a04717f1e311 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 04:28:32 +0900 Subject: [PATCH 143/412] rename test_create_listener_rules to test_handle_listener_rules --- tests/test_elbv2/test_elbv2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index d4220d4c8..dec708fd5 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -522,7 +522,7 @@ def test_target_group_attributes(): @mock_elbv2 @mock_ec2 -def test_create_listener_rules(): +def test_handle_listener_rules(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = boto3.resource('ec2', region_name='us-east-1') From 2b10ef85175cf96d567ca82afde5e182b82cf93b Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 04:37:42 +0900 Subject: [PATCH 144/412] dont use base64 for marker at describe_rules --- moto/elbv2/responses.py | 9 +-------- tests/test_elbv2/test_elbv2.py | 6 ++++++ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index ec26922ec..a9635029c 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -1,6 +1,4 @@ from __future__ import unicode_literals -import sys -import base64 from moto.core.responses import BaseResponse from .models import elbv2_backends from .exceptions import DuplicateTagKeysError @@ -126,17 +124,11 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) return template.render(load_balancers=load_balancers_resp, marker=next_marker) - def _b64_encode(self, s): - if sys.version_info >= (3, 0): - return 
base64.urlsafe_b64encode(bytes(s, 'UTF-8')) - return s - def describe_rules(self): listener_arn = self._get_param('ListenerArn') rule_arns = self._get_multi_param('RuleArns.member') if any(k for k in list(self.querystring.keys()) if k.startswith('RuleArns.member')) else None all_rules = self.elbv2_backend.describe_rules(listener_arn, rule_arns) all_arns = [rule.arn for rule in all_rules] - all_arns = [self._b64_encode(rule.arn) for rule in all_rules] page_size = self._get_int_param('PageSize', 50) # set 50 for temporary marker = self._get_param('Marker') @@ -146,6 +138,7 @@ class ELBV2Response(BaseResponse): start = 0 rules_resp = all_rules[start:start + page_size] next_marker = None + if len(all_rules) > start + page_size: next_marker = rules_resp[-1].arn template = self.response_template(DESCRIBE_RULES_TEMPLATE) diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index dec708fd5..a37eaa9bc 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -648,6 +648,12 @@ def test_handle_listener_rules(): obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn, PageSize=1) len(obtained_rules['Rules']).should.equal(1) obtained_rules.should.have.key('NextMarker') + next_marker = obtained_rules['NextMarker'] + + following_rules = conn.describe_rules(ListenerArn=http_listener_arn, PageSize=1, Marker=next_marker) + len(following_rules['Rules']).should.equal(1) + following_rules.should.have.key('NextMarker') + following_rules['Rules'][0]['RuleArn'].should_not.equal(obtained_rules['Rules'][0]['RuleArn']) # test for invalid describe rule request with assert_raises(ClientError): From e57d72ea2b0f7cbb7aee0a7866a61915a4c4ac22 Mon Sep 17 00:00:00 2001 From: Ali Rizwan Date: Thu, 17 Aug 2017 10:45:43 +0200 Subject: [PATCH 145/412] Test travis Signed-off-by: Ali Rizwan --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 4783e13c2..fccbdde27 100644 --- 
a/.travis.yml +++ b/.travis.yml @@ -11,7 +11,7 @@ install: - travis_retry pip install boto3 - travis_retry pip install . - travis_retry pip install -r requirements-dev.txt - - travis_retry pip install coveralls + - travis_retry pip install coveralls==1.1 - | if [ "$TEST_SERVER_MODE" = "true" ]; then AWS_SECRET_ACCESS_KEY=server_secret AWS_ACCESS_KEY_ID=server_key moto_server -p 5000& From 04880dec226db6c562399a04fb0f28c167504bb4 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 23:34:03 +0900 Subject: [PATCH 146/412] use requests version more than 2.5.0, because requests.request started support json argument from v2.4.2 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 setup.py diff --git a/setup.py b/setup.py old mode 100644 new mode 100755 index 289c1684c..e55f867b4 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ install_requires = [ "boto>=2.36.0", "boto3>=1.2.1", "cookies", - "requests>=2.0", + "requests>=2.5", "xmltodict", "dicttoxml", "six", From ccf4cf28b1a2918210429feb027388876b911351 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 22:40:39 +0900 Subject: [PATCH 147/412] escape json string when sending message from sns mock to sqs mock --- moto/sns/models.py | 3 ++- tests/test_sns/test_publishing_boto3.py | 31 +++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 41b858891..23f6fcff2 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -81,7 +81,8 @@ class Subscription(BaseModel): if self.protocol == 'sqs': queue_name = self.endpoint.split(":")[-1] region = self.endpoint.split(":")[3] - sqs_backends[region].send_message(queue_name, message) + escaped_message = message.replace('"', '\\"') + sqs_backends[region].send_message(queue_name, escaped_message) elif self.protocol in ['http', 'https']: post_data = self.get_post_data(message, message_id) requests.post(self.endpoint, json=post_data) 
diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 00c9ac7e2..f780cf761 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -34,6 +34,37 @@ def test_publish_to_sqs(): messages[0].body.should.equal('my message') +@mock_sqs +@mock_sns +def test_publish_to_sqs_dump_json(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + + message = json.dumps({ + "Records": [{ + "eventVersion": "2.0", + "eventSource": "aws:s3", + "s3": { + "s3SchemaVersion": "1.0" + } + }] + }) + conn.publish(TopicArn=topic_arn, Message=message) + queue = sqs_conn.get_queue_by_name(QueueName="test-queue") + messages = queue.receive_messages(MaxNumberOfMessages=1) + expected = '{\\"Records\\": [{\\"eventVersion\\": \\"2.0\\", \\"eventSource\\": \\"aws:s3\\", \\"s3\\": {\\"s3SchemaVersion\\": \\"1.0\\"}}]}' + messages[0].body.should.equal(expected) + + @mock_sqs @mock_sns def test_publish_to_sqs_in_different_region(): From f38212a545f467b43bde90567d81f3982a3774fc Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 17 Aug 2017 23:26:24 +0900 Subject: [PATCH 148/412] sort keys when dumping dict to json --- tests/test_sns/test_publishing_boto3.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index f780cf761..967de40e0 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -57,11 +57,11 @@ def test_publish_to_sqs_dump_json(): "s3SchemaVersion": "1.0" } }] - }) + }, sort_keys=True) 
conn.publish(TopicArn=topic_arn, Message=message) queue = sqs_conn.get_queue_by_name(QueueName="test-queue") messages = queue.receive_messages(MaxNumberOfMessages=1) - expected = '{\\"Records\\": [{\\"eventVersion\\": \\"2.0\\", \\"eventSource\\": \\"aws:s3\\", \\"s3\\": {\\"s3SchemaVersion\\": \\"1.0\\"}}]}' + expected = u'{\\"Records\\": [{\\"eventSource\\": \\"aws:s3\\", \\"eventVersion\\": \\"2.0\\", \\"s3\\": {\\"s3SchemaVersion\\": \\"1.0\\"}}]}' messages[0].body.should.equal(expected) From 469cfb7b3e34fd71c5f00ed6f5b9e4e2e575a3c3 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 18 Aug 2017 15:56:53 +0900 Subject: [PATCH 149/412] fix bug that Names filter dont work at describe_target_groups --- moto/elbv2/models.py | 2 +- tests/test_elbv2/test_elbv2.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index b30ab5764..87e6d7738 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -354,7 +354,7 @@ class ELBv2Backend(BaseBackend): matched = [] for name in names: found = None - for target_group in self.target_groups: + for target_group in self.target_groups.values(): if target_group.name == name: found = target_group if not found: diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index a37eaa9bc..4a47397ef 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -485,6 +485,12 @@ def test_target_group_attributes(): response.get('TargetGroups').should.have.length_of(1) target_group_arn = target_group['TargetGroupArn'] + # check if Names filter works + response = conn.describe_target_groups(Names=[]) + response = conn.describe_target_groups(Names=['a-target']) + response.get('TargetGroups').should.have.length_of(1) + target_group_arn = target_group['TargetGroupArn'] + # The attributes should start with the two defaults response = conn.describe_target_group_attributes(TargetGroupArn=target_group_arn) 
response['Attributes'].should.have.length_of(2) From d4c6111c40c631602c1648a15859b02defe95ea7 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 18 Aug 2017 23:54:28 +0900 Subject: [PATCH 150/412] raise error when name is more than 32 when creating target group --- moto/elbv2/exceptions.py | 8 ++++++++ moto/elbv2/models.py | 5 ++++- tests/test_elbv2/test_elbv2.py | 17 +++++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index 569fa7eed..8ac4ef428 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -166,3 +166,11 @@ class DuplicatePriorityError(ELBClientError): super(DuplicatePriorityError, self).__init__( "ValidationError", "Priority '%s' was provided multiple times" % invalid_value) + + +class InvalidTargetGroupNameError(ELBClientError): + + def __init__(self, invalid_name): + super(InvalidTargetGroupNameError, self).__init__( + "ValidationError", + "Target group name '%s' cannot be longer than '32' characters" % invalid_name) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index b30ab5764..cb7be361c 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -21,7 +21,8 @@ from .exceptions import ( ActionTargetGroupNotFoundError, InvalidDescribeRulesRequest, RuleNotFoundError, - DuplicatePriorityError + DuplicatePriorityError, + InvalidTargetGroupNameError ) @@ -264,6 +265,8 @@ class ELBv2Backend(BaseBackend): return [rule] def create_target_group(self, name, **kwargs): + if len(name) > 32: + raise InvalidTargetGroupNameError(name) for target_group in self.target_groups.values(): if target_group.name == name: raise DuplicateTargetGroupName() diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index a37eaa9bc..f69e9a963 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -327,6 +327,23 @@ def test_create_target_group_and_listeners(): response = conn.describe_target_groups() 
response.get('TargetGroups').should.have.length_of(0) + # Fail to create target group with name which length is 33 + long_name = 'A' * 33 + with assert_raises(ClientError): + conn.create_target_group( + Name=long_name, + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + @mock_elbv2 @mock_ec2 From 775b8a953a7a614e7ebeb149d63065b3bf98df65 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Sat, 19 Aug 2017 00:16:11 +0900 Subject: [PATCH 151/412] add validation for target name --- moto/elbv2/exceptions.py | 6 ++--- moto/elbv2/models.py | 20 +++++++++++++++- tests/test_elbv2/test_elbv2.py | 42 ++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 4 deletions(-) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index 8ac4ef428..c6e2256e8 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -170,7 +170,7 @@ class DuplicatePriorityError(ELBClientError): class InvalidTargetGroupNameError(ELBClientError): - def __init__(self, invalid_name): + def __init__(self, msg): super(InvalidTargetGroupNameError, self).__init__( - "ValidationError", - "Target group name '%s' cannot be longer than '32' characters" % invalid_name) + "ValidationError", msg + ) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index cb7be361c..1caa57f91 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import datetime +import re from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.ec2.models import ec2_backends @@ -266,7 +267,24 @@ class ELBv2Backend(BaseBackend): def create_target_group(self, name, **kwargs): if len(name) > 32: - raise InvalidTargetGroupNameError(name) + raise InvalidTargetGroupNameError( + "Target group 
name '%s' cannot be longer than '32' characters" % name + ) + if not re.match('^[a-zA-Z0-9\-]+$', name): + raise InvalidTargetGroupNameError( + "Target group name '%s' can only contain characters that are alphanumeric characters or hyphens(-)" % name + ) + + # undocumented validation + if not re.match('(?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$', name): + raise InvalidTargetGroupNameError( + "1 validation error detected: Value '%s' at 'targetGroup.targetGroupArn.targetGroupName' failed to satisfy constraint: Member must satisfy regular expression pattern: (?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" % name + ) + + if name.startswith('-') or name.endswith('-'): + raise InvalidTargetGroupNameError( + "Target group name '%s' cannot begin or end with '-'" % name + ) for target_group in self.target_groups.values(): if target_group.name == name: raise DuplicateTargetGroupName() diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index f69e9a963..a60bdcd4e 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -327,6 +327,15 @@ def test_create_target_group_and_listeners(): response = conn.describe_target_groups() response.get('TargetGroups').should.have.length_of(0) + +@mock_elbv2 +@mock_ec2 +def test_create_invalid_target_group(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + # Fail to create target group with name which length is 33 long_name = 'A' * 33 with assert_raises(ClientError): @@ -344,6 +353,39 @@ def test_create_target_group_and_listeners(): UnhealthyThresholdCount=2, Matcher={'HttpCode': '200'}) + invalid_names = ['-name', 'name-', '-name-', 'example.com', 'test@test', 'Na--me'] + for name in invalid_names: + with assert_raises(ClientError): + conn.create_target_group( + Name=name, + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + 
HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + + valid_names = ['name', 'Name', '000'] + for name in valid_names: + conn.create_target_group( + Name=name, + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + @mock_elbv2 @mock_ec2 From b6cc208534228519aa43d05429a54b461918818e Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Tue, 22 Aug 2017 04:28:58 +0900 Subject: [PATCH 152/412] ELBv2 modify_rule changes listener rule partially (#1073) * modify_rule changes listener rule partially * fix syntax * fix outdated code * fix outdated code --- moto/elbv2/exceptions.py | 9 ++++++ moto/elbv2/models.py | 58 +++++++++++++++++++--------------- tests/test_elbv2/test_elbv2.py | 14 ++++---- 3 files changed, 48 insertions(+), 33 deletions(-) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index c6e2256e8..e22820966 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -174,3 +174,12 @@ class InvalidTargetGroupNameError(ELBClientError): super(InvalidTargetGroupNameError, self).__init__( "ValidationError", msg ) + + +class InvalidModifyRuleArgumentsError(ELBClientError): + + def __init__(self): + super(InvalidModifyRuleArgumentsError, self).__init__( + "ValidationError", + "Either conditions or actions must be specified" + ) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 0f3ee4518..9092084e6 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -23,7 +23,8 @@ from .exceptions import ( InvalidDescribeRulesRequest, RuleNotFoundError, DuplicatePriorityError, - InvalidTargetGroupNameError + InvalidTargetGroupNameError, + 
InvalidModifyRuleArgumentsError ) @@ -429,45 +430,50 @@ class ELBv2Backend(BaseBackend): raise ListenerNotFoundError() def modify_rule(self, rule_arn, conditions, actions): + # if conditions or actions is empty list, do not update the attributes + if not conditions and not actions: + raise InvalidModifyRuleArgumentsError() rules = self.describe_rules(listener_arn=None, rule_arns=[rule_arn]) if not rules: raise RuleNotFoundError() rule = rules[0] - # validate conditions - for condition in conditions: - field = condition['field'] - if field not in ['path-pattern', 'host-header']: - raise InvalidConditionFieldError(field) + if conditions: + for condition in conditions: + field = condition['field'] + if field not in ['path-pattern', 'host-header']: + raise InvalidConditionFieldError(field) - values = condition['values'] - if len(values) == 0: - raise InvalidConditionValueError('A condition value must be specified') - if len(values) > 1: - raise InvalidConditionValueError( - "The '%s' field contains too many values; the limit is '1'" % field - ) - - # TODO: check pattern of value for 'host-header' - # TODO: check pattern of value for 'path-pattern' + values = condition['values'] + if len(values) == 0: + raise InvalidConditionValueError('A condition value must be specified') + if len(values) > 1: + raise InvalidConditionValueError( + "The '%s' field contains too many values; the limit is '1'" % field + ) + # TODO: check pattern of value for 'host-header' + # TODO: check pattern of value for 'path-pattern' # validate Actions target_group_arns = [target_group.arn for target_group in self.target_groups.values()] - for i, action in enumerate(actions): - index = i + 1 - action_type = action['type'] - if action_type not in ['forward']: - raise InvalidActionTypeError(action_type, index) - action_target_group_arn = action['target_group_arn'] - if action_target_group_arn not in target_group_arns: - raise ActionTargetGroupNotFoundError(action_target_group_arn) + if actions: + for 
i, action in enumerate(actions): + index = i + 1 + action_type = action['type'] + if action_type not in ['forward']: + raise InvalidActionTypeError(action_type, index) + action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) # TODO: check for error 'TooManyRegistrationsForTargetId' # TODO: check for error 'TooManyRules' # modify rule - rule.conditions = conditions - rule.actions = actions + if conditions: + rule.conditions = conditions + if actions: + rule.actions = actions return [rule] def register_targets(self, target_group_arn, instances): diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 43d72b393..be3f484e0 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -731,7 +731,7 @@ def test_handle_listener_rules(): RuleArns=[first_rule['RuleArn']] ) - # modify rule + # modify rule partially new_host = 'new.example.com' new_path_pattern = 'new_path' modified_rule = conn.modify_rule( @@ -743,15 +743,15 @@ def test_handle_listener_rules(): { 'Field': 'path-pattern', 'Values': [ new_path_pattern ] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' }] - )['Rules'][0] + rules = conn.describe_rules(ListenerArn=http_listener_arn) - modified_rule.should.equal(rules['Rules'][0]) + obtained_rule = rules['Rules'][0] + modified_rule.should.equal(obtained_rule) + obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host) + obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern) + obtained_rule['Actions'][0]['TargetGroupArn'].should.equal(target_group.get('TargetGroupArn')) # modify priority conn.set_rule_priorities( From ce7eabd44a137728565385bbb560f25b03546da5 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Tue, 22 Aug 2017 04:29:34 +0900 Subject: [PATCH 153/412] Envelope sns message when sending to sqs (#1066) * envelop 
message when sendig from sns to sqs * add test for publishing sns message to sqs * specify separators options to json.dumps to avoid json bug in python2 * remove unused print * replace time string for testing server mode --- moto/sns/models.py | 4 +-- tests/test_sns/test_publishing.py | 23 ++++++++++++--- tests/test_sns/test_publishing_boto3.py | 38 +++++++++++++++++++------ 3 files changed, 51 insertions(+), 14 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 23f6fcff2..dc7420db4 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -81,8 +81,8 @@ class Subscription(BaseModel): if self.protocol == 'sqs': queue_name = self.endpoint.split(":")[-1] region = self.endpoint.split(":")[3] - escaped_message = message.replace('"', '\\"') - sqs_backends[region].send_message(queue_name, escaped_message) + enveloped_message = json.dumps(self.get_post_data(message, message_id), sort_keys=True, indent=2, separators=(',', ': ')) + sqs_backends[region].send_message(queue_name, enveloped_message) elif self.protocol in ['http', 'https']: post_data = self.get_post_data(message, message_id) requests.post(self.endpoint, json=post_data) diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index dd75ff4be..b626e2fac 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals from six.moves.urllib.parse import parse_qs import boto +import re from freezegun import freeze_time import sure # noqa @@ -9,6 +10,9 @@ from moto.packages.responses import responses from moto import mock_sns_deprecated, mock_sqs_deprecated +MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": 
"https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' + + @mock_sqs_deprecated @mock_sns_deprecated def test_publish_to_sqs(): @@ -24,11 +28,16 @@ def test_publish_to_sqs(): conn.subscribe(topic_arn, "sqs", "arn:aws:sqs:us-east-1:123456789012:test-queue") - conn.publish(topic=topic_arn, message="my message") + message_to_publish = 'my message' + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(topic=topic_arn, message=message_to_publish) + published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] queue = sqs_conn.get_queue("test-queue") message = queue.read(1) - message.get_body().should.equal('my message') + expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, 'us-east-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) + acquired_message.should.equal(expected) @mock_sqs_deprecated @@ -46,8 +55,14 @@ def test_publish_to_sqs_in_different_region(): conn.subscribe(topic_arn, "sqs", "arn:aws:sqs:us-west-2:123456789012:test-queue") - conn.publish(topic=topic_arn, message="my message") + message_to_publish = 'my message' + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(topic=topic_arn, message=message_to_publish) + published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] queue = sqs_conn.get_queue("test-queue") message = queue.read(1) - message.get_body().should.equal('my message') + expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, 
'us-west-1') + + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) + acquired_message.should.equal(expected) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 967de40e0..cfb57b9ec 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -5,13 +5,20 @@ import json from six.moves.urllib.parse import parse_qs import boto3 +import re from freezegun import freeze_time import sure # noqa from moto.packages.responses import responses from moto import mock_sns, mock_sqs +from freezegun import freeze_time +MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' + + +from nose.plugins.attrib import attr +@attr('slow') @mock_sqs @mock_sns def test_publish_to_sqs(): @@ -26,12 +33,16 @@ def test_publish_to_sqs(): conn.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - - conn.publish(TopicArn=topic_arn, Message="my message") + message = 'my message' + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(TopicArn=topic_arn, Message=message) + published_message_id = published_message['MessageId'] queue = sqs_conn.get_queue_by_name(QueueName="test-queue") messages = 
queue.receive_messages(MaxNumberOfMessages=1) - messages[0].body.should.equal('my message') + expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-east-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) + acquired_message.should.equal(expected) @mock_sqs @@ -58,11 +69,17 @@ def test_publish_to_sqs_dump_json(): } }] }, sort_keys=True) - conn.publish(TopicArn=topic_arn, Message=message) + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(TopicArn=topic_arn, Message=message) + published_message_id = published_message['MessageId'] + queue = sqs_conn.get_queue_by_name(QueueName="test-queue") messages = queue.receive_messages(MaxNumberOfMessages=1) - expected = u'{\\"Records\\": [{\\"eventSource\\": \\"aws:s3\\", \\"eventVersion\\": \\"2.0\\", \\"s3\\": {\\"s3SchemaVersion\\": \\"1.0\\"}}]}' - messages[0].body.should.equal(expected) + + escaped = message.replace('"', '\\"') + expected = MESSAGE_FROM_SQS_TEMPLATE % (escaped, published_message_id, 'us-east-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) + acquired_message.should.equal(expected) @mock_sqs @@ -80,11 +97,16 @@ def test_publish_to_sqs_in_different_region(): Protocol="sqs", Endpoint="arn:aws:sqs:us-west-2:123456789012:test-queue") - conn.publish(TopicArn=topic_arn, Message="my message") + message = 'my message' + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(TopicArn=topic_arn, Message=message) + published_message_id = published_message['MessageId'] queue = sqs_conn.get_queue_by_name(QueueName="test-queue") messages = queue.receive_messages(MaxNumberOfMessages=1) - messages[0].body.should.equal('my message') + expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-west-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", 
u'2015-01-01T12:00:00.000Z', messages[0].body) + acquired_message.should.equal(expected) @freeze_time("2013-01-01") From 2ace59a60e997048e35a5f54704513a0c74ae8ef Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Tue, 22 Aug 2017 04:30:03 +0900 Subject: [PATCH 154/412] fix bug that remove_rule doesnt remove rule correctly (#1074) --- moto/elbv2/models.py | 5 ++++- tests/test_elbv2/test_elbv2.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 9092084e6..fa9cd6426 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -116,6 +116,9 @@ class FakeListener(BaseModel): def rules(self): return self._non_default_rules + [self._default_rule] + def remove_rule(self, rule): + self._non_default_rules.remove(rule) + def register(self, rule): self._non_default_rules.append(rule) self._non_default_rules = sorted(self._non_default_rules, key=lambda x: x.priority) @@ -410,7 +413,7 @@ class ELBv2Backend(BaseBackend): for listener in listeners: for rule in listener.rules: if rule.arn == arn: - listener.rules.remove(rule) + listener.remove_rule(rule) return # should raise RuleNotFound Error according to the AWS API doc diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index be3f484e0..fb140cb69 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -770,6 +770,8 @@ def test_handle_listener_rules(): # delete arn = first_rule['RuleArn'] conn.delete_rule(RuleArn=arn) + rules = conn.describe_rules(ListenerArn=http_listener_arn)['Rules'] + len(rules).should.equal(2) # test for invalid action type safe_priority = 2 From ed5059367c4b567c88106948d993185f9d84d6ea Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 22 Aug 2017 23:58:32 -0700 Subject: [PATCH 155/412] Handling the case of missing listeners (#1077) --- moto/elbv2/models.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 
fa9cd6426..6bc8f860e 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -82,7 +82,7 @@ class FakeTargetGroup(BaseModel): def deregister(self, targets): for target in targets: - t = self.targets.pop(target['id']) + t = self.targets.pop(target['id'], None) if not t: raise InvalidTargetError() @@ -420,14 +420,14 @@ class ELBv2Backend(BaseBackend): # however, boto3 does't raise error even if rule is not found def delete_target_group(self, target_group_arn): - target_group = self.target_groups.pop(target_group_arn) + target_group = self.target_groups.pop(target_group_arn, None) if target_group: return target_group raise TargetGroupNotFoundError() def delete_listener(self, listener_arn): for load_balancer in self.load_balancers.values(): - listener = load_balancer.listeners.pop(listener_arn) + listener = load_balancer.listeners.pop(listener_arn, None) if listener: return listener raise ListenerNotFoundError() From 245e75f7b85082201fada4686925df7eab5ef6ca Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 23 Aug 2017 00:30:48 -0700 Subject: [PATCH 156/412] Fixing trailing whitespace in elbv2 cert (#1078) --- moto/elbv2/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index a9635029c..652ecc566 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -668,7 +668,7 @@ DESCRIBE_LISTENERS_TEMPLATE = """ Date: Fri, 25 Aug 2017 21:46:14 -0700 Subject: [PATCH 158/412] bump version to 1.1.0 (#1084) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e55f867b4..1075c8406 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.0.1', + version='1.1.0', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 1f6b1b8c4a78713e685d20a1825262793a5fdeea Mon Sep 17 00:00:00 2001 From: Daniel Lutsch Date: Fri, 25 Aug 
2017 23:43:29 -0700 Subject: [PATCH 159/412] Add list support to filters (#1083) * initial pass with TODOs * add list support to get_object_value * fix group-id filters * add tests for sg name and id filters --- moto/ec2/utils.py | 10 +++++++-- tests/test_ec2/test_instances.py | 35 ++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 365632abd..8f86d0a8d 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -335,6 +335,11 @@ def get_object_value(obj, attr): val = getattr(val, key) elif isinstance(val, dict): val = val[key] + elif isinstance(val, list): + for item in val: + item_val = get_object_value(item, key) + if item_val: + return item_val else: return None return val @@ -385,8 +390,9 @@ filter_dict_attribute_mapping = { 'state-reason-code': '_state_reason.code', 'source-dest-check': 'source_dest_check', 'vpc-id': 'vpc_id', - 'group-id': 'security_groups', - 'instance.group-id': 'security_groups', + 'group-id': 'security_groups.id', + 'instance.group-id': 'security_groups.id', + 'instance.group-name': 'security_groups.name', 'instance-type': 'instance_type', 'private-ip-address': 'private_ip', 'ip-address': 'public_ip', diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 75f759c0f..a9af7c31f 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -437,6 +437,41 @@ def test_get_instances_filtering_by_ni_private_dns(): ])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) +@mock_ec2 +def test_get_instances_filtering_by_instance_group_name(): + image_id = 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-east-1') + client.create_security_group( + Description='test', + GroupName='test_sg' + ) + client.run_instances(ImageId=image_id, + MinCount=1, + MaxCount=1, + SecurityGroups=['test_sg']) + reservations = client.describe_instances(Filters=[ + {'Name': 'instance.group-name', 
'Values': ['test_sg']} + ])['Reservations'] + reservations[0]['Instances'].should.have.length_of(1) + +@mock_ec2 +def test_get_instances_filtering_by_instance_group_id(): + image_id = 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-east-1') + create_sg = client.create_security_group( + Description='test', + GroupName='test_sg' + ) + group_id = create_sg['GroupId'] + client.run_instances(ImageId=image_id, + MinCount=1, + MaxCount=1, + SecurityGroups=['test_sg']) + reservations = client.describe_instances(Filters=[ + {'Name': 'instance.group-id', 'Values': [group_id]} + ])['Reservations'] + reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2_deprecated def test_get_instances_filtering_by_tag(): conn = boto.connect_ec2() From b1618351987185f7deb48d382b827608c51e570b Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 25 Aug 2017 23:44:03 -0700 Subject: [PATCH 160/412] Create git tag on release (#1085) I'm pretty forgetful so I doubt I'd remember to do this --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index 300067296..5b5ace72f 100644 --- a/Makefile +++ b/Makefile @@ -17,3 +17,5 @@ test_server: publish: python setup.py sdist bdist_wheel upload + git tag $(python setup.py version) + git push push origin $(python setup.py version) From 15e5fe44e96e1bcb7f584d023b2257773150edf2 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 25 Aug 2017 23:45:19 -0700 Subject: [PATCH 161/412] bump version to 1.1.1 (#1089) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 1075c8406..07cf98387 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.0', + version='1.1.1', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From f052757259d5dec48cdfdaa34a6a7a20f22a186e Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 25 Aug 2017 23:47:46 -0700 
Subject: [PATCH 162/412] Fixing typo in Makefile (#1090) --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 5b5ace72f..cb30c5df1 100644 --- a/Makefile +++ b/Makefile @@ -17,5 +17,5 @@ test_server: publish: python setup.py sdist bdist_wheel upload - git tag $(python setup.py version) - git push push origin $(python setup.py version) + git tag $(python setup.py --version) + git push origin $(python setup.py --version) From 9070b1bf66dc4ce19d5aa8c0917b542e170ba9b3 Mon Sep 17 00:00:00 2001 From: sorensolari Date: Sun, 27 Aug 2017 13:57:16 -0600 Subject: [PATCH 163/412] fix uppercase only issue in update_expression #1091 --- moto/dynamodb2/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 7590ee1e1..c68acbbe4 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -118,10 +118,11 @@ class Item(BaseModel): def update(self, update_expression, expression_attribute_names, expression_attribute_values): # Update subexpressions are identifiable by the operator keyword, so split on that and # get rid of the empty leading string. 
- parts = [p for p in re.split(r'\b(SET|REMOVE|ADD|DELETE)\b', update_expression) if p] + parts = [p for p in re.split(r'\b(SET|REMOVE|ADD|DELETE)\b', update_expression, flags=re.I) if p] # make sure that we correctly found only operator/value pairs assert len(parts) % 2 == 0, "Mismatched operators and values in update expression: '{}'".format(update_expression) for action, valstr in zip(parts[:-1:2], parts[1::2]): + action = action.upper() values = valstr.split(',') for value in values: # A Real value From 0c3708a8e7f75d90a588160fbe6807e263e091d9 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 8 Sep 2017 03:19:34 +0900 Subject: [PATCH 164/412] Support SNS subscription attributes (#1087) * remove code for local test * Add SNS set_subscription_attributes and get_subscription_attributes --- moto/sns/exceptions.py | 8 +++ moto/sns/models.py | 23 ++++++- moto/sns/responses.py | 39 ++++++++++++ tests/test_sns/test_publishing_boto3.py | 2 - tests/test_sns/test_subscriptions_boto3.py | 73 ++++++++++++++++++++++ 5 files changed, 142 insertions(+), 3 deletions(-) diff --git a/moto/sns/exceptions.py b/moto/sns/exceptions.py index 092bb9d69..95b91acca 100644 --- a/moto/sns/exceptions.py +++ b/moto/sns/exceptions.py @@ -24,3 +24,11 @@ class SnsEndpointDisabled(RESTError): def __init__(self, message): super(SnsEndpointDisabled, self).__init__( "EndpointDisabled", message) + + +class SNSInvalidParameter(RESTError): + code = 400 + + def __init__(self, message): + super(SNSInvalidParameter, self).__init__( + "InvalidParameter", message) diff --git a/moto/sns/models.py b/moto/sns/models.py index dc7420db4..009398407 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -13,7 +13,7 @@ from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.sqs import sqs_backends from .exceptions import ( - SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled + SNSNotFoundError, DuplicateSnsEndpointError, 
SnsEndpointDisabled, SNSInvalidParameter ) from .utils import make_arn_for_topic, make_arn_for_subscription @@ -76,6 +76,7 @@ class Subscription(BaseModel): self.endpoint = endpoint self.protocol = protocol self.arn = make_arn_for_subscription(self.topic.arn) + self.attributes = {} def publish(self, message, message_id): if self.protocol == 'sqs': @@ -301,6 +302,26 @@ class SNSBackend(BaseBackend): raise SNSNotFoundError( "Endpoint with arn {0} not found".format(arn)) + def get_subscription_attributes(self, arn): + _subscription = [_ for _ in self.subscriptions.values() if _.arn == arn] + if not _subscription: + raise SNSNotFoundError("Subscription with arn {0} not found".format(arn)) + subscription = _subscription[0] + + return subscription.attributes + + def set_subscription_attributes(self, arn, name, value): + if name not in ['RawMessageDelivery', 'DeliveryPolicy']: + raise SNSInvalidParameter('AttributeName') + + # TODO: should do validation + _subscription = [_ for _ in self.subscriptions.values() if _.arn == arn] + if not _subscription: + raise SNSNotFoundError("Subscription with arn {0} not found".format(arn)) + subscription = _subscription[0] + + subscription.attributes[name] = value + sns_backends = {} for region in boto.sns.regions(): diff --git a/moto/sns/responses.py b/moto/sns/responses.py index edb82e40c..9c079b006 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -445,6 +445,20 @@ class SNSResponse(BaseResponse): template = self.response_template(DELETE_ENDPOINT_TEMPLATE) return template.render() + def get_subscription_attributes(self): + arn = self._get_param('SubscriptionArn') + attributes = self.backend.get_subscription_attributes(arn) + template = self.response_template(GET_SUBSCRIPTION_ATTRIBUTES_TEMPLATE) + return template.render(attributes=attributes) + + def set_subscription_attributes(self): + arn = self._get_param('SubscriptionArn') + attr_name = self._get_param('AttributeName') + attr_value = 
self._get_param('AttributeValue') + self.backend.set_subscription_attributes(arn, attr_name, attr_value) + template = self.response_template(SET_SUBSCRIPTION_ATTRIBUTES_TEMPLATE) + return template.render() + CREATE_TOPIC_TEMPLATE = """ @@ -719,3 +733,28 @@ LIST_SUBSCRIPTIONS_BY_TOPIC_TEMPLATE = """384ac68d-3775-11df-8963-01868b7c937a """ + + +# Not responding aws system attribetus like 'Owner' and 'SubscriptionArn' +GET_SUBSCRIPTION_ATTRIBUTES_TEMPLATE = """ + + + {% for name, value in attributes.items() %} + + {{ name }} + {{ value }} + + {% endfor %} + + + + 057f074c-33a7-11df-9540-99d0768312d3 + +""" + + +SET_SUBSCRIPTION_ATTRIBUTES_TEMPLATE = """ + + a8763b99-33a7-11df-a9b7-05d48da6f042 + +""" diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index cfb57b9ec..a53744d63 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -17,8 +17,6 @@ from freezegun import freeze_time MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' -from nose.plugins.attrib import attr -@attr('slow') @mock_sqs @mock_sns def test_publish_to_sqs(): diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index ac325ed20..8cb5c1886 100644 --- 
a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -1,8 +1,12 @@ from __future__ import unicode_literals import boto3 +import json import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + from moto import mock_sns from moto.sns.models import DEFAULT_PAGE_SIZE @@ -124,3 +128,72 @@ def test_subscription_paging(): topic1_subscriptions["Subscriptions"].should.have.length_of( int(DEFAULT_PAGE_SIZE / 3)) topic1_subscriptions.shouldnt.have("NextToken") + + +@mock_sns +def test_set_subscription_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/") + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + subscription_arn = subscription["SubscriptionArn"] + attrs = conn.get_subscription_attributes( + SubscriptionArn=subscription_arn + ) + attrs.should.have.key('Attributes') + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='RawMessageDelivery', + AttributeValue='true' + ) + delivery_policy = json.dumps({ + 'healthyRetryPolicy': { + "numRetries": 10, + "minDelayTarget": 1, + "maxDelayTarget":2 + } + }) + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='DeliveryPolicy', + AttributeValue=delivery_policy + ) + attrs = conn.get_subscription_attributes( + SubscriptionArn=subscription_arn + ) + attrs['Attributes']['RawMessageDelivery'].should.equal('true') + 
attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) + + # not existing subscription + with assert_raises(ClientError): + conn.set_subscription_attributes( + SubscriptionArn='invalid', + AttributeName='RawMessageDelivery', + AttributeValue='true' + ) + with assert_raises(ClientError): + attrs = conn.get_subscription_attributes( + SubscriptionArn='invalid' + ) + + + # invalid attr name + with assert_raises(ClientError): + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='InvalidName', + AttributeValue='true' + ) From ee4ca0c39a01072e1902744b0f5b3be9378c6e6f Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 8 Sep 2017 03:21:44 +0900 Subject: [PATCH 165/412] Support IAM account alias (#1088) --- moto/iam/models.py | 11 ++++++ moto/iam/responses.py | 46 ++++++++++++++++++++++ tests/test_iam/test_iam_account_aliases.py | 20 ++++++++++ 3 files changed, 77 insertions(+) create mode 100644 tests/test_iam/test_iam_account_aliases.py diff --git a/moto/iam/models.py b/moto/iam/models.py index e6f8bae63..0005ec0a7 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -507,6 +507,7 @@ class IAMBackend(BaseBackend): self.users = {} self.credential_report = None self.managed_policies = self._init_managed_policies() + self.account_aliases = [] super(IAMBackend, self).__init__() def _init_managed_policies(self): @@ -957,5 +958,15 @@ class IAMBackend(BaseBackend): report += self.users[user].to_csv() return base64.b64encode(report.encode('ascii')).decode('ascii') + def list_account_aliases(self): + return self.account_aliases + + def create_account_alias(self, alias): + # alias is force updated + self.account_aliases = [alias] + + def delete_account_alias(self, alias): + self.account_aliases = [] + iam_backend = IAMBackend() diff --git a/moto/iam/responses.py b/moto/iam/responses.py index e79d8bc80..13688869e 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -478,6 +478,23 @@ class 
IamResponse(BaseResponse): template = self.response_template(CREDENTIAL_REPORT) return template.render(report=report) + def list_account_aliases(self): + aliases = iam_backend.list_account_aliases() + template = self.response_template(LIST_ACCOUNT_ALIASES_TEMPLATE) + return template.render(aliases=aliases) + + def create_account_alias(self): + alias = self._get_param('AccountAlias') + iam_backend.create_account_alias(alias) + template = self.response_template(CREATE_ACCOUNT_ALIAS_TEMPLATE) + return template.render() + + def delete_account_alias(self): + alias = self._get_param('AccountAlias') + iam_backend.delete_account_alias(alias) + template = self.response_template(DELETE_ACCOUNT_ALIAS_TEMPLATE) + return template.render() + ATTACH_ROLE_POLICY_TEMPLATE = """ @@ -1192,3 +1209,32 @@ LIST_MFA_DEVICES_TEMPLATE = """ 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE """ + + +LIST_ACCOUNT_ALIASES_TEMPLATE = """ + + false + + {% for alias in aliases %} + {{ alias }} + {% endfor %} + + + + c5a076e9-f1b0-11df-8fbe-45274EXAMPLE + +""" + + +CREATE_ACCOUNT_ALIAS_TEMPLATE = """ + + 36b5db08-f1b0-11df-8fbe-45274EXAMPLE + +""" + + +DELETE_ACCOUNT_ALIAS_TEMPLATE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" diff --git a/tests/test_iam/test_iam_account_aliases.py b/tests/test_iam/test_iam_account_aliases.py new file mode 100644 index 000000000..3d927038d --- /dev/null +++ b/tests/test_iam/test_iam_account_aliases.py @@ -0,0 +1,20 @@ +import boto3 +import sure # noqa +from moto import mock_iam + + +@mock_iam() +def test_account_aliases(): + client = boto3.client('iam', region_name='us-east-1') + + alias = 'my-account-name' + aliases = client.list_account_aliases() + aliases.should.have.key('AccountAliases').which.should.equal([]) + + client.create_account_alias(AccountAlias=alias) + aliases = client.list_account_aliases() + aliases.should.have.key('AccountAliases').which.should.equal([alias]) + + client.delete_account_alias(AccountAlias=alias) + aliases = client.list_account_aliases() + 
aliases.should.have.key('AccountAliases').which.should.equal([]) From 76b7101bc34fbf72b374e8188f2e6cdcb57341b2 Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Thu, 7 Sep 2017 11:25:14 -0700 Subject: [PATCH 166/412] Fixed a bug where Dynamo registers STS and sts calls go to dynamo (#1097) Fixes #1095 --- moto/dynamodb/responses.py | 33 +-------------------------------- moto/dynamodb/urls.py | 3 +-- moto/dynamodb2/responses.py | 33 +-------------------------------- moto/dynamodb2/urls.py | 3 +-- 4 files changed, 4 insertions(+), 68 deletions(-) diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py index 0da3e5045..d4f832be2 100644 --- a/moto/dynamodb/responses.py +++ b/moto/dynamodb/responses.py @@ -7,33 +7,6 @@ from moto.core.utils import camelcase_to_underscores from .models import dynamodb_backend, dynamo_json_dump -GET_SESSION_TOKEN_RESULT = """ - - - - - AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L - To6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3z - rkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtp - Z3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE - - - wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY - - 2011-07-11T19:55:29.611Z - AKIAIOSFODNN7EXAMPLE - - - - 58c5dbae-abef-11e0-8cfe-09039844ac7d - -""" - - -def sts_handler(): - return GET_SESSION_TOKEN_RESULT - - class DynamoHandler(BaseResponse): def get_endpoint_name(self, headers): @@ -51,11 +24,7 @@ class DynamoHandler(BaseResponse): return status, self.response_headers, dynamo_json_dump({'__type': type_}) def call_action(self): - body = self.body - if 'GetSessionToken' in body: - return 200, self.response_headers, sts_handler() - - self.body = json.loads(body or '{}') + self.body = json.loads(self.body or '{}') endpoint = self.get_endpoint_name(self.headers) if endpoint: endpoint = camelcase_to_underscores(endpoint) diff --git a/moto/dynamodb/urls.py b/moto/dynamodb/urls.py index 66c15d022..6988f6e15 100644 --- a/moto/dynamodb/urls.py +++ 
b/moto/dynamodb/urls.py @@ -2,8 +2,7 @@ from __future__ import unicode_literals from .responses import DynamoHandler url_bases = [ - "https?://dynamodb.(.+).amazonaws.com", - "https?://sts.amazonaws.com", + "https?://dynamodb.(.+).amazonaws.com" ] url_paths = { diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index bf24e9964..29863d23b 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -8,33 +8,6 @@ from moto.core.utils import camelcase_to_underscores from .models import dynamodb_backend2, dynamo_json_dump -GET_SESSION_TOKEN_RESULT = """ - - - - - AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L - To6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3z - rkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtp - Z3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE - - - wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY - - 2011-07-11T19:55:29.611Z - AKIAIOSFODNN7EXAMPLE - - - - 58c5dbae-abef-11e0-8cfe-09039844ac7d - -""" - - -def sts_handler(): - return GET_SESSION_TOKEN_RESULT - - class DynamoHandler(BaseResponse): def get_endpoint_name(self, headers): @@ -52,11 +25,7 @@ class DynamoHandler(BaseResponse): return status, self.response_headers, dynamo_json_dump({'__type': type_}) def call_action(self): - body = self.body - if 'GetSessionToken' in body: - return 200, self.response_headers, sts_handler() - - self.body = json.loads(body or '{}') + self.body = json.loads(self.body or '{}') endpoint = self.get_endpoint_name(self.headers) if endpoint: endpoint = camelcase_to_underscores(endpoint) diff --git a/moto/dynamodb2/urls.py b/moto/dynamodb2/urls.py index 66c15d022..6988f6e15 100644 --- a/moto/dynamodb2/urls.py +++ b/moto/dynamodb2/urls.py @@ -2,8 +2,7 @@ from __future__ import unicode_literals from .responses import DynamoHandler url_bases = [ - "https?://dynamodb.(.+).amazonaws.com", - "https?://sts.amazonaws.com", + "https?://dynamodb.(.+).amazonaws.com" ] url_paths = { From 
8f0e2e795485dac00d5c6abac0554ac0fea6fcec Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 8 Sep 2017 03:25:59 +0900 Subject: [PATCH 167/412] fix TargetGroupNnotFoundError is not definied correctly (#1099) --- moto/elbv2/exceptions.py | 2 +- tests/test_elbv2/test_elbv2.py | 40 ++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index e22820966..0947535eb 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -42,7 +42,7 @@ class SubnetNotFoundError(ELBClientError): class TargetGroupNotFoundError(ELBClientError): def __init__(self): - super(TooManyTagsError, self).__init__( + super(TargetGroupNotFoundError, self).__init__( "TargetGroupNotFound", "The specified target group does not exist.") diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index fb140cb69..e84cd0080 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -861,3 +861,43 @@ def test_handle_listener_rules(): 'Type': 'forward' }] ) + + +@mock_elbv2 +@mock_ec2 +def test_describe_invalid_target_group(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = conn.create_target_group( + Name='a-target', + 
Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # Check error raises correctly + with assert_raises(ClientError): + conn.describe_target_groups(Names=['invalid']) From 0d122ef86f030588f52b61316b7a4f06ddc01b32 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 8 Sep 2017 03:26:28 +0900 Subject: [PATCH 168/412] add ecr to readme (#1101) --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 251c732eb..4d5d2d7e6 100644 --- a/README.md +++ b/README.md @@ -78,6 +78,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L | - Security Groups | | core endpoints done | | - Tags | | all endpoints done | |------------------------------------------------------------------------------| +| ECR | @mock_ecr | basic endpoints done | +|------------------------------------------------------------------------------| | ECS | @mock_ecs | basic endpoints done | |------------------------------------------------------------------------------| | ELB | @mock_elb | core endpoints done | @@ -155,9 +157,9 @@ moto 1.0.X mock docorators are defined for boto3 and do not work with boto2. 
Use Using moto with boto2 ```python -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated import boto - + @mock_ec2_deprecated def test_something_with_ec2(): ec2_conn = boto.ec2.connect_to_region('us-east-1') From 2f6f42a183c561ce8836433fb4d3349b0d6306be Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 8 Sep 2017 03:28:15 +0900 Subject: [PATCH 169/412] handle short form function in cfn yaml template (#1103) --- moto/cloudformation/models.py | 3 +- moto/cloudformation/responses.py | 3 +- moto/cloudformation/utils.py | 20 ++++ .../test_cloudformation_stack_crud_boto3.py | 103 +++++++++++++++++- .../test_cloudformation/test_stack_parsing.py | 47 ++++++++ 5 files changed, 172 insertions(+), 4 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index ec922d8f5..e579e4c08 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -9,7 +9,7 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from .parsing import ResourceMap, OutputMap -from .utils import generate_stack_id +from .utils import generate_stack_id, yaml_tag_constructor from .exceptions import ValidationError @@ -74,6 +74,7 @@ class FakeStack(BaseModel): )) def _parse_template(self): + yaml.add_multi_constructor('', yaml_tag_constructor) try: self.template_dict = yaml.load(self.template) except yaml.parser.ParserError: diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index d66a172a8..423cf92c1 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -391,8 +391,7 @@ LIST_STACKS_RESOURCES_RESPONSE = """ GET_TEMPLATE_RESPONSE_TEMPLATE = """ - {{ stack.template }} - + {{ stack.template }} b9b4b068-3a41-11e5-94eb-example diff --git a/moto/cloudformation/utils.py b/moto/cloudformation/utils.py index 1d629c76b..384ea5401 100644 --- a/moto/cloudformation/utils.py +++ b/moto/cloudformation/utils.py @@ -2,6 +2,7 @@ from __future__ import 
unicode_literals import uuid import six import random +import yaml def generate_stack_id(stack_name): @@ -13,3 +14,22 @@ def random_suffix(): size = 12 chars = list(range(10)) + ['A-Z'] return ''.join(six.text_type(random.choice(chars)) for x in range(size)) + + +def yaml_tag_constructor(loader, tag, node): + """convert shorthand intrinsic function to full name + """ + def _f(loader, tag, node): + if tag == '!GetAtt': + return node.value.split('.') + elif type(node) == yaml.SequenceNode: + return loader.construct_sequence(node) + else: + return node.value + + if tag == '!Ref': + key = 'Ref' + else: + key = 'Fn::{}'.format(tag[1:]) + + return {key: _f(loader, tag, node)} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index e428d1f63..ed2ee8337 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -39,6 +39,68 @@ dummy_template = { } } + +dummy_template_yaml = """--- +AWSTemplateFormatVersion: 2010-09-09 +Description: Stack1 with yaml template +Resources: + EC2Instance1: + Type: AWS::EC2::Instance + Properties: + ImageId: ami-d3adb33f + KeyName: dummy + InstanceType: t2.micro + Tags: + - Key: Description + Value: Test tag + - Key: Name + Value: Name tag for tests +""" + + +dummy_template_yaml_with_short_form_func = """--- +AWSTemplateFormatVersion: 2010-09-09 +Description: Stack1 with yaml template +Resources: + EC2Instance1: + Type: AWS::EC2::Instance + Properties: + ImageId: ami-d3adb33f + KeyName: !Join [ ":", [ du, m, my ] ] + InstanceType: t2.micro + Tags: + - Key: Description + Value: Test tag + - Key: Name + Value: Name tag for tests +""" + + +dummy_template_yaml_with_ref = """--- +AWSTemplateFormatVersion: 2010-09-09 +Description: Stack1 with yaml template +Parameters: + TagDescription: + Type: String + TagName: + Type: String + +Resources: + EC2Instance1: + 
Type: AWS::EC2::Instance + Properties: + ImageId: ami-d3adb33f + KeyName: dummy + InstanceType: t2.micro + Tags: + - Key: Description + Value: + Ref: TagDescription + - Key: Name + Value: !Ref TagName +""" + + dummy_update_template = { "AWSTemplateFormatVersion": "2010-09-09", "Parameters": { @@ -110,6 +172,46 @@ def test_boto3_create_stack(): cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( dummy_template) +@mock_cloudformation +def test_boto3_create_stack_with_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_yaml, + ) + + cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( + dummy_template_yaml) + + +@mock_cloudformation +def test_boto3_create_stack_with_short_form_func_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_yaml_with_short_form_func, + ) + + cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( + dummy_template_yaml_with_short_form_func) + + +@mock_cloudformation +def test_boto3_create_stack_with_ref_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + params = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, + ] + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=params + ) + + cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( + dummy_template_yaml_with_ref) + @mock_cloudformation def test_creating_stacks_across_regions(): @@ -150,7 +252,6 @@ def test_create_stack_with_role_arn(): TemplateBody=dummy_template_json, RoleARN='arn:aws:iam::123456789012:role/moto', ) - stack = list(cf.stacks.all())[0] stack.role_arn.should.equal('arn:aws:iam::123456789012:role/moto') diff --git 
a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index ee53e9a68..d9fe4d80d 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -10,8 +10,11 @@ from moto.cloudformation.models import FakeStack from moto.cloudformation.parsing import resource_class_from_type, parse_condition, Export from moto.sqs.models import Queue from moto.s3.models import FakeBucket +from moto.cloudformation.utils import yaml_tag_constructor from boto.cloudformation.stack import Output + + dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -380,3 +383,47 @@ def test_import(): queue = import_stack.resource_map['Queue'] queue.name.should.equal("value") + + + +def test_short_form_func_in_yaml_teamplate(): + template = """--- + KeyB64: !Base64 valueToEncode + KeyRef: !Ref foo + KeyAnd: !And + - A + - B + KeyEquals: !Equals [A, B] + KeyIf: !If [A, B, C] + KeyNot: !Not [A] + KeyOr: !Or [A, B] + KeyFindInMap: !FindInMap [A, B, C] + KeyGetAtt: !GetAtt A.B + KeyGetAZs: !GetAZs A + KeyImportValue: !ImportValue A + KeyJoin: !Join [ ":", [A, B, C] ] + KeySelect: !Select [A, B] + KeySplit: !Split [A, B] + KeySub: !Sub A + """ + yaml.add_multi_constructor('', yaml_tag_constructor) + template_dict = yaml.load(template) + key_and_expects = [ + ['KeyRef', {'Ref': 'foo'}], + ['KeyB64', {'Fn::Base64': 'valueToEncode'}], + ['KeyAnd', {'Fn::And': ['A', 'B']}], + ['KeyEquals', {'Fn::Equals': ['A', 'B']}], + ['KeyIf', {'Fn::If': ['A', 'B', 'C']}], + ['KeyNot', {'Fn::Not': ['A']}], + ['KeyOr', {'Fn::Or': ['A', 'B']}], + ['KeyFindInMap', {'Fn::FindInMap': ['A', 'B', 'C']}], + ['KeyGetAtt', {'Fn::GetAtt': ['A', 'B']}], + ['KeyGetAZs', {'Fn::GetAZs': 'A'}], + ['KeyImportValue', {'Fn::ImportValue': 'A'}], + ['KeyJoin', {'Fn::Join': [ ":", [ 'A', 'B', 'C' ] ]}], + ['KeySelect', {'Fn::Select': ['A', 'B']}], + ['KeySplit', {'Fn::Split': ['A', 'B']}], + ['KeySub', {'Fn::Sub': 'A'}], + ] + for k, v 
in key_and_expects: + template_dict.should.have.key(k).which.should.be.equal(v) From b922af8ab7c615316e46ccd9468c8fedbe946cfa Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 8 Sep 2017 03:28:56 +0900 Subject: [PATCH 170/412] set None to ecs taskdefinition volumes called when called from cfn (#1104) --- moto/ecs/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index d3ec2b7f7..bc847b32e 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -114,7 +114,7 @@ class TaskDefinition(BaseObject): family = properties.get( 'Family', 'task-definition-{0}'.format(int(random() * 10 ** 6))) container_definitions = properties['ContainerDefinitions'] - volumes = properties['Volumes'] + volumes = properties.get('Volumes') ecs_backend = ecs_backends[region_name] return ecs_backend.register_task_definition( @@ -127,7 +127,7 @@ class TaskDefinition(BaseObject): family = properties.get( 'Family', 'task-definition-{0}'.format(int(random() * 10 ** 6))) container_definitions = properties['ContainerDefinitions'] - volumes = properties['Volumes'] + volumes = properties.get('Volumes') if (original_resource.family != family or original_resource.container_definitions != container_definitions or original_resource.volumes != volumes): From 5d8cd22b01e762e781d5a4476d23af0a27272b07 Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Thu, 7 Sep 2017 11:30:05 -0700 Subject: [PATCH 171/412] Fixed S3 lifecycle error message. 
(#1110) Fixes #1109 Also added PutBucketTagging support Also added Bucket CORS support --- moto/s3/exceptions.py | 20 +++ moto/s3/models.py | 93 ++++++++++++++ moto/s3/responses.py | 149 +++++++++++++++++++++- tests/test_s3/test_s3.py | 262 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 523 insertions(+), 1 deletion(-) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index df817ba78..24704e7ef 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -91,3 +91,23 @@ class EntityTooSmall(S3ClientError): "EntityTooSmall", "Your proposed upload is smaller than the minimum allowed object size.", *args, **kwargs) + + +class InvalidRequest(S3ClientError): + code = 400 + + def __init__(self, method, *args, **kwargs): + super(InvalidRequest, self).__init__( + "InvalidRequest", + "Found unsupported HTTP method in CORS config. Unsupported method is {}".format(method), + *args, **kwargs) + + +class MalformedXML(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(MalformedXML, self).__init__( + "MalformedXML", + "The XML you provided was not well-formed or did not validate against our published schema", + *args, **kwargs) diff --git a/moto/s3/models.py b/moto/s3/models.py index 4ea33adb6..abe92bdf1 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -295,6 +295,26 @@ class LifecycleRule(BaseModel): self.storage_class = storage_class +class CorsRule(BaseModel): + + def __init__(self, allowed_methods, allowed_origins, allowed_headers=None, expose_headers=None, + max_age_seconds=None): + # Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`, + # whereas Python 3 is OK with str. This causes issues with the XML parser, which returns + # unicode strings in Python 2. 
So, need to do this to make it work in both Python 2 and 3: + import sys + if sys.version_info >= (3, 0): + str_type = str + else: + str_type = basestring # noqa + + self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, str_type) else allowed_methods + self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, str_type) else allowed_origins + self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, str_type) else allowed_headers + self.exposed_headers = [expose_headers] if isinstance(expose_headers, str_type) else expose_headers + self.max_age_seconds = max_age_seconds + + class FakeBucket(BaseModel): def __init__(self, name, region_name): @@ -307,6 +327,8 @@ class FakeBucket(BaseModel): self.policy = None self.website_configuration = None self.acl = get_canned_acl('private') + self.tags = FakeTagging() + self.cors = [] @property def location(self): @@ -336,6 +358,61 @@ class FakeBucket(BaseModel): def delete_lifecycle(self): self.rules = [] + def set_cors(self, rules): + from moto.s3.exceptions import InvalidRequest, MalformedXML + self.cors = [] + + if len(rules) > 100: + raise MalformedXML() + + # Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`, + # whereas Python 3 is OK with str. This causes issues with the XML parser, which returns + # unicode strings in Python 2. 
So, need to do this to make it work in both Python 2 and 3: + import sys + if sys.version_info >= (3, 0): + str_type = str + else: + str_type = basestring # noqa + + for rule in rules: + assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], str_type) + assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], str_type) + assert isinstance(rule.get("AllowedHeader", []), list) or isinstance(rule.get("AllowedHeader", ""), + str_type) + assert isinstance(rule.get("ExposedHeader", []), list) or isinstance(rule.get("ExposedHeader", ""), + str_type) + assert isinstance(rule.get("MaxAgeSeconds", "0"), str_type) + + if isinstance(rule["AllowedMethod"], str_type): + methods = [rule["AllowedMethod"]] + else: + methods = rule["AllowedMethod"] + + for method in methods: + if method not in ["GET", "PUT", "HEAD", "POST", "DELETE"]: + raise InvalidRequest(method) + + self.cors.append(CorsRule( + rule["AllowedMethod"], + rule["AllowedOrigin"], + rule.get("AllowedHeader"), + rule.get("ExposedHeader"), + rule.get("MaxAgeSecond") + )) + + def delete_cors(self): + self.cors = [] + + def set_tags(self, tagging): + self.tags = tagging + + def delete_tags(self): + self.tags = FakeTagging() + + @property + def tagging(self): + return self.tags + def set_website_configuration(self, website_configuration): self.website_configuration = website_configuration @@ -510,6 +587,22 @@ class S3Backend(BaseBackend): key.set_tagging(tagging) return key + def put_bucket_tagging(self, bucket_name, tagging): + bucket = self.get_bucket(bucket_name) + bucket.set_tags(tagging) + + def delete_bucket_tagging(self, bucket_name): + bucket = self.get_bucket(bucket_name) + bucket.delete_tags() + + def put_bucket_cors(self, bucket_name, cors_rules): + bucket = self.get_bucket(bucket_name) + bucket.set_cors(cors_rules) + + def delete_bucket_cors(self, bucket_name): + bucket = self.get_bucket(bucket_name) + bucket.delete_cors() + def initiate_multipart(self, 
bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index dea80518d..2a696e551 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -188,7 +188,8 @@ class ResponseObject(_TemplateEnvironmentMixin): elif 'lifecycle' in querystring: bucket = self.backend.get_bucket(bucket_name) if not bucket.rules: - return 404, {}, "NoSuchLifecycleConfiguration" + template = self.response_template(S3_NO_LIFECYCLE) + return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template( S3_BUCKET_LIFECYCLE_CONFIGURATION) return template.render(rules=bucket.rules) @@ -205,11 +206,29 @@ class ResponseObject(_TemplateEnvironmentMixin): elif 'website' in querystring: website_configuration = self.backend.get_bucket_website_configuration( bucket_name) + if not website_configuration: + template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG) + return 404, {}, template.render(bucket_name=bucket_name) return website_configuration elif 'acl' in querystring: bucket = self.backend.get_bucket(bucket_name) template = self.response_template(S3_OBJECT_ACL_RESPONSE) return template.render(obj=bucket) + elif 'tagging' in querystring: + bucket = self.backend.get_bucket(bucket_name) + # "Special Error" if no tags: + if len(bucket.tagging.tag_set.tags) == 0: + template = self.response_template(S3_NO_BUCKET_TAGGING) + return 404, {}, template.render(bucket_name=bucket_name) + template = self.response_template(S3_BUCKET_TAGGING_RESPONSE) + return template.render(bucket=bucket) + elif "cors" in querystring: + bucket = self.backend.get_bucket(bucket_name) + if len(bucket.cors) == 0: + template = self.response_template(S3_NO_CORS_CONFIG) + return 404, {}, template.render(bucket_name=bucket_name) + template = self.response_template(S3_BUCKET_CORS_RESPONSE) + return template.render(bucket=bucket) elif 'versions' in querystring: delimiter = 
querystring.get('delimiter', [None])[0] encoding_type = querystring.get('encoding-type', [None])[0] @@ -340,9 +359,20 @@ class ResponseObject(_TemplateEnvironmentMixin): # TODO: Support the XML-based ACL format self.backend.set_bucket_acl(bucket_name, acl) return "" + elif "tagging" in querystring: + tagging = self._bucket_tagging_from_xml(body) + self.backend.put_bucket_tagging(bucket_name, tagging) + return "" elif 'website' in querystring: self.backend.set_bucket_website_configuration(bucket_name, body) return "" + elif "cors" in querystring: + from moto.s3.exceptions import MalformedXML + try: + self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body)) + return "" + except KeyError: + raise MalformedXML() else: if body: try: @@ -366,6 +396,12 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'policy' in querystring: self.backend.delete_bucket_policy(bucket_name, body) return 204, {}, "" + elif "tagging" in querystring: + self.backend.delete_bucket_tagging(bucket_name) + return 204, {}, "" + elif "cors" in querystring: + self.backend.delete_bucket_cors(bucket_name) + return 204, {}, "" elif 'lifecycle' in querystring: bucket = self.backend.get_bucket(bucket_name) bucket.delete_lifecycle() @@ -697,6 +733,27 @@ class ResponseObject(_TemplateEnvironmentMixin): tagging = FakeTagging(tag_set) return tagging + def _bucket_tagging_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + tags = [] + # Optional if no tags are being sent: + if parsed_xml['Tagging'].get('TagSet'): + for tag in parsed_xml['Tagging']['TagSet']['Tag']: + tags.append(FakeTag(tag['Key'], tag['Value'])) + + tag_set = FakeTagSet(tags) + tagging = FakeTagging(tag_set) + return tagging + + def _cors_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list): + return [cors for cors in parsed_xml["CORSConfiguration"]["CORSRule"]] + + return [parsed_xml["CORSConfiguration"]["CORSRule"]] + def 
_key_response_delete(self, bucket_name, query, key_name, headers): if query.get('uploadId'): upload_id = query['uploadId'][0] @@ -1023,6 +1080,46 @@ S3_OBJECT_TAGGING_RESPONSE = """\ """ +S3_BUCKET_TAGGING_RESPONSE = """ + + + {% for tag in bucket.tagging.tag_set.tags %} + + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + +""" + +S3_BUCKET_CORS_RESPONSE = """ + + {% for cors in bucket.cors %} + + {% for origin in cors.allowed_origins %} + {{ origin }} + {% endfor %} + {% for method in cors.allowed_methods %} + {{ method }} + {% endfor %} + {% if cors.allowed_headers is not none %} + {% for header in cors.allowed_headers %} + {{ header }} + {% endfor %} + {% endif %} + {% if cors.exposed_headers is not none %} + {% for header in cors.exposed_headers %} + {{ header }} + {% endfor %} + {% endif %} + {% if cors.max_age_seconds is not none %} + {{ cors.max_age_seconds }} + {% endif %} + + {% endfor %} + +""" + S3_OBJECT_COPY_RESPONSE = """\ {{ key.etag }} @@ -1115,3 +1212,53 @@ S3_NO_POLICY = """ 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= """ + +S3_NO_LIFECYCLE = """ + + NoSuchLifecycleConfiguration + The lifecycle configuration does not exist + {{ bucket_name }} + 44425877V1D0A2F9 + 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= + +""" + +S3_NO_BUCKET_TAGGING = """ + + NoSuchTagSet + The TagSet does not exist + {{ bucket_name }} + 44425877V1D0A2F9 + 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= + +""" + +S3_NO_BUCKET_WEBSITE_CONFIG = """ + + NoSuchWebsiteConfiguration + The specified bucket does not have a website configuration + {{ bucket_name }} + 44425877V1D0A2F9 + 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= + +""" + +S3_INVALID_CORS_REQUEST = """ + + NoSuchWebsiteConfiguration + The specified bucket does not have a website configuration + {{ bucket_name }} + 44425877V1D0A2F9 + 
9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= + +""" + +S3_NO_CORS_CONFIG = """ + + NoSuchCORSConfiguration + The CORS configuration does not exist + {{ bucket_name }} + 44425877V1D0A2F9 + 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= + +""" diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 3832026eb..331452a7d 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1391,6 +1391,268 @@ def test_boto3_put_object_with_tagging(): resp['TagSet'].should.contain({'Key': 'foo', 'Value': 'bar'}) +@mock_s3 +def test_boto3_put_bucket_tagging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + resp = s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagTwo", + "Value": "ValueTwo" + } + ] + }) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # No tags is also OK: + resp = s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ + "TagSet": [] + }) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_s3 +def test_boto3_get_bucket_tagging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagTwo", + "Value": "ValueTwo" + } + ] + }) + + # Get the tags for the bucket: + resp = s3.get_bucket_tagging(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + len(resp["TagSet"]).should.equal(2) + + # With no tags: + s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ + "TagSet": [] + }) + + with assert_raises(ClientError) as err: + s3.get_bucket_tagging(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchTagSet") + 
e.response["Error"]["Message"].should.equal("The TagSet does not exist") + + +@mock_s3 +def test_boto3_delete_bucket_tagging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagTwo", + "Value": "ValueTwo" + } + ] + }) + + resp = s3.delete_bucket_tagging(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + + with assert_raises(ClientError) as err: + s3.get_bucket_tagging(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchTagSet") + e.response["Error"]["Message"].should.equal("The TagSet does not exist") + + +@mock_s3 +def test_boto3_put_bucket_cors(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + resp = s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "GET", + "POST" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + }, + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "PUT" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + } + ] + }) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + with assert_raises(ClientError) as err: + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "NOTREAL", + "POST" + ] + } + ] + }) + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidRequest") + e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. 
" + "Unsupported method is NOTREAL") + + with assert_raises(ClientError) as err: + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [] + }) + e = err.exception + e.response["Error"]["Code"].should.equal("MalformedXML") + + # And 101: + many_rules = [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}] * 101 + with assert_raises(ClientError) as err: + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": many_rules + }) + e = err.exception + e.response["Error"]["Code"].should.equal("MalformedXML") + + +@mock_s3 +def test_boto3_get_bucket_cors(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + # Without CORS: + with assert_raises(ClientError) as err: + s3.get_bucket_cors(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") + e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") + + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "GET", + "POST" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + }, + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "PUT" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + } + ] + }) + + resp = s3.get_bucket_cors(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + len(resp["CORSRules"]).should.equal(2) + + +@mock_s3 +def test_boto3_delete_bucket_cors(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "GET" + ] + } + ] + }) + + resp = 
s3.delete_bucket_cors(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + + # Verify deletion: + with assert_raises(ClientError) as err: + s3.get_bucket_cors(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") + e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") + + @mock_s3 def test_boto3_put_object_tagging(): s3 = boto3.client('s3', region_name='us-east-1') From 646e45b7e430b5374dc5145a65437d5809eeac29 Mon Sep 17 00:00:00 2001 From: Chris Evett Date: Thu, 7 Sep 2017 14:30:46 -0400 Subject: [PATCH 172/412] update dependencies so that make init will work; exclude ropeproject folder in gitignore (#1111) --- .gitignore | 3 ++- requirements-dev.txt | 2 +- setup.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index efee854dd..18026d60f 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,5 @@ build/ .idea/ *.swp .DS_Store -python_env \ No newline at end of file +python_env +.ropeproject/ diff --git a/requirements-dev.txt b/requirements-dev.txt index e2f379a59..28aaec601 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -8,4 +8,4 @@ freezegun flask boto3>=1.4.4 botocore>=1.5.77 -six +six>=1.9 diff --git a/setup.py b/setup.py index 07cf98387..b6de56867 100755 --- a/setup.py +++ b/setup.py @@ -10,11 +10,11 @@ install_requires = [ "requests>=2.5", "xmltodict", "dicttoxml", - "six", + "six>1.9", "werkzeug", "pyaml", "pytz", - "python-dateutil", + "python-dateutil<3.0.0,>=2.1", "mock", ] From b088811d0686011f80f8fdb10a69d5364436ff64 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 7 Sep 2017 11:31:42 -0700 Subject: [PATCH 173/412] bump version to 1.1.2 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b6de56867..01a471507 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - 
version='1.1.1', + version='1.1.2', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From df6e3532c72d4c9ecef2f6df7f6550aaa615e1f7 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 7 Sep 2017 12:07:08 -0700 Subject: [PATCH 174/412] better release tool --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index cb30c5df1..3c5582c2d 100644 --- a/Makefile +++ b/Makefile @@ -17,5 +17,5 @@ test_server: publish: python setup.py sdist bdist_wheel upload - git tag $(python setup.py --version) - git push origin $(python setup.py --version) + git tag `python setup.py --version` + git push origin `python setup.py --version` From 5a034973679e77da61ace263685fb13277498af9 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 8 Sep 2017 15:07:44 -0700 Subject: [PATCH 175/412] rendering vpc_id in ec2 responses --- moto/ec2/responses/instances.py | 48 +++++++++++++++----------------- tests/test_ec2/test_instances.py | 4 +++ 2 files changed, 27 insertions(+), 25 deletions(-) diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index ea70b85b1..257efc1f2 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -255,18 +255,16 @@ EC2_RUN_INSTANCES = """ limit: - break - continuation_index += 1 - result_keys = result_keys[continuation_index:] + result_keys = self._get_results_from_token(result_keys, limit) if len(result_keys) > max_keys: is_truncated = 'true' @@ -333,6 +333,15 @@ class ResponseObject(_TemplateEnvironmentMixin): start_after=None if continuation_token else start_after ) + + def _get_results_from_token(self, result_keys, token): + continuation_index = 0 + for key in result_keys: + if key.name > token: + break + continuation_index += 1 + return result_keys[continuation_index:] + def _bucket_response_put(self, request, body, region_name, bucket_name, querystring, headers): if not 
request.headers.get('Content-Length'): return 411, {}, "Content-Length required" From 14dec68f151bfaebd1d0eaf6189698c2da0512a8 Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Sat, 9 Sep 2017 00:27:54 -0400 Subject: [PATCH 179/412] Remove superfluous space --- moto/s3/responses.py | 1 - 1 file changed, 1 deletion(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 8e54fdf8e..126971228 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -333,7 +333,6 @@ class ResponseObject(_TemplateEnvironmentMixin): start_after=None if continuation_token else start_after ) - def _get_results_from_token(self, result_keys, token): continuation_index = 0 for key in result_keys: From 83dd9559db16c2973df12a4e415500331cb0d720 Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Fri, 8 Sep 2017 23:47:16 -0400 Subject: [PATCH 180/412] Handle "max-keys" in list-objects --- moto/s3/responses.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) mode change 100644 => 100755 moto/s3/responses.py diff --git a/moto/s3/responses.py b/moto/s3/responses.py old mode 100644 new mode 100755 index 126971228..4da888de5 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -276,6 +276,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if prefix and isinstance(prefix, six.binary_type): prefix = prefix.decode("utf-8") delimiter = querystring.get('delimiter', [None])[0] + max_keys = int(querystring.get('max-keys', [1000])[0]) marker = querystring.get('marker', [None])[0] result_keys, result_folders = self.backend.prefix_query( bucket, prefix, delimiter) @@ -283,13 +284,17 @@ class ResponseObject(_TemplateEnvironmentMixin): if marker: result_keys = self._get_results_from_token(result_keys, marker) + result_keys, is_truncated, _ = self._truncate_result(result_keys, max_keys) + template = self.response_template(S3_BUCKET_GET_RESPONSE) return 200, {}, template.render( bucket=bucket, prefix=prefix, delimiter=delimiter, 
result_keys=result_keys, - result_folders=result_folders + result_folders=result_folders, + is_truncated=is_truncated, + max_keys=max_keys ) def _handle_list_objects_v2(self, bucket_name, querystring): @@ -312,13 +317,8 @@ class ResponseObject(_TemplateEnvironmentMixin): limit = continuation_token or start_after result_keys = self._get_results_from_token(result_keys, limit) - if len(result_keys) > max_keys: - is_truncated = 'true' - result_keys = result_keys[:max_keys] - next_continuation_token = result_keys[-1].name - else: - is_truncated = 'false' - next_continuation_token = None + result_keys, is_truncated, \ + next_continuation_token = self._truncate_result(result_keys, max_keys) return template.render( bucket=bucket, @@ -341,6 +341,16 @@ class ResponseObject(_TemplateEnvironmentMixin): continuation_index += 1 return result_keys[continuation_index:] + def _truncate_result(self, result_keys, max_keys): + if len(result_keys) > max_keys: + is_truncated = 'true' + result_keys = result_keys[:max_keys] + next_continuation_token = result_keys[-1].name + else: + is_truncated = 'false' + next_continuation_token = None + return result_keys, is_truncated, next_continuation_token + def _bucket_response_put(self, request, body, region_name, bucket_name, querystring, headers): if not request.headers.get('Content-Length'): return 411, {}, "Content-Length required" @@ -841,9 +851,9 @@ S3_BUCKET_GET_RESPONSE = """ {{ bucket.name }} {{ prefix }} - 1000 + {{ max_keys }} {{ delimiter }} - false + {{ is_truncated }} {% for key in result_keys %} {{ key.name }} From 386ac94abe8e2f97893bf8d0fd40827fdc1d9d6e Mon Sep 17 00:00:00 2001 From: Brian Rower Date: Mon, 11 Sep 2017 12:06:24 -0700 Subject: [PATCH 181/412] Allow doing an ADD update of a string set Fix test --- moto/dynamodb2/models.py | 6 ++++ .../test_dynamodb_table_with_range_key.py | 30 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 
7590ee1e1..9bb0ee441 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -171,6 +171,12 @@ class Item(BaseModel): decimal.Decimal(existing.value) + decimal.Decimal(new_value) )}) + elif set(update_action['Value'].keys()) == set(['SS']): + existing = self.attrs.get(attribute_name, DynamoType({"SS": {}})) + new_set = set(existing.value).union(set(new_value)) + self.attrs[attribute_name] = DynamoType({ + "SS": list(new_set) + }) else: # TODO: implement other data types raise NotImplementedError( diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index e4a586cbb..93dc5b3ef 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1306,6 +1306,36 @@ def test_update_item_add_value(): }) +@mock_dynamodb2 +def test_update_item_add_value_string_set(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'string_set': set(['str1', 'str2']), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'string_set': { + 'Action': u'ADD', + 'Value': set(['str3']), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'string_set': set(['str1', 'str2', 'str3']), + 'forum_name': 'the-key', + 'subject': '123', + }) + + @mock_dynamodb2 def test_update_item_add_value_does_not_exist_is_created(): table = _create_table_with_range_key() From 88b4d0b271ec584419f281c6e0a07fe43b5c8dbb Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 11 Sep 2017 12:34:20 -0700 Subject: [PATCH 182/412] bumping version to 1.1.4 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index eae16b81d..7ab174d35 100755 --- 
a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.3', + version='1.1.4', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From ed820cc80e3a1f6e08bf7799e8cb153e6669b6d7 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Tue, 12 Sep 2017 09:28:36 +1200 Subject: [PATCH 183/412] return validation error for empty attribute --- moto/dynamodb2/responses.py | 10 +++++++++ tests/test_dynamodb2/test_dynamodb.py | 30 +++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 29863d23b..61e976632 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -144,6 +144,16 @@ class DynamoHandler(BaseResponse): def put_item(self): name = self.body['TableName'] item = self.body['Item'] + + res = re.search('\"\"', json.dumps(item)) + if res: + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return 400, {'server': 'amazon.com'}, dynamo_json_dump( + {'__type': er, + 'message': ('One or more parameter values were invalid: ' + 'An AttributeValue may not contain an empty string') + }) + overwrite = 'Expected' not in self.body if not overwrite: expected = self.body['Expected'] diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 7fec5c2bd..4ae753d7b 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -149,3 +149,33 @@ def test_list_not_found_table_tags(): conn.list_tags_of_resource(ResourceArn=arn) except ClientError as exception: assert exception.response['Error']['Code'] == "ResourceNotFoundException" + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_item_add_empty_string_exception(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + 
KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + session = boto3.Session() + dynamodb = session.resource('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + table = dynamodb.Table('TestTable') + try: + response = table.put_item(Item={ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': "", + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + except ClientError as exception: + assert exception.response['Error']['Code'] == "ValidationException" From 2a66ae2bfdd9f715e87ef79ca649cff1333fee1c Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Tue, 12 Sep 2017 11:07:34 +1200 Subject: [PATCH 184/412] fix linting errors --- moto/dynamodb2/responses.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index acc9dfb5e..d41e4b366 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -148,10 +148,13 @@ class DynamoHandler(BaseResponse): res = re.search('\"\"', json.dumps(item)) if res: er = 'com.amazonaws.dynamodb.v20111205#ValidationException' - return 400, {'server': 'amazon.com'}, dynamo_json_dump( - {'__type': er, - 'message': ('One or more parameter values were invalid: ' - 'An AttributeValue may not contain an empty string')}) + return (400, + {'server': 'amazon.com'}, + dynamo_json_dump({'__type': er, + 'message': ('One or more parameter values were ' + 'invalid:An AttributeValue may not ' + 'contain an empty string')} + )) overwrite = 'Expected' not in self.body if not overwrite: From 6ee204e458d4c696ac3939569e07c2116ce83511 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Tue, 12 Sep 2017 12:21:08 +1200 Subject: [PATCH 185/412] fix server mode test --- moto/dynamodb2/responses.py | 2 +- 
tests/test_dynamodb2/test_dynamodb.py | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index d41e4b366..586a1db7b 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -152,7 +152,7 @@ class DynamoHandler(BaseResponse): {'server': 'amazon.com'}, dynamo_json_dump({'__type': er, 'message': ('One or more parameter values were ' - 'invalid:An AttributeValue may not ' + 'invalid: An AttributeValue may not ' 'contain an empty string')} )) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 4ae753d7b..3b50f0b8e 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -163,19 +163,19 @@ def test_item_add_empty_string_exception(): KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - session = boto3.Session() - dynamodb = session.resource('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - table = dynamodb.Table('TestTable') + try: - response = table.put_item(Item={ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': "", - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' 
}, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + except ClientError as exception: + exception.response['Error']['Code'] assert exception.response['Error']['Code'] == "ValidationException" From 1472d63c877471996e3798fcdc6144665e57121c Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Wed, 13 Sep 2017 08:28:42 +1200 Subject: [PATCH 186/412] use assert_raises teat helper pattern --- tests/test_dynamodb2/test_dynamodb.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 3b50f0b8e..764980fba 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -164,7 +164,7 @@ def test_item_add_empty_string_exception(): AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - try: + with assert_raises(ClientError) as ex: conn.put_item( TableName=name, Item={ @@ -176,6 +176,8 @@ def test_item_add_empty_string_exception(): } ) - except ClientError as exception: - exception.response['Error']['Code'] - assert exception.response['Error']['Code'] == "ValidationException" + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' + ) From 417ade7b8fcd7f6ccbd7c32647d8d931109bae95 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 12 Sep 2017 14:53:50 -0700 Subject: [PATCH 187/412] bumping to version 1.1.5 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 7ab174d35..b28ff0e12 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( 
name='moto', - version='1.1.4', + version='1.1.5', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 6affc7a4ec9cfa323f45b184d42429b309a7e074 Mon Sep 17 00:00:00 2001 From: Brian Rower Date: Tue, 12 Sep 2017 21:42:29 -0700 Subject: [PATCH 188/412] Add basic support for the add operation in an update operation Add basic delete functionality Improve testing coverage and make behave more like actual dynamo on errors Lint fix --- moto/dynamodb2/models.py | 60 ++++++++- moto/dynamodb2/responses.py | 3 + .../test_dynamodb_table_with_range_key.py | 126 ++++++++++++++++++ 3 files changed, 188 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index bdc5743c2..5747f260d 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -57,7 +57,7 @@ class DynamoType(object): @property def cast_value(self): - if self.type == 'N': + if self.is_number(): try: return int(self.value) except ValueError: @@ -76,6 +76,15 @@ class DynamoType(object): comparison_func = get_comparison_func(range_comparison) return comparison_func(self.cast_value, *range_values) + def is_number(self): + return self.type == 'N' + + def is_set(self): + return self.type == 'SS' or self.type == 'NS' or self.type == 'BS' + + def same_type(self, other): + return self.type == other.type + class Item(BaseModel): @@ -140,6 +149,55 @@ class Item(BaseModel): self.attrs[key] = DynamoType(expression_attribute_values[value]) else: self.attrs[key] = DynamoType({"S": value}) + elif action == 'ADD': + key, value = value.split(" ", maxsplit=1) + key = key.strip() + value_str = value.strip() + if value_str in expression_attribute_values: + dyn_value = DynamoType(expression_attribute_values[value]) + else: + raise TypeError + + # Handle adding numbers - value gets added to existing value, + # or added to 0 if it doesn't exist yet + if dyn_value.is_number(): + existing = self.attrs.get(key, 
DynamoType({"N": '0'})) + if not existing.same_type(dyn_value): + raise TypeError() + self.attrs[key] = DynamoType({"N": str( + decimal.Decimal(existing.value) + + decimal.Decimal(dyn_value.value) + )}) + + # Handle adding sets - value is added to the set, or set is + # created with only this value if it doesn't exist yet + # New value must be of same set type as previous value + elif dyn_value.is_set(): + existing = self.attrs.get(key, DynamoType({dyn_value.type: {}})) + if not existing.same_type(dyn_value): + raise TypeError() + new_set = set(existing.value).union(dyn_value.value) + self.attrs[key] = DynamoType({existing.type: list(new_set)}) + else: # Number and Sets are the only supported types for ADD + raise TypeError + + elif action == 'DELETE': + key, value = value.split(" ", maxsplit=1) + key = key.strip() + value_str = value.strip() + if value_str in expression_attribute_values: + dyn_value = DynamoType(expression_attribute_values[value]) + else: + raise TypeError + + if not dyn_value.is_set(): + raise TypeError + existing = self.attrs.get(key, None) + if existing: + if not existing.same_type(dyn_value): + raise TypeError + new_set = set(existing.value).difference(dyn_value.value) + self.attrs[key] = DynamoType({existing.type: list(new_set)}) else: raise NotImplementedError('{} update action not yet supported'.format(action)) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 586a1db7b..c3cb4ef72 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -497,6 +497,9 @@ class DynamoHandler(BaseResponse): except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er) + except TypeError: + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er) item_dict = item.to_json() item_dict['ConsumedCapacityUnits'] = 0.5 diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py 
b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 93dc5b3ef..1ab25dc7a 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -5,6 +5,7 @@ from decimal import Decimal import boto import boto3 from boto3.dynamodb.conditions import Key +from botocore.exceptions import ClientError import sure # noqa from freezegun import freeze_time from moto import mock_dynamodb2, mock_dynamodb2_deprecated @@ -1190,6 +1191,14 @@ def _create_table_with_range_key(): 'AttributeName': 'subject', 'AttributeType': 'S' }, + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'created', + 'AttributeType': 'N' + } ], ProvisionedThroughput={ 'ReadCapacityUnits': 5, @@ -1392,6 +1401,123 @@ def test_update_item_with_expression(): 'subject': '123', }) +@mock_dynamodb2 +def test_update_item_add_with_expression(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + current_item = { + 'forum_name': 'the-key', + 'subject': '123', + 'str_set': {'item1', 'item2', 'item3'}, + 'num_set': {1, 2, 3}, + 'num_val': 6 + } + + # Put an entry in the DB to play with + table.put_item(Item=current_item) + + # Update item to add a string value to a string set + table.update_item( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': {'item4'} + } + ) + current_item['str_set'] = current_item['str_set'].union({'item4'}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to add a num value to a num set + table.update_item( + Key=item_key, + UpdateExpression='ADD num_set :v', + ExpressionAttributeValues={ + ':v': {6} + } + ) + current_item['num_set'] = current_item['num_set'].union({6}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to add a value to a number value + table.update_item( + Key=item_key, + 
UpdateExpression='ADD num_val :v', + ExpressionAttributeValues={ + ':v': 20 + } + ) + current_item['num_val'] = current_item['num_val'] + 20 + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to add a number value to a string set, should raise Client Error + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': 20 + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to add a number set to the string set, should raise a ClientError + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': { 20 } + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + +@mock_dynamodb2 +def test_update_item_delete_with_expression(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + current_item = { + 'forum_name': 'the-key', + 'subject': '123', + 'str_set': {'item1', 'item2', 'item3'}, + 'num_set': {1, 2, 3}, + 'num_val': 6 + } + + # Put an entry in the DB to play with + table.put_item(Item=current_item) + + # Update item to delete a string value from a string set + table.update_item( + Key=item_key, + UpdateExpression='DELETE str_set :v', + ExpressionAttributeValues={ + ':v': {'item2'} + } + ) + current_item['str_set'] = current_item['str_set'].difference({'item2'}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to delete a num value from a num set + table.update_item( + Key=item_key, + UpdateExpression='DELETE num_set :v', + ExpressionAttributeValues={ + ':v': {2} + } + ) + current_item['num_set'] = current_item['num_set'].difference({2}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Try to delete on a number, this should fail + 
table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_val :v', + ExpressionAttributeValues={ + ':v': 20 + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + @mock_dynamodb2 def test_boto3_query_gsi_range_comparison(): From c1c592609abfb3699e7692facf30d9ca7a9103dc Mon Sep 17 00:00:00 2001 From: Brian Rower Date: Tue, 12 Sep 2017 23:30:15 -0700 Subject: [PATCH 189/412] Fix python 2.7 issue --- moto/dynamodb2/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 5747f260d..5915d6eea 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -150,7 +150,7 @@ class Item(BaseModel): else: self.attrs[key] = DynamoType({"S": value}) elif action == 'ADD': - key, value = value.split(" ", maxsplit=1) + key, value = value.split(" ", 1) key = key.strip() value_str = value.strip() if value_str in expression_attribute_values: @@ -182,7 +182,7 @@ class Item(BaseModel): raise TypeError elif action == 'DELETE': - key, value = value.split(" ", maxsplit=1) + key, value = value.split(" ", 1) key = key.strip() value_str = value.strip() if value_str in expression_attribute_values: From 0097ab4c67972ae169cc0613a6c7aae72fbacfea Mon Sep 17 00:00:00 2001 From: Brian Rower Date: Tue, 12 Sep 2017 23:41:12 -0700 Subject: [PATCH 190/412] Improve code coverage --- .../test_dynamodb_table_with_range_key.py | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 1ab25dc7a..a9ab298b7 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1470,6 +1470,22 @@ def test_update_item_add_with_expression(): ).should.have.raised(ClientError) 
dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + # Attempt to update with a bad expression + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set bad_value' + ).should.have.raised(ClientError) + + # Attempt to add a string value instead of a string set + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': 'new_string' + } + ).should.have.raised(ClientError) + + @mock_dynamodb2 def test_update_item_delete_with_expression(): table = _create_table_with_range_key() @@ -1518,6 +1534,22 @@ def test_update_item_delete_with_expression(): ).should.have.raised(ClientError) dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + # Try to delete a string set from a number set + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_set :v', + ExpressionAttributeValues={ + ':v': {'del_str'} + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to update with a bad expression + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_val badvalue' + ).should.have.raised(ClientError) + @mock_dynamodb2 def test_boto3_query_gsi_range_comparison(): From 98f95dd4a7c656a258f0e16afed95b69aafefad2 Mon Sep 17 00:00:00 2001 From: aohara-admin Date: Wed, 13 Sep 2017 15:00:39 -0400 Subject: [PATCH 191/412] add Lambda tag_resource, untag_resource, and list_tags methods --- moto/awslambda/models.py | 26 +++++++++ moto/awslambda/responses.py | 57 +++++++++++++++++++ moto/awslambda/urls.py | 1 + tests/test_awslambda/test_lambda.py | 86 +++++++++++++++++++++++++++++ 4 files changed, 170 insertions(+) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 13d4726ac..523393ba3 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -81,6 +81,8 @@ class LambdaFunction(BaseModel): self.function_arn = 
'arn:aws:lambda:123456789012:function:{0}'.format( self.function_name) + self.tags = dict() + @property def vpc_config(self): config = self._vpc_config.copy() @@ -278,6 +280,9 @@ class LambdaBackend(BaseBackend): def has_function(self, function_name): return function_name in self._functions + def has_function_arn(self, function_arn): + return self.get_function_by_arn(function_arn) is not None + def create_function(self, spec): fn = LambdaFunction(spec) self._functions[fn.function_name] = fn @@ -286,12 +291,33 @@ class LambdaBackend(BaseBackend): def get_function(self, function_name): return self._functions[function_name] + def get_function_by_arn(self, function_arn): + for function in self._functions.values(): + if function.function_arn == function_arn: + return function + return None + def delete_function(self, function_name): del self._functions[function_name] def list_functions(self): return self._functions.values() + def list_tags(self, resource): + return self.get_function_by_arn(resource).tags + + def tag_resource(self, resource, tags): + self.get_function_by_arn(resource).tags.update(tags) + + def untag_resource(self, resource, tagKeys): + function = self.get_function_by_arn(resource) + for key in tagKeys: + try: + del function.tags[key] + except KeyError: + pass + # Don't care + def do_validate_s3(): return os.environ.get('VALIDATE_LAMBDA_S3', '') in ['', '1', 'true'] diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index d145f4760..b7a6921c5 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -3,6 +3,12 @@ from __future__ import unicode_literals import json import re +try: + from urllib import unquote + from urlparse import urlparse, parse_qs +except: + from urllib.parse import unquote, urlparse, parse_qs + from moto.core.responses import BaseResponse @@ -33,6 +39,17 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + def tag(self, request, full_url, headers): + 
self.setup_class(request, full_url, headers) + if request.method == 'GET': + return self._list_tags(request, full_url) + elif request.method == 'POST': + return self._tag_resource(request, full_url) + elif request.method == 'DELETE': + return self._untag_resource(request, full_url) + else: + raise ValueError("Cannot handle {0} request".format(request.method)) + def _invoke(self, request, full_url): response_headers = {} lambda_backend = self.get_lambda_backend(full_url) @@ -102,3 +119,43 @@ class LambdaResponse(BaseResponse): return region.group(1) else: return self.default_region + + def _list_tags(self, request, full_url): + lambda_backend = self.get_lambda_backend(full_url) + + path = request.path if hasattr(request, 'path') else request.path_url + function_arn = unquote(path.split('/')[-1]) + + if lambda_backend.has_function_arn(function_arn): + function = lambda_backend.get_function_by_arn(function_arn) + return 200, {}, json.dumps(dict(Tags=function.tags)) + else: + return 404, {}, "{}" + + def _tag_resource(self, request, full_url): + lambda_backend = self.get_lambda_backend(full_url) + + path = request.path if hasattr(request, 'path') else request.path_url + function_arn = unquote(path.split('/')[-1]) + + spec = json.loads(self.body) + + if lambda_backend.has_function_arn(function_arn): + lambda_backend.tag_resource(function_arn, spec['Tags']) + return 200, {}, "{}" + else: + return 404, {}, "{}" + + def _untag_resource(self, request, full_url): + lambda_backend = self.get_lambda_backend(full_url) + + path = request.path if hasattr(request, 'path') else request.path_url + function_arn = unquote(path.split('/')[-1].split('?')[0]) + + tag_keys = parse_qs(urlparse(full_url).query)['tagKeys'] + + if lambda_backend.has_function_arn(function_arn): + lambda_backend.untag_resource(function_arn, tag_keys) + return 204, {}, "{}" + else: + return 404, {}, "{}" diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index c63135766..c0917be57 100644 --- 
a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -11,4 +11,5 @@ url_paths = { '{0}/(?P[^/]+)/functions/?$': response.root, '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/?$': response.function, '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invocations/?$': response.invoke, + '{0}/(?P[^/]+)/tags/(?P.+)': response.tag } diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 007516f56..4ec504365 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -469,3 +469,89 @@ def test_invoke_lambda_error(): assert 'FunctionError' in result assert result['FunctionError'] == 'Handled' + +@mock_lambda +@mock_s3 +def test_tags(): + """ + test list_tags -> tag_resource -> list_tags -> tag_resource -> list_tags -> untag_resource -> list_tags integration + """ + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + function = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + # List tags when there are none + conn.list_tags( + Resource=function['FunctionArn'] + )['Tags'].should.equal(dict()) + + # List tags when there is one + conn.tag_resource( + Resource=function['FunctionArn'], + Tags=dict(spam='eggs') + )['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + conn.list_tags( + Resource=function['FunctionArn'] + )['Tags'].should.equal(dict(spam='eggs')) + + # List tags when another has been added + conn.tag_resource( + Resource=function['FunctionArn'], + Tags=dict(foo='bar') + )['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + conn.list_tags( + 
Resource=function['FunctionArn'] + )['Tags'].should.equal(dict(spam='eggs', foo='bar')) + + # Untag resource + conn.untag_resource( + Resource=function['FunctionArn'], + TagKeys=['spam', 'trolls'] + )['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + conn.list_tags( + Resource=function['FunctionArn'] + )['Tags'].should.equal(dict(foo='bar')) + + # Untag a tag that does not exist (no error and no change) + conn.untag_resource( + Resource=function['FunctionArn'], + TagKeys=['spam'] + )['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + +@mock_lambda +def test_tags_not_found(): + """ + Test list_tags and tag_resource when the lambda with the given arn does not exist + """ + conn = boto3.client('lambda', 'us-west-2') + conn.list_tags.when.called_with( + Resource='arn:aws:lambda:123456789012:function:not-found' + ).should.throw(botocore.client.ClientError) + + conn.tag_resource.when.called_with( + Resource='arn:aws:lambda:123456789012:function:not-found', + Tags=dict(spam='eggs') + ).should.throw(botocore.client.ClientError) + + conn.untag_resource.when.called_with( + Resource='arn:aws:lambda:123456789012:function:not-found', + TagKeys=['spam'] + ).should.throw(botocore.client.ClientError) From b2423f44acc2fde2bd6c781c05af2fae9095808f Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 13 Sep 2017 15:25:16 -0700 Subject: [PATCH 192/412] bumping to version 1.1.6 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b28ff0e12..7d1c745dd 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.5', + version='1.1.6', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From a379d76cfcbb4a26c565299856dea0240f1479bd Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Wed, 13 Sep 2017 16:44:22 +1200 Subject: [PATCH 193/412] add aws_lamba.invoke_async method --- moto/awslambda/responses.py | 23 
++++++++++++++++++++++- moto/awslambda/urls.py | 1 + tests/test_awslambda/test_lambda.py | 25 +++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index d145f4760..f68f44efa 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -33,13 +33,20 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + def invoke_async(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + if request.method == 'POST': + return self._invoke_async(request, full_url) + else: + raise ValueError("Cannot handle request") + def _invoke(self, request, full_url): response_headers = {} lambda_backend = self.get_lambda_backend(full_url) path = request.path if hasattr(request, 'path') else request.path_url function_name = path.split('/')[-2] - + if lambda_backend.has_function(function_name): fn = lambda_backend.get_function(function_name) payload = fn.invoke(self.body, self.headers, response_headers) @@ -48,6 +55,20 @@ class LambdaResponse(BaseResponse): else: return 404, response_headers, "{}" + def _invoke_async(self, request, full_url): + response_headers = {} + lambda_backend = self.get_lambda_backend(full_url) + + path = request.path if hasattr(request, 'path') else request.path_url + function_name = path.split('/')[-3] + if lambda_backend.has_function(function_name): + fn = lambda_backend.get_function(function_name) + fn.invoke(self.body, self.headers, response_headers) + response_headers['Content-Length'] = str(0) + return 202, response_headers, "" + else: + return 404, response_headers, "{}" + def _list_functions(self, request, full_url, headers): lambda_backend = self.get_lambda_backend(full_url) return 200, {}, json.dumps({ diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index c63135766..d6045d229 100644 --- a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -11,4 +11,5 @@ url_paths = { 
'{0}/(?P[^/]+)/functions/?$': response.root, '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/?$': response.function, '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invocations/?$': response.invoke, + '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invoke-async/?$': response.invoke_async, } diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 007516f56..9e99e1609 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -469,3 +469,28 @@ def test_invoke_lambda_error(): assert 'FunctionError' in result assert result['FunctionError'] == 'Handled' + +@mock_lambda +def test_invoke_async_function(): + conn = boto3.client('lambda', 'us-west-2') + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'ZipFile': get_test_zip_file1(), + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + success_result = conn.invoke_async( + FunctionName='testFunction', + InvokeArgs=json.dumps({ 'test': 'event' }) + ) + + print(success_result) + success_result['Status'].should.equal(202) From d0c610c5ac4a822a8bd6dce3654ceceebc556a09 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Thu, 14 Sep 2017 14:59:13 +0530 Subject: [PATCH 194/412] Added keypair filtering --- moto/ec2/models.py | 46 +++++++++++++++++++++----------- moto/ec2/responses/key_pairs.py | 23 ++++++---------- tests/test_ec2/test_key_pairs.py | 19 +++++++++++++ 3 files changed, 58 insertions(+), 30 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index fca84f0a5..0df3797d8 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -795,16 +795,29 @@ class InstanceBackend(object): return reservations +class KeyPair(object): + def __init__(self, name, fingerprint, material): + self.name = name + self.fingerprint = fingerprint + self.material = material + + def get_filter_value(self, filter_name): + if filter_name == 'key-name': 
+ return self.name + elif filter_name == 'fingerprint': + return self.fingerprint + + class KeyPairBackend(object): def __init__(self): - self.keypairs = defaultdict(dict) + self.keypairs = {} super(KeyPairBackend, self).__init__() def create_key_pair(self, name): if name in self.keypairs: raise InvalidKeyPairDuplicateError(name) - self.keypairs[name] = keypair = random_key_pair() - keypair['name'] = name + keypair = KeyPair(name, **random_key_pair()) + self.keypairs[name] = keypair return keypair def delete_key_pair(self, name): @@ -812,24 +825,27 @@ class KeyPairBackend(object): self.keypairs.pop(name) return True - def describe_key_pairs(self, filter_names=None): + def describe_key_pairs(self, key_names=None, filters=None): results = [] - for name, keypair in self.keypairs.items(): - if not filter_names or name in filter_names: - keypair['name'] = name - results.append(keypair) + if key_names: + results = [keypair for keypair in self.keypairs.values() + if keypair.name in key_names] + if len(key_names) > len(results): + unknown_keys = set(key_names) - set(results) + raise InvalidKeyPairNameError(unknown_keys) + else: + results = self.keypairs.values() - # TODO: Trim error message down to specific invalid name. 
- if filter_names and len(filter_names) > len(results): - raise InvalidKeyPairNameError(filter_names) - - return results + if filters: + return generic_filter(filters, results) + else: + return results def import_key_pair(self, key_name, public_key_material): if key_name in self.keypairs: raise InvalidKeyPairDuplicateError(key_name) - self.keypairs[key_name] = keypair = random_key_pair() - keypair['name'] = key_name + keypair = KeyPair(key_name, **random_key_pair()) + self.keypairs[key_name] = keypair return keypair diff --git a/moto/ec2/responses/key_pairs.py b/moto/ec2/responses/key_pairs.py index 936df2cd3..59268556e 100644 --- a/moto/ec2/responses/key_pairs.py +++ b/moto/ec2/responses/key_pairs.py @@ -11,7 +11,7 @@ class KeyPairs(BaseResponse): if self.is_not_dryrun('CreateKeyPair'): keypair = self.ec2_backend.create_key_pair(name) template = self.response_template(CREATE_KEY_PAIR_RESPONSE) - return template.render(**keypair) + return template.render(keypair=keypair) def delete_key_pair(self): name = self.querystring.get('KeyName')[0] @@ -23,11 +23,7 @@ class KeyPairs(BaseResponse): def describe_key_pairs(self): names = keypair_names_from_querystring(self.querystring) filters = filters_from_querystring(self.querystring) - if len(filters) > 0: - raise NotImplementedError( - 'Using filters in KeyPairs.describe_key_pairs is not yet implemented') - - keypairs = self.ec2_backend.describe_key_pairs(names) + keypairs = self.ec2_backend.describe_key_pairs(names, filters) template = self.response_template(DESCRIBE_KEY_PAIRS_RESPONSE) return template.render(keypairs=keypairs) @@ -37,7 +33,7 @@ class KeyPairs(BaseResponse): if self.is_not_dryrun('ImportKeyPair'): keypair = self.ec2_backend.import_key_pair(name, material) template = self.response_template(IMPORT_KEYPAIR_RESPONSE) - return template.render(**keypair) + return template.render(keypair=keypair) DESCRIBE_KEY_PAIRS_RESPONSE = """ @@ -54,12 +50,9 @@ DESCRIBE_KEY_PAIRS_RESPONSE = """ - {{ name }} - - {{ fingerprint 
}} - - {{ material }} - + {{ keypair.name }} + {{ keypair.fingerprint }} + {{ keypair.material }} """ @@ -71,6 +64,6 @@ DELETE_KEY_PAIR_RESPONSE = """ 471f9fdd-8fe2-4a84-86b0-bd3d3e350979 - {{ name }} - {{ fingerprint }} + {{ keypair.name }} + {{ keypair.fingerprint }} """ diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index ec979a871..0a7fb9f76 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -130,3 +130,22 @@ def test_key_pairs_import_exist(): cm.exception.code.should.equal('InvalidKeyPair.Duplicate') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_key_pair_filters(): + conn = boto.connect_ec2('the_key', 'the_secret') + + _ = conn.create_key_pair('kpfltr1') + kp2 = conn.create_key_pair('kpfltr2') + kp3 = conn.create_key_pair('kpfltr3') + + kp_by_name = conn.get_all_key_pairs( + filters={'key-name': 'kpfltr2'}) + set([kp.name for kp in kp_by_name] + ).should.equal(set([kp2.name])) + + kp_by_name = conn.get_all_key_pairs( + filters={'fingerprint': kp3.fingerprint}) + set([kp.name for kp in kp_by_name] + ).should.equal(set([kp3.name])) From 21df9b9a78baeb98cbebf7918e6ebe73f9b63ace Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 14 Sep 2017 11:14:41 -0700 Subject: [PATCH 195/412] bumping to version 1.1.7 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 7d1c745dd..75ffc4f1e 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.6', + version='1.1.7', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 2c8326e9b9b63fc991904b3465a59b92f8caf3c0 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Fri, 15 Sep 2017 14:49:32 +1200 Subject: [PATCH 196/412] allow get_function for lambda created with zipfile --- moto/awslambda/models.py | 19 +++++++----- 
tests/test_awslambda/test_lambda.py | 46 +++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 7 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 523393ba3..b34e3e9d4 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -111,13 +111,18 @@ class LambdaFunction(BaseModel): } def get_code(self): - return { - "Code": { - "Location": "s3://lambda-functions.aws.amazon.com/{0}".format(self.code['S3Key']), - "RepositoryType": "S3" - }, - "Configuration": self.get_configuration(), - } + if 'S3Key' in self.code: + return { + "Code": { + "Location": "s3://lambda-functions.aws.amazon.com/{0}".format(self.code['S3Key']), + "RepositoryType": "S3" + }, + "Configuration": self.get_configuration(), + } + else: + return { + "Configuration": self.get_configuration(), + } def convert(self, s): try: diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index db75cfd44..b1de685b8 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -579,3 +579,49 @@ def test_invoke_async_function(): ) success_result['Status'].should.equal(202) + +@mock_lambda +@freeze_time('2015-01-01 00:00:00') +def test_get_function_created_with_zipfile(): + conn = boto3.client('lambda', 'us-west-2') + zip_content = get_test_zip_file1() + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'ZipFile': zip_content, + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.get_function( + FunctionName='testFunction' + ) + response['Configuration'].pop('LastModified') + + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + assert 'Code' not in response + response['Configuration'].should.equal( + { + "CodeSha256": hashlib.sha256(zip_content).hexdigest(), + "CodeSize": len(zip_content), + "Description": "test 
lambda function", + "FunctionArn": "arn:aws:lambda:123456789012:function:testFunction", + "FunctionName": "testFunction", + "Handler": "lambda_function.handler", + "MemorySize": 128, + "Role": "test-iam-role", + "Runtime": "python2.7", + "Timeout": 3, + "Version": '$LATEST', + "VpcConfig": { + "SecurityGroupIds": [], + "SubnetIds": [], + } + }, + ) \ No newline at end of file From 101dfaa4129bd7d22fead5e0a72b2b35a423cf52 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Fri, 15 Sep 2017 15:07:02 +1200 Subject: [PATCH 197/412] fix tests for python 3.x --- moto/awslambda/models.py | 2 +- tests/test_awslambda/test_lambda.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index b34e3e9d4..1c489f3fd 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -111,7 +111,7 @@ class LambdaFunction(BaseModel): } def get_code(self): - if 'S3Key' in self.code: + if isinstance(self.code, dict): return { "Code": { "Location": "s3://lambda-functions.aws.amazon.com/{0}".format(self.code['S3Key']), diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index b1de685b8..8a5d84f33 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -624,4 +624,4 @@ def test_get_function_created_with_zipfile(): "SubnetIds": [], } }, - ) \ No newline at end of file + ) From bbd541ce15cb31312d0577ec9a5578e4b58db679 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 14 Sep 2017 22:58:09 -0700 Subject: [PATCH 198/412] bumping to version 1.1.8 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 75ffc4f1e..86d1b0d02 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.7', + version='1.1.8', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 
c116c57eb80c1f2b03b5da31d1b6e83c1d9d79b2 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Fri, 15 Sep 2017 21:30:32 +0530 Subject: [PATCH 199/412] Ensure root device is listed in block device mappings --- moto/ec2/responses/amis.py | 2 +- tests/test_ec2/test_amis.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index 74767aa6b..d9be77812 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -115,7 +115,7 @@ DESCRIBE_IMAGES_RESPONSE = """ Date: Fri, 15 Sep 2017 10:52:44 -0700 Subject: [PATCH 200/412] bumping to version 1.1.9 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 86d1b0d02..9e14bf544 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.8', + version='1.1.9', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 87761298167cbf4382dff08760a105df6108fbc3 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Sat, 16 Sep 2017 01:49:57 +0530 Subject: [PATCH 201/412] Improved support for VPC Address filtering --- moto/ec2/models.py | 44 ++++++++++- moto/ec2/responses/elastic_ip_addresses.py | 29 ++------ tests/test_ec2/test_elastic_ip_addresses.py | 81 +++++++++++++++++++++ 3 files changed, 129 insertions(+), 25 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 0df3797d8..21d9a1e36 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2959,6 +2959,27 @@ class ElasticAddress(object): return self.allocation_id raise UnformattedGetAttTemplateException() + def get_filter_value(self, filter_name): + if filter_name == 'allocation-id': + return self.allocation_id + elif filter_name == 'association-id': + return self.association_id + elif filter_name == 'domain': + return self.domain + elif filter_name == 'instance-id' and self.instance: + return 
self.instance.id + elif filter_name == 'network-interface-id' and self.eni: + return self.eni.id + elif filter_name == 'network-interface-owner-id': + msg = "The filter '{0}' for DescribeAddresses has not been" \ + " implemented in Moto yet. Feel free to open an issue at" \ + " https://github.com/spulec/moto/issues".format(filter_name) + raise NotImplementedError(msg) + elif filter_name == 'private-ip-address' and self.eni: + return self.eni.private_ip_address + elif filter_name == 'public-ip': + return self.public_ip + class ElasticAddressBackend(object): def __init__(self): @@ -3019,6 +3040,9 @@ class ElasticAddressBackend(object): if new_instance_association or new_eni_association or reassociate: eip.instance = instance eip.eni = eni + if not eip.eni and instance: + # default to primary network interface + eip.eni = instance.nics[0] if eip.eni: eip.eni.public_ip = eip.public_ip if eip.domain == "vpc": @@ -3030,8 +3054,24 @@ class ElasticAddressBackend(object): raise ResourceAlreadyAssociatedError(eip.public_ip) - def describe_addresses(self): - return self.addresses + def describe_addresses(self, allocation_ids=None, public_ips=None, filters=None): + matches = self.addresses + if allocation_ids: + matches = [addr for addr in matches + if addr.allocation_id in allocation_ids] + if len(allocation_ids) > len(matches): + unknown_ids = set(allocation_ids) - set(matches) + raise InvalidAllocationIdError(unknown_ids) + if public_ips: + matches = [addr for addr in matches + if addr.public_ip in public_ips] + if len(public_ips) > len(matches): + unknown_ips = set(allocation_ids) - set(matches) + raise InvalidAddressError(unknown_ips) + if filters: + matches = generic_filter(filters, matches) + + return matches def disassociate_address(self, address=None, association_id=None): eips = [] diff --git a/moto/ec2/responses/elastic_ip_addresses.py b/moto/ec2/responses/elastic_ip_addresses.py index a64a33bb5..137f12ec6 100644 --- a/moto/ec2/responses/elastic_ip_addresses.py +++ 
b/moto/ec2/responses/elastic_ip_addresses.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.ec2.utils import sequence_from_querystring +from moto.ec2.utils import filters_from_querystring, sequence_from_querystring class ElasticIPAddresses(BaseResponse): @@ -51,29 +51,12 @@ class ElasticIPAddresses(BaseResponse): return template.render(address=eip) def describe_addresses(self): + allocation_ids = sequence_from_querystring('AllocationId', self.querystring) + public_ips = sequence_from_querystring('PublicIp', self.querystring) + filters = filters_from_querystring(self.querystring) + addresses = self.ec2_backend.describe_addresses( + allocation_ids, public_ips, filters) template = self.response_template(DESCRIBE_ADDRESS_RESPONSE) - - if "Filter.1.Name" in self.querystring: - filter_by = sequence_from_querystring( - "Filter.1.Name", self.querystring)[0] - filter_value = sequence_from_querystring( - "Filter.1.Value", self.querystring) - if filter_by == 'instance-id': - addresses = filter(lambda x: x.instance.id == filter_value[ - 0], self.ec2_backend.describe_addresses()) - else: - raise NotImplementedError( - "Filtering not supported in describe_address.") - elif "PublicIp.1" in self.querystring: - public_ips = sequence_from_querystring( - "PublicIp", self.querystring) - addresses = self.ec2_backend.address_by_ip(public_ips) - elif "AllocationId.1" in self.querystring: - allocation_ids = sequence_from_querystring( - "AllocationId", self.querystring) - addresses = self.ec2_backend.address_by_allocation(allocation_ids) - else: - addresses = self.ec2_backend.describe_addresses() return template.render(addresses=addresses) def disassociate_address(self): diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index 2e1ae189a..824c9402c 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -402,3 +402,84 @@ 
def test_eip_describe_none(): cm.exception.code.should.equal('InvalidAddress.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_eip_filters(): + service = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') + subnet_res = client.create_subnet( + VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') + + def create_inst_with_eip(): + instance = service.create_instances(**{ + 'InstanceType': 't2.micro', + 'ImageId': 'ami-test', + 'MinCount': 1, + 'MaxCount': 1, + 'SubnetId': subnet_res['Subnet']['SubnetId'] + })[0] + allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] + _ = client.associate_address( + InstanceId=instance.id, + AllocationId=allocation_id, + AllowReassociation=False) + instance.load() + address = service.VpcAddress(allocation_id) + address.load() + return instance, address + + inst1, eip1 = create_inst_with_eip() + inst2, eip2 = create_inst_with_eip() + inst3, eip3 = create_inst_with_eip() + + # Param search by AllocationId + addresses = list(service.vpc_addresses.filter(AllocationIds=[eip2.allocation_id])) + len(addresses).should.be.equal(1) + addresses[0].public_ip.should.equal(eip2.public_ip) + inst2.public_ip_address.should.equal(addresses[0].public_ip) + + # Param search by PublicIp + addresses = list(service.vpc_addresses.filter(PublicIps=[eip3.public_ip])) + len(addresses).should.be.equal(1) + addresses[0].public_ip.should.equal(eip3.public_ip) + inst3.public_ip_address.should.equal(addresses[0].public_ip) + + # Param search by Filter + def check_vpc_filter_valid(filter_name, filter_values): + addresses = list(service.vpc_addresses.filter( + Filters=[{'Name': filter_name, + 'Values': filter_values}])) + len(addresses).should.equal(2) + ips = [addr.public_ip for addr in addresses] + set(ips).should.equal(set([eip1.public_ip, eip2.public_ip])) + 
ips.should.contain(inst1.public_ip_address) + + def check_vpc_filter_invalid(filter_name): + addresses = list(service.vpc_addresses.filter( + Filters=[{'Name': filter_name, + 'Values': ['dummy1', 'dummy2']}])) + len(addresses).should.equal(0) + + def check_vpc_filter(filter_name, filter_values): + check_vpc_filter_valid(filter_name, filter_values) + check_vpc_filter_invalid(filter_name) + + check_vpc_filter('allocation-id', [eip1.allocation_id, eip2.allocation_id]) + check_vpc_filter('association-id', [eip1.association_id, eip2.association_id]) + check_vpc_filter('instance-id', [inst1.id, inst2.id]) + check_vpc_filter( + 'network-interface-id', + [inst1.network_interfaces_attribute[0].get('NetworkInterfaceId'), + inst2.network_interfaces_attribute[0].get('NetworkInterfaceId')]) + check_vpc_filter( + 'private-ip-address', + [inst1.network_interfaces_attribute[0].get('PrivateIpAddress'), + inst2.network_interfaces_attribute[0].get('PrivateIpAddress')]) + check_vpc_filter('public-ip', [inst1.public_ip_address, inst2.public_ip_address]) + + # all the ips are in a VPC + addresses = list(service.vpc_addresses.filter( + Filters=[{'Name': 'domain', 'Values': ['vpc']}])) + len(addresses).should.equal(3) From 0953ddde51a89ffc1342ab30548f79705f4ba854 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Sat, 16 Sep 2017 12:26:43 +0530 Subject: [PATCH 202/412] Fix for instance public ip not being cleared on eip disassociation --- moto/ec2/models.py | 9 +-------- tests/test_ec2/test_elastic_ip_addresses.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 21d9a1e36..545a9a3a3 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -590,10 +590,6 @@ class Instance(TaggedEC2Resource, BotoInstance): self.attach_eni(use_nic, device_index) - def set_ip(self, ip_address): - # Should we be creating a new ENI? 
- self.nics[0].public_ip = ip_address - def attach_eni(self, eni, device_index): device_index = int(device_index) self.nics[device_index] = eni @@ -3047,8 +3043,6 @@ class ElasticAddressBackend(object): eip.eni.public_ip = eip.public_ip if eip.domain == "vpc": eip.association_id = random_eip_association_id() - if instance: - instance.set_ip(eip.public_ip) return eip @@ -3082,10 +3076,9 @@ class ElasticAddressBackend(object): eip = eips[0] if eip.eni: + eip.eni.public_ip = None if eip.eni.instance and eip.eni.instance._state.name == "running": eip.eni.check_auto_public_ip() - else: - eip.eni.public_ip = None eip.eni = None eip.instance = None diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index 824c9402c..709bdc33b 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -180,13 +180,31 @@ def test_eip_boto3_vpc_association(): 'SubnetId': subnet_res['Subnet']['SubnetId'] })[0] allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] + address = service.VpcAddress(allocation_id) + address.load() + address.association_id.should.be.none + address.instance_id.should.be.empty + address.network_interface_id.should.be.empty association_id = client.associate_address( InstanceId=instance.id, AllocationId=allocation_id, AllowReassociation=False) instance.load() + address.reload() + address.association_id.should_not.be.none instance.public_ip_address.should_not.be.none instance.public_dns_name.should_not.be.none + address.network_interface_id.should.equal(instance.network_interfaces_attribute[0].get('NetworkInterfaceId')) + address.public_ip.should.equal(instance.public_ip_address) + address.instance_id.should.equal(instance.id) + + client.disassociate_address(AssociationId=address.association_id) + instance.reload() + address.reload() + instance.public_ip_address.should.be.none + address.network_interface_id.should.be.empty + 
address.association_id.should.be.none + address.instance_id.should.be.empty @mock_ec2_deprecated From 6c7b0bdea0328f570f2f67928d231fefdb0917d2 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Sat, 16 Sep 2017 13:56:07 +0530 Subject: [PATCH 203/412] Refactored handling of unknown filter names --- moto/ec2/exceptions.py | 17 ++++ moto/ec2/models.py | 202 +++++++++++++++-------------------------- 2 files changed, 89 insertions(+), 130 deletions(-) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index e5432baf7..5cff527be 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -375,3 +375,20 @@ class RulesPerSecurityGroupLimitExceededError(EC2ClientError): "RulesPerSecurityGroupLimitExceeded", 'The maximum number of rules per security group ' 'has been reached.') + + +class MotoNotImplementedError(NotImplementedError): + + def __init__(self, blurb): + super(MotoNotImplementedError, self).__init__( + "{0} has not been implemented in Moto yet." + " Feel free to open an issue at" + " https://github.com/spulec/moto/issues".format(blurb)) + + +class FilterNotImplementedError(MotoNotImplementedError): + + def __init__(self, filter_name, method_name): + super(FilterNotImplementedError, self).__init__( + "The filter '{0}' for {1}".format( + filter_name, method_name)) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 21d9a1e36..4b9d40caa 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -62,6 +62,8 @@ from .exceptions import ( InvalidVpnConnectionIdError, InvalidCustomerGatewayIdError, RulesPerSecurityGroupLimitExceededError, + MotoNotImplementedError, + FilterNotImplementedError ) from .utils import ( EC2_RESOURCE_TO_PREFIX, @@ -144,7 +146,7 @@ class TaggedEC2Resource(BaseModel): for key, value in tag_map.items(): self.ec2_backend.create_tags([self.id], {key: value}) - def get_filter_value(self, filter_name): + def get_filter_value(self, filter_name, method_name=None): tags = self.get_tags() if filter_name.startswith('tag:'): 
@@ -154,12 +156,12 @@ class TaggedEC2Resource(BaseModel): return tag['value'] return '' - - if filter_name == 'tag-key': + elif filter_name == 'tag-key': return [tag['key'] for tag in tags] - - if filter_name == 'tag-value': + elif filter_name == 'tag-value': return [tag['value'] for tag in tags] + else: + raise FilterNotImplementedError(filter_name, method_name) class NetworkInterface(TaggedEC2Resource): @@ -261,17 +263,9 @@ class NetworkInterface(TaggedEC2Resource): return [group.id for group in self._group_set] elif filter_name == 'availability-zone': return self.subnet.availability_zone - - filter_value = super( - NetworkInterface, self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeNetworkInterfaces".format( - filter_name) - ) - - return filter_value + else: + return super(NetworkInterface, self).get_filter_value( + filter_name, 'DescribeNetworkInterfaces') class NetworkInterfaceBackend(object): @@ -806,6 +800,8 @@ class KeyPair(object): return self.name elif filter_name == 'fingerprint': return self.fingerprint + else: + raise FilterNotImplementedError(filter_name, 'DescribeKeyPairs') class KeyPairBackend(object): @@ -1043,14 +1039,9 @@ class Ami(TaggedEC2Resource): return self.state elif filter_name == 'name': return self.name - - filter_value = super(Ami, self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeImages".format(filter_name)) - - return filter_value + else: + return super(Ami, self).get_filter_value( + filter_name, 'DescribeImages') class AmiBackend(object): @@ -1707,43 +1698,31 @@ class Volume(TaggedEC2Resource): return 'available' def get_filter_value(self, filter_name): - if filter_name.startswith('attachment') and not self.attachment: return None - if filter_name == 'attachment.attach-time': + elif filter_name == 'attachment.attach-time': return 
self.attachment.attach_time - if filter_name == 'attachment.device': + elif filter_name == 'attachment.device': return self.attachment.device - if filter_name == 'attachment.instance-id': + elif filter_name == 'attachment.instance-id': return self.attachment.instance.id - if filter_name == 'attachment.status': + elif filter_name == 'attachment.status': return self.attachment.status - - if filter_name == 'create-time': + elif filter_name == 'create-time': return self.create_time - - if filter_name == 'size': + elif filter_name == 'size': return self.size - - if filter_name == 'snapshot-id': + elif filter_name == 'snapshot-id': return self.snapshot_id - - if filter_name == 'status': + elif filter_name == 'status': return self.status - - if filter_name == 'volume-id': + elif filter_name == 'volume-id': return self.id - - if filter_name == 'encrypted': + elif filter_name == 'encrypted': return str(self.encrypted).lower() - - filter_value = super(Volume, self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeVolumes".format(filter_name)) - - return filter_value + else: + return super(Volume, self).get_filter_value( + filter_name, 'DescribeVolumes') class Snapshot(TaggedEC2Resource): @@ -1758,35 +1737,23 @@ class Snapshot(TaggedEC2Resource): self.encrypted = encrypted def get_filter_value(self, filter_name): - if filter_name == 'description': return self.description - - if filter_name == 'snapshot-id': + elif filter_name == 'snapshot-id': return self.id - - if filter_name == 'start-time': + elif filter_name == 'start-time': return self.start_time - - if filter_name == 'volume-id': + elif filter_name == 'volume-id': return self.volume.id - - if filter_name == 'volume-size': + elif filter_name == 'volume-size': return self.volume.size - - if filter_name == 'encrypted': + elif filter_name == 'encrypted': return str(self.encrypted).lower() - - if filter_name == 'status': + elif 
filter_name == 'status': return self.status - - filter_value = super(Snapshot, self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeSnapshots".format(filter_name)) - - return filter_value + else: + return super(Snapshot, self).get_filter_value( + filter_name, 'DescribeSnapshots') class EBSBackend(object): @@ -1948,16 +1915,10 @@ class VPC(TaggedEC2Resource): elif filter_name in ('dhcp-options-id', 'dhcpOptionsId'): if not self.dhcp_options: return None - return self.dhcp_options.id - - filter_value = super(VPC, self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeVPCs".format(filter_name)) - - return filter_value + else: + return super(VPC, self).get_filter_value( + filter_name, 'DescribeVpcs') class VPCBackend(object): @@ -2191,14 +2152,9 @@ class Subnet(TaggedEC2Resource): return self.availability_zone elif filter_name in ('defaultForAz', 'default-for-az'): return self.default_for_az - - filter_value = super(Subnet, self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeSubnets".format(filter_name)) - - return filter_value + else: + return super(Subnet, self).get_filter_value( + filter_name, 'DescribeSubnets') def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException @@ -2336,14 +2292,9 @@ class RouteTable(TaggedEC2Resource): return self.associations.keys() elif filter_name == "association.subnet-id": return self.associations.values() - - filter_value = super(RouteTable, self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeRouteTables".format(filter_name)) - - return filter_value + else: + return super(RouteTable, self).get_filter_value( + 
filter_name, 'DescribeRouteTables') class RouteTableBackend(object): @@ -2690,16 +2641,11 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): def get_filter_value(self, filter_name): if filter_name == 'state': return self.state - if filter_name == 'spot-instance-request-id': + elif filter_name == 'spot-instance-request-id': return self.id - filter_value = super(SpotInstanceRequest, - self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeSpotInstanceRequests".format(filter_name)) - - return filter_value + else: + return super(SpotInstanceRequest, self).get_filter_value( + filter_name, 'DescribeSpotInstanceRequests') def launch_instance(self): reservation = self.ec2_backend.add_instances( @@ -2970,15 +2916,13 @@ class ElasticAddress(object): return self.instance.id elif filter_name == 'network-interface-id' and self.eni: return self.eni.id - elif filter_name == 'network-interface-owner-id': - msg = "The filter '{0}' for DescribeAddresses has not been" \ - " implemented in Moto yet. 
Feel free to open an issue at" \ - " https://github.com/spulec/moto/issues".format(filter_name) - raise NotImplementedError(msg) elif filter_name == 'private-ip-address' and self.eni: return self.eni.private_ip_address elif filter_name == 'public-ip': return self.public_ip + else: + # TODO: implement network-interface-owner-id + raise FilterNotImplementedError(filter_name, 'DescribeAddresses') class ElasticAddressBackend(object): @@ -3141,15 +3085,9 @@ class DHCPOptionsSet(TaggedEC2Resource): elif filter_name == 'value': values = [item for item in list(self._options.values()) if item] return itertools.chain(*values) - - filter_value = super( - DHCPOptionsSet, self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeDhcpOptions".format(filter_name)) - - return filter_value + else: + return super(DHCPOptionsSet, self).get_filter_value( + filter_name, 'DescribeDhcpOptions') @property def options(self): @@ -3236,6 +3174,10 @@ class VPNConnection(TaggedEC2Resource): self.options = None self.static_routes = None + def get_filter_value(self, filter_name): + return super(VPNConnection, self).get_filter_value( + filter_name, 'DescribeVpnConnections') + class VPNConnectionBackend(object): def __init__(self): @@ -3415,14 +3357,9 @@ class NetworkAcl(TaggedEC2Resource): return self.id elif filter_name == "association.subnet-id": return [assoc.subnet_id for assoc in self.associations.values()] - - filter_value = super(NetworkAcl, self).get_filter_value(filter_name) - - if filter_value is None: - self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for DescribeNetworkAcls".format(filter_name)) - - return filter_value + else: + return super(NetworkAcl, self).get_filter_value( + filter_name, 'DescribeNetworkAcls') class NetworkAclEntry(TaggedEC2Resource): @@ -3451,6 +3388,10 @@ class VpnGateway(TaggedEC2Resource): self.attachments = {} super(VpnGateway, self).__init__() + def 
get_filter_value(self, filter_name): + return super(VpnGateway, self).get_filter_value( + filter_name, 'DescribeVpnGateways') + class VpnGatewayAttachment(object): def __init__(self, vpc_id, state): @@ -3512,6 +3453,10 @@ class CustomerGateway(TaggedEC2Resource): self.attachments = {} super(CustomerGateway, self).__init__() + def get_filter_value(self, filter_name): + return super(CustomerGateway, self).get_filter_value( + filter_name, 'DescribeCustomerGateways') + class CustomerGatewayBackend(object): def __init__(self): @@ -3655,10 +3600,7 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, raise EC2ClientError(code, message) def raise_not_implemented_error(self, blurb): - msg = "{0} has not been implemented in Moto yet." \ - " Feel free to open an issue at" \ - " https://github.com/spulec/moto/issues".format(blurb) - raise NotImplementedError(msg) + raise MotoNotImplementedError(blurb) def do_resources_exist(self, resource_ids): for resource_id in resource_ids: From 17d62d9266c29bb2db9e7282b2d15d47813f287a Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Sat, 16 Sep 2017 17:08:21 +0530 Subject: [PATCH 204/412] Replaced redundant sequence_from_querystring with inherited _get_multi_param --- moto/ec2/responses/amis.py | 12 ++-- moto/ec2/responses/dhcp_options.py | 4 +- moto/ec2/responses/elastic_ip_addresses.py | 6 +- .../responses/elastic_network_interfaces.py | 7 +- moto/ec2/responses/general.py | 4 +- moto/ec2/responses/instances.py | 26 ++++---- moto/ec2/responses/internet_gateways.py | 4 +- moto/ec2/responses/key_pairs.py | 4 +- moto/ec2/responses/network_acls.py | 5 +- moto/ec2/responses/route_tables.py | 4 +- moto/ec2/responses/tags.py | 8 +-- moto/ec2/responses/vpcs.py | 4 +- moto/ec2/responses/vpn_connections.py | 5 +- moto/ec2/utils.py | 64 ------------------- 14 files changed, 38 insertions(+), 119 deletions(-) diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index d9be77812..967f23e87 100755 --- 
a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.ec2.utils import instance_ids_from_querystring, image_ids_from_querystring, \ - filters_from_querystring, sequence_from_querystring, executable_users_from_querystring +from moto.ec2.utils import filters_from_querystring class AmisResponse(BaseResponse): @@ -12,8 +11,7 @@ class AmisResponse(BaseResponse): description = self.querystring.get('Description')[0] else: description = "" - instance_ids = instance_ids_from_querystring(self.querystring) - instance_id = instance_ids[0] + instance_id = self._get_param('InstanceId') if self.is_not_dryrun('CreateImage'): image = self.ec2_backend.create_image( instance_id, name, description) @@ -41,9 +39,9 @@ class AmisResponse(BaseResponse): return template.render(success=str(success).lower()) def describe_images(self): - ami_ids = image_ids_from_querystring(self.querystring) + ami_ids = self._get_multi_param('ImageId') filters = filters_from_querystring(self.querystring) - exec_users = executable_users_from_querystring(self.querystring) + exec_users = self._get_multi_param('ExecutableBy') images = self.ec2_backend.describe_images( ami_ids=ami_ids, filters=filters, exec_users=exec_users) template = self.response_template(DESCRIBE_IMAGES_RESPONSE) @@ -60,7 +58,7 @@ class AmisResponse(BaseResponse): ami_id = self.querystring.get('ImageId')[0] operation_type = self.querystring.get('OperationType')[0] group = self.querystring.get('UserGroup.1', [None])[0] - user_ids = sequence_from_querystring('UserId', self.querystring) + user_ids = self._get_multi_param('UserId') if self.is_not_dryrun('ModifyImageAttribute'): if (operation_type == 'add'): self.ec2_backend.add_launch_permission( diff --git a/moto/ec2/responses/dhcp_options.py b/moto/ec2/responses/dhcp_options.py index 450ef1bf9..966d2e3ac 100644 --- a/moto/ec2/responses/dhcp_options.py +++ 
b/moto/ec2/responses/dhcp_options.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse from moto.ec2.utils import ( filters_from_querystring, - sequence_from_querystring, dhcp_configuration_from_querystring) @@ -49,8 +48,7 @@ class DHCPOptions(BaseResponse): return template.render(delete_status=delete_status) def describe_dhcp_options(self): - dhcp_opt_ids = sequence_from_querystring( - "DhcpOptionsId", self.querystring) + dhcp_opt_ids = self._get_multi_param("DhcpOptionsId") filters = filters_from_querystring(self.querystring) dhcp_opts = self.ec2_backend.get_all_dhcp_options( dhcp_opt_ids, filters) diff --git a/moto/ec2/responses/elastic_ip_addresses.py b/moto/ec2/responses/elastic_ip_addresses.py index 137f12ec6..1a60efcde 100644 --- a/moto/ec2/responses/elastic_ip_addresses.py +++ b/moto/ec2/responses/elastic_ip_addresses.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.ec2.utils import filters_from_querystring, sequence_from_querystring +from moto.ec2.utils import filters_from_querystring class ElasticIPAddresses(BaseResponse): @@ -51,8 +51,8 @@ class ElasticIPAddresses(BaseResponse): return template.render(address=eip) def describe_addresses(self): - allocation_ids = sequence_from_querystring('AllocationId', self.querystring) - public_ips = sequence_from_querystring('PublicIp', self.querystring) + allocation_ids = self._get_multi_param('AllocationId') + public_ips = self._get_multi_param('PublicIp') filters = filters_from_querystring(self.querystring) addresses = self.ec2_backend.describe_addresses( allocation_ids, public_ips, filters) diff --git a/moto/ec2/responses/elastic_network_interfaces.py b/moto/ec2/responses/elastic_network_interfaces.py index cbe76e306..ec99f2da9 100644 --- a/moto/ec2/responses/elastic_network_interfaces.py +++ b/moto/ec2/responses/elastic_network_interfaces.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals 
from moto.core.responses import BaseResponse -from moto.ec2.utils import sequence_from_querystring, filters_from_querystring +from moto.ec2.utils import filters_from_querystring class ElasticNetworkInterfaces(BaseResponse): @@ -9,7 +9,7 @@ class ElasticNetworkInterfaces(BaseResponse): subnet_id = self.querystring.get('SubnetId')[0] private_ip_address = self.querystring.get( 'PrivateIpAddress', [None])[0] - groups = sequence_from_querystring('SecurityGroupId', self.querystring) + groups = self._get_multi_param('SecurityGroupId') subnet = self.ec2_backend.get_subnet(subnet_id) if self.is_not_dryrun('CreateNetworkInterface'): eni = self.ec2_backend.create_network_interface( @@ -31,8 +31,7 @@ class ElasticNetworkInterfaces(BaseResponse): 'ElasticNetworkInterfaces(AmazonVPC).describe_network_interface_attribute is not yet implemented') def describe_network_interfaces(self): - eni_ids = sequence_from_querystring( - 'NetworkInterfaceId', self.querystring) + eni_ids = self._get_multi_param('NetworkInterfaceId') filters = filters_from_querystring(self.querystring) enis = self.ec2_backend.get_all_network_interfaces(eni_ids, filters) template = self.response_template(DESCRIBE_NETWORK_INTERFACES_RESPONSE) diff --git a/moto/ec2/responses/general.py b/moto/ec2/responses/general.py index bd95c1975..9add43d3e 100644 --- a/moto/ec2/responses/general.py +++ b/moto/ec2/responses/general.py @@ -1,13 +1,11 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.ec2.utils import instance_ids_from_querystring class General(BaseResponse): def get_console_output(self): - self.instance_ids = instance_ids_from_querystring(self.querystring) - instance_id = self.instance_ids[0] + instance_id = self._get_multi_param('InstanceId')[0] instance = self.ec2_backend.get_instance(instance_id) template = self.response_template(GET_CONSOLE_OUTPUT_RESULT) return template.render(instance=instance) diff --git a/moto/ec2/responses/instances.py 
b/moto/ec2/responses/instances.py index 20c04668e..b4bf531a8 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals from boto.ec2.instancetype import InstanceType from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores -from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, \ +from moto.ec2.utils import filters_from_querystring, \ dict_from_querystring, optional_from_querystring @@ -10,7 +10,7 @@ class InstanceResponse(BaseResponse): def describe_instances(self): filter_dict = filters_from_querystring(self.querystring) - instance_ids = instance_ids_from_querystring(self.querystring) + instance_ids = self._get_multi_param('InstanceId') token = self._get_param("NextToken") if instance_ids: reservations = self.ec2_backend.get_reservations_by_instance_ids( @@ -62,35 +62,35 @@ class InstanceResponse(BaseResponse): return template.render(reservation=new_reservation) def terminate_instances(self): - instance_ids = instance_ids_from_querystring(self.querystring) + instance_ids = self._get_multi_param('InstanceId') if self.is_not_dryrun('TerminateInstance'): instances = self.ec2_backend.terminate_instances(instance_ids) template = self.response_template(EC2_TERMINATE_INSTANCES) return template.render(instances=instances) def reboot_instances(self): - instance_ids = instance_ids_from_querystring(self.querystring) + instance_ids = self._get_multi_param('InstanceId') if self.is_not_dryrun('RebootInstance'): instances = self.ec2_backend.reboot_instances(instance_ids) template = self.response_template(EC2_REBOOT_INSTANCES) return template.render(instances=instances) def stop_instances(self): - instance_ids = instance_ids_from_querystring(self.querystring) + instance_ids = self._get_multi_param('InstanceId') if self.is_not_dryrun('StopInstance'): instances = self.ec2_backend.stop_instances(instance_ids) template = 
self.response_template(EC2_STOP_INSTANCES) return template.render(instances=instances) def start_instances(self): - instance_ids = instance_ids_from_querystring(self.querystring) + instance_ids = self._get_multi_param('InstanceId') if self.is_not_dryrun('StartInstance'): instances = self.ec2_backend.start_instances(instance_ids) template = self.response_template(EC2_START_INSTANCES) return template.render(instances=instances) def describe_instance_status(self): - instance_ids = instance_ids_from_querystring(self.querystring) + instance_ids = self._get_multi_param('InstanceId') include_all_instances = optional_from_querystring('IncludeAllInstances', self.querystring) == 'true' @@ -116,8 +116,7 @@ class InstanceResponse(BaseResponse): # instance not in stopped state attribute = self.querystring.get("Attribute")[0] key = camelcase_to_underscores(attribute) - instance_ids = instance_ids_from_querystring(self.querystring) - instance_id = instance_ids[0] + instance_id = self._get_param('InstanceId') instance, value = self.ec2_backend.describe_instance_attribute( instance_id, key) @@ -171,8 +170,7 @@ class InstanceResponse(BaseResponse): del_on_term_value = True if 'true' == del_on_term_value_str else False device_name_value = self.querystring[mapping_device_name][0] - instance_ids = instance_ids_from_querystring(self.querystring) - instance_id = instance_ids[0] + instance_id = self._get_param('InstanceId') instance = self.ec2_backend.get_instance(instance_id) if self.is_not_dryrun('ModifyInstanceAttribute'): @@ -200,8 +198,7 @@ class InstanceResponse(BaseResponse): value = self.querystring.get(attribute_key)[0] normalized_attribute = camelcase_to_underscores( attribute_key.split(".")[0]) - instance_ids = instance_ids_from_querystring(self.querystring) - instance_id = instance_ids[0] + instance_id = self._get_param('InstanceId') self.ec2_backend.modify_instance_attribute( instance_id, normalized_attribute, value) return EC2_MODIFY_INSTANCE_ATTRIBUTE @@ -212,8 +209,7 @@ 
class InstanceResponse(BaseResponse): if 'GroupId.' in key: new_security_grp_list.append(self.querystring.get(key)[0]) - instance_ids = instance_ids_from_querystring(self.querystring) - instance_id = instance_ids[0] + instance_id = self._get_param('InstanceId') if self.is_not_dryrun('ModifyInstanceSecurityGroups'): self.ec2_backend.modify_instance_security_groups( instance_id, new_security_grp_list) diff --git a/moto/ec2/responses/internet_gateways.py b/moto/ec2/responses/internet_gateways.py index 4a3da0b34..d6a0b19de 100644 --- a/moto/ec2/responses/internet_gateways.py +++ b/moto/ec2/responses/internet_gateways.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse from moto.ec2.utils import ( - sequence_from_querystring, filters_from_querystring, ) @@ -32,8 +31,7 @@ class InternetGateways(BaseResponse): def describe_internet_gateways(self): filter_dict = filters_from_querystring(self.querystring) if "InternetGatewayId.1" in self.querystring: - igw_ids = sequence_from_querystring( - "InternetGatewayId", self.querystring) + igw_ids = self._get_multi_param("InternetGatewayId") igws = self.ec2_backend.describe_internet_gateways( igw_ids, filters=filter_dict) else: diff --git a/moto/ec2/responses/key_pairs.py b/moto/ec2/responses/key_pairs.py index 59268556e..2dab918a7 100644 --- a/moto/ec2/responses/key_pairs.py +++ b/moto/ec2/responses/key_pairs.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals import six from moto.core.responses import BaseResponse -from moto.ec2.utils import keypair_names_from_querystring, filters_from_querystring +from moto.ec2.utils import filters_from_querystring class KeyPairs(BaseResponse): @@ -21,7 +21,7 @@ class KeyPairs(BaseResponse): return self.response_template(DELETE_KEY_PAIR_RESPONSE).render(success=success) def describe_key_pairs(self): - names = keypair_names_from_querystring(self.querystring) + names = self._get_multi_param('KeyName') filters = 
filters_from_querystring(self.querystring) keypairs = self.ec2_backend.describe_key_pairs(names, filters) template = self.response_template(DESCRIBE_KEY_PAIRS_RESPONSE) diff --git a/moto/ec2/responses/network_acls.py b/moto/ec2/responses/network_acls.py index 440069edc..a0598c73b 100644 --- a/moto/ec2/responses/network_acls.py +++ b/moto/ec2/responses/network_acls.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.ec2.utils import filters_from_querystring, \ - network_acl_ids_from_querystring +from moto.ec2.utils import filters_from_querystring class NetworkACLs(BaseResponse): @@ -67,7 +66,7 @@ class NetworkACLs(BaseResponse): return template.render() def describe_network_acls(self): - network_acl_ids = network_acl_ids_from_querystring(self.querystring) + network_acl_ids = self._get_multi_param('NetworkAclId') filters = filters_from_querystring(self.querystring) network_acls = self.ec2_backend.get_all_network_acls( network_acl_ids, filters) diff --git a/moto/ec2/responses/route_tables.py b/moto/ec2/responses/route_tables.py index 6f68a6553..30eef8159 100644 --- a/moto/ec2/responses/route_tables.py +++ b/moto/ec2/responses/route_tables.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.ec2.utils import route_table_ids_from_querystring, filters_from_querystring, optional_from_querystring +from moto.ec2.utils import filters_from_querystring, optional_from_querystring class RouteTables(BaseResponse): @@ -55,7 +55,7 @@ class RouteTables(BaseResponse): return template.render() def describe_route_tables(self): - route_table_ids = route_table_ids_from_querystring(self.querystring) + route_table_ids = self._get_multi_param('RouteTableId') filters = filters_from_querystring(self.querystring) route_tables = self.ec2_backend.get_all_route_tables( route_table_ids, filters) diff --git a/moto/ec2/responses/tags.py b/moto/ec2/responses/tags.py index 
a747067fb..65d3da255 100644 --- a/moto/ec2/responses/tags.py +++ b/moto/ec2/responses/tags.py @@ -2,14 +2,13 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse from moto.ec2.models import validate_resource_ids -from moto.ec2.utils import sequence_from_querystring, tags_from_query_string, filters_from_querystring +from moto.ec2.utils import tags_from_query_string, filters_from_querystring class TagResponse(BaseResponse): def create_tags(self): - resource_ids = sequence_from_querystring( - 'ResourceId', self.querystring) + resource_ids = self._get_multi_param('ResourceId') validate_resource_ids(resource_ids) self.ec2_backend.do_resources_exist(resource_ids) tags = tags_from_query_string(self.querystring) @@ -18,8 +17,7 @@ class TagResponse(BaseResponse): return CREATE_RESPONSE def delete_tags(self): - resource_ids = sequence_from_querystring( - 'ResourceId', self.querystring) + resource_ids = self._get_multi_param('ResourceId') validate_resource_ids(resource_ids) tags = tags_from_query_string(self.querystring) if self.is_not_dryrun('DeleteTags'): diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 129f91a3b..17309fe05 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores -from moto.ec2.utils import filters_from_querystring, vpc_ids_from_querystring +from moto.ec2.utils import filters_from_querystring class VPCs(BaseResponse): @@ -21,7 +21,7 @@ class VPCs(BaseResponse): return template.render(vpc=vpc) def describe_vpcs(self): - vpc_ids = vpc_ids_from_querystring(self.querystring) + vpc_ids = self._get_multi_param('VpcId') filters = filters_from_querystring(self.querystring) vpcs = self.ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters) template = self.response_template(DESCRIBE_VPCS_RESPONSE) diff --git 
a/moto/ec2/responses/vpn_connections.py b/moto/ec2/responses/vpn_connections.py index 2a4a7ef99..e24515ae6 100644 --- a/moto/ec2/responses/vpn_connections.py +++ b/moto/ec2/responses/vpn_connections.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.ec2.utils import filters_from_querystring, sequence_from_querystring +from moto.ec2.utils import filters_from_querystring class VPNConnections(BaseResponse): @@ -23,8 +23,7 @@ class VPNConnections(BaseResponse): return template.render(vpn_connection=vpn_connection) def describe_vpn_connections(self): - vpn_connection_ids = sequence_from_querystring( - 'VpnConnectionId', self.querystring) + vpn_connection_ids = self._get_multi_param('VpnConnectionId') filters = filters_from_querystring(self.querystring) vpn_connections = self.ec2_backend.get_all_vpn_connections( vpn_connection_ids=vpn_connection_ids, filters=filters) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 8f86d0a8d..543ac4a99 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -174,62 +174,6 @@ def split_route_id(route_id): return values[0], values[1] -def instance_ids_from_querystring(querystring_dict): - instance_ids = [] - for key, value in querystring_dict.items(): - if 'InstanceId' in key: - instance_ids.append(value[0]) - return instance_ids - - -def image_ids_from_querystring(querystring_dict): - image_ids = [] - for key, value in querystring_dict.items(): - if 'ImageId' in key: - image_ids.append(value[0]) - return image_ids - - -def executable_users_from_querystring(querystring_dict): - user_ids = [] - for key, value in querystring_dict.items(): - if 'ExecutableBy' in key: - user_ids.append(value[0]) - return user_ids - - -def route_table_ids_from_querystring(querystring_dict): - route_table_ids = [] - for key, value in querystring_dict.items(): - if 'RouteTableId' in key: - route_table_ids.append(value[0]) - return route_table_ids - - -def 
network_acl_ids_from_querystring(querystring_dict): - network_acl_ids = [] - for key, value in querystring_dict.items(): - if 'NetworkAclId' in key: - network_acl_ids.append(value[0]) - return network_acl_ids - - -def vpc_ids_from_querystring(querystring_dict): - vpc_ids = [] - for key, value in querystring_dict.items(): - if 'VpcId' in key: - vpc_ids.append(value[0]) - return vpc_ids - - -def sequence_from_querystring(parameter, querystring_dict): - parameter_values = [] - for key, value in querystring_dict.items(): - if parameter in key: - parameter_values.append(value[0]) - return parameter_values - - def tags_from_query_string(querystring_dict): prefix = 'Tag' suffix = 'Key' @@ -319,14 +263,6 @@ def dict_from_querystring(parameter, querystring_dict): return use_dict -def keypair_names_from_querystring(querystring_dict): - keypair_names = [] - for key, value in querystring_dict.items(): - if 'KeyName' in key: - keypair_names.append(value[0]) - return keypair_names - - def get_object_value(obj, attr): keys = attr.split('.') val = obj From 7ed1036ba8dcf24c554ac8e51119c0e87d71dd75 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Sat, 16 Sep 2017 18:31:30 +0530 Subject: [PATCH 205/412] Replaced direct querystring access with inherited _get_param --- moto/ec2/responses/amis.py | 25 ++-- moto/ec2/responses/customer_gateways.py | 8 +- moto/ec2/responses/dhcp_options.py | 6 +- moto/ec2/responses/elastic_block_store.py | 48 ++++---- moto/ec2/responses/elastic_ip_addresses.py | 29 +++-- .../responses/elastic_network_interfaces.py | 19 ++- moto/ec2/responses/instances.py | 27 ++--- moto/ec2/responses/internet_gateways.py | 10 +- moto/ec2/responses/key_pairs.py | 8 +- moto/ec2/responses/network_acls.py | 54 ++++----- moto/ec2/responses/route_tables.py | 53 ++++----- moto/ec2/responses/security_groups.py | 109 +++++++++--------- moto/ec2/responses/subnets.py | 30 ++--- .../ec2/responses/virtual_private_gateways.py | 12 +- moto/ec2/responses/vpc_peering_connections.py | 
11 +- moto/ec2/responses/vpcs.py | 13 +-- moto/ec2/responses/vpn_connections.py | 10 +- moto/ec2/utils.py | 5 - 18 files changed, 215 insertions(+), 262 deletions(-) diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index 967f23e87..c92471093 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -7,10 +7,7 @@ class AmisResponse(BaseResponse): def create_image(self): name = self.querystring.get('Name')[0] - if "Description" in self.querystring: - description = self.querystring.get('Description')[0] - else: - description = "" + description = self._get_param('Description', if_none='') instance_id = self._get_param('InstanceId') if self.is_not_dryrun('CreateImage'): image = self.ec2_backend.create_image( @@ -19,12 +16,10 @@ class AmisResponse(BaseResponse): return template.render(image=image) def copy_image(self): - source_image_id = self.querystring.get('SourceImageId')[0] - source_region = self.querystring.get('SourceRegion')[0] - name = self.querystring.get( - 'Name')[0] if self.querystring.get('Name') else None - description = self.querystring.get( - 'Description')[0] if self.querystring.get('Description') else None + source_image_id = self._get_param('SourceImageId') + source_region = self._get_param('SourceRegion') + name = self._get_param('Name') + description = self._get_param('Description') if self.is_not_dryrun('CopyImage'): image = self.ec2_backend.copy_image( source_image_id, source_region, name, description) @@ -32,7 +27,7 @@ class AmisResponse(BaseResponse): return template.render(image=image) def deregister_image(self): - ami_id = self.querystring.get('ImageId')[0] + ami_id = self._get_param('ImageId') if self.is_not_dryrun('DeregisterImage'): success = self.ec2_backend.deregister_image(ami_id) template = self.response_template(DEREGISTER_IMAGE_RESPONSE) @@ -48,16 +43,16 @@ class AmisResponse(BaseResponse): return template.render(images=images) def describe_image_attribute(self): - ami_id = 
self.querystring.get('ImageId')[0] + ami_id = self._get_param('ImageId') groups = self.ec2_backend.get_launch_permission_groups(ami_id) users = self.ec2_backend.get_launch_permission_users(ami_id) template = self.response_template(DESCRIBE_IMAGE_ATTRIBUTES_RESPONSE) return template.render(ami_id=ami_id, groups=groups, users=users) def modify_image_attribute(self): - ami_id = self.querystring.get('ImageId')[0] - operation_type = self.querystring.get('OperationType')[0] - group = self.querystring.get('UserGroup.1', [None])[0] + ami_id = self._get_param('ImageId') + operation_type = self._get_param('OperationType') + group = self._get_param('UserGroup.1') user_ids = self._get_multi_param('UserId') if self.is_not_dryrun('ModifyImageAttribute'): if (operation_type == 'add'): diff --git a/moto/ec2/responses/customer_gateways.py b/moto/ec2/responses/customer_gateways.py index 6da2ed2f8..866b93045 100644 --- a/moto/ec2/responses/customer_gateways.py +++ b/moto/ec2/responses/customer_gateways.py @@ -7,16 +7,16 @@ class CustomerGateways(BaseResponse): def create_customer_gateway(self): # raise NotImplementedError('CustomerGateways(AmazonVPC).create_customer_gateway is not yet implemented') - type = self.querystring.get('Type', None)[0] - ip_address = self.querystring.get('IpAddress', None)[0] - bgp_asn = self.querystring.get('BgpAsn', None)[0] + type = self._get_param('Type') + ip_address = self._get_param('IpAddress') + bgp_asn = self._get_param('BgpAsn') customer_gateway = self.ec2_backend.create_customer_gateway( type, ip_address=ip_address, bgp_asn=bgp_asn) template = self.response_template(CREATE_CUSTOMER_GATEWAY_RESPONSE) return template.render(customer_gateway=customer_gateway) def delete_customer_gateway(self): - customer_gateway_id = self.querystring.get('CustomerGatewayId')[0] + customer_gateway_id = self._get_param('CustomerGatewayId') delete_status = self.ec2_backend.delete_customer_gateway( customer_gateway_id) template = 
self.response_template(DELETE_CUSTOMER_GATEWAY_RESPONSE) diff --git a/moto/ec2/responses/dhcp_options.py b/moto/ec2/responses/dhcp_options.py index 966d2e3ac..1f740d14b 100644 --- a/moto/ec2/responses/dhcp_options.py +++ b/moto/ec2/responses/dhcp_options.py @@ -8,8 +8,8 @@ from moto.ec2.utils import ( class DHCPOptions(BaseResponse): def associate_dhcp_options(self): - dhcp_opt_id = self.querystring.get("DhcpOptionsId", [None])[0] - vpc_id = self.querystring.get("VpcId", [None])[0] + dhcp_opt_id = self._get_param('DhcpOptionsId') + vpc_id = self._get_param('VpcId') dhcp_opt = self.ec2_backend.describe_dhcp_options([dhcp_opt_id])[0] vpc = self.ec2_backend.get_vpc(vpc_id) @@ -42,7 +42,7 @@ class DHCPOptions(BaseResponse): return template.render(dhcp_options_set=dhcp_options_set) def delete_dhcp_options(self): - dhcp_opt_id = self.querystring.get("DhcpOptionsId", [None])[0] + dhcp_opt_id = self._get_param('DhcpOptionsId') delete_status = self.ec2_backend.delete_dhcp_options_set(dhcp_opt_id) template = self.response_template(DELETE_DHCP_OPTIONS_RESPONSE) return template.render(delete_status=delete_status) diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index 8dfd9229c..8f12dc918 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -6,9 +6,9 @@ from moto.ec2.utils import filters_from_querystring class ElasticBlockStore(BaseResponse): def attach_volume(self): - volume_id = self.querystring.get('VolumeId')[0] - instance_id = self.querystring.get('InstanceId')[0] - device_path = self.querystring.get('Device')[0] + volume_id = self._get_param('VolumeId') + instance_id = self._get_param('InstanceId') + device_path = self._get_param('Device') if self.is_not_dryrun('AttachVolume'): attachment = self.ec2_backend.attach_volume( volume_id, instance_id, device_path) @@ -21,18 +21,18 @@ class ElasticBlockStore(BaseResponse): 'ElasticBlockStore.copy_snapshot is not yet 
implemented') def create_snapshot(self): - description = self.querystring.get('Description', [None])[0] - volume_id = self.querystring.get('VolumeId')[0] + volume_id = self._get_param('VolumeId') + description = self._get_param('Description') if self.is_not_dryrun('CreateSnapshot'): snapshot = self.ec2_backend.create_snapshot(volume_id, description) template = self.response_template(CREATE_SNAPSHOT_RESPONSE) return template.render(snapshot=snapshot) def create_volume(self): - size = self.querystring.get('Size', [None])[0] - zone = self.querystring.get('AvailabilityZone', [None])[0] - snapshot_id = self.querystring.get('SnapshotId', [None])[0] - encrypted = self.querystring.get('Encrypted', ['false'])[0] + size = self._get_param('Size') + zone = self._get_param('AvailabilityZone') + snapshot_id = self._get_param('SnapshotId') + encrypted = self._get_param('Encrypted', if_none=False) if self.is_not_dryrun('CreateVolume'): volume = self.ec2_backend.create_volume( size, zone, snapshot_id, encrypted) @@ -40,23 +40,20 @@ class ElasticBlockStore(BaseResponse): return template.render(volume=volume) def delete_snapshot(self): - snapshot_id = self.querystring.get('SnapshotId')[0] + snapshot_id = self._get_param('SnapshotId') if self.is_not_dryrun('DeleteSnapshot'): self.ec2_backend.delete_snapshot(snapshot_id) return DELETE_SNAPSHOT_RESPONSE def delete_volume(self): - volume_id = self.querystring.get('VolumeId')[0] + volume_id = self._get_param('VolumeId') if self.is_not_dryrun('DeleteVolume'): self.ec2_backend.delete_volume(volume_id) return DELETE_VOLUME_RESPONSE def describe_snapshots(self): filters = filters_from_querystring(self.querystring) - # querystring for multiple snapshotids results in SnapshotId.1, - # SnapshotId.2 etc - snapshot_ids = ','.join( - [','.join(s[1]) for s in self.querystring.items() if 'SnapshotId' in s[0]]) + snapshot_ids = self._get_multi_param('SnapshotId') snapshots = self.ec2_backend.describe_snapshots(filters=filters) # Describe snapshots to 
handle filter on snapshot_ids snapshots = [ @@ -66,10 +63,7 @@ class ElasticBlockStore(BaseResponse): def describe_volumes(self): filters = filters_from_querystring(self.querystring) - # querystring for multiple volumeids results in VolumeId.1, VolumeId.2 - # etc - volume_ids = ','.join( - [','.join(v[1]) for v in self.querystring.items() if 'VolumeId' in v[0]]) + volume_ids = self._get_multi_param('VolumeId') volumes = self.ec2_backend.describe_volumes(filters=filters) # Describe volumes to handle filter on volume_ids volumes = [ @@ -86,9 +80,9 @@ class ElasticBlockStore(BaseResponse): 'ElasticBlockStore.describe_volume_status is not yet implemented') def detach_volume(self): - volume_id = self.querystring.get('VolumeId')[0] - instance_id = self.querystring.get('InstanceId')[0] - device_path = self.querystring.get('Device')[0] + volume_id = self._get_param('VolumeId') + instance_id = self._get_param('InstanceId') + device_path = self._get_param('Device') if self.is_not_dryrun('DetachVolume'): attachment = self.ec2_backend.detach_volume( volume_id, instance_id, device_path) @@ -106,7 +100,7 @@ class ElasticBlockStore(BaseResponse): 'ElasticBlockStore.import_volume is not yet implemented') def describe_snapshot_attribute(self): - snapshot_id = self.querystring.get('SnapshotId')[0] + snapshot_id = self._get_param('SnapshotId') groups = self.ec2_backend.get_create_volume_permission_groups( snapshot_id) template = self.response_template( @@ -114,10 +108,10 @@ class ElasticBlockStore(BaseResponse): return template.render(snapshot_id=snapshot_id, groups=groups) def modify_snapshot_attribute(self): - snapshot_id = self.querystring.get('SnapshotId')[0] - operation_type = self.querystring.get('OperationType')[0] - group = self.querystring.get('UserGroup.1', [None])[0] - user_id = self.querystring.get('UserId.1', [None])[0] + snapshot_id = self._get_param('SnapshotId') + operation_type = self._get_param('OperationType') + group = self._get_param('UserGroup.1') + user_id = 
self._get_param('UserId.1') if self.is_not_dryrun('ModifySnapshotAttribute'): if (operation_type == 'add'): self.ec2_backend.add_create_volume_permission( diff --git a/moto/ec2/responses/elastic_ip_addresses.py b/moto/ec2/responses/elastic_ip_addresses.py index 1a60efcde..11c1d9c1f 100644 --- a/moto/ec2/responses/elastic_ip_addresses.py +++ b/moto/ec2/responses/elastic_ip_addresses.py @@ -6,10 +6,7 @@ from moto.ec2.utils import filters_from_querystring class ElasticIPAddresses(BaseResponse): def allocate_address(self): - if "Domain" in self.querystring: - domain = self.querystring.get('Domain')[0] - else: - domain = "standard" + domain = self._get_param('Domain', if_none='standard') if self.is_not_dryrun('AllocateAddress'): address = self.ec2_backend.allocate_address(domain) template = self.response_template(ALLOCATE_ADDRESS_RESPONSE) @@ -20,26 +17,28 @@ class ElasticIPAddresses(BaseResponse): if "InstanceId" in self.querystring: instance = self.ec2_backend.get_instance( - self.querystring['InstanceId'][0]) + self._get_param('InstanceId')) elif "NetworkInterfaceId" in self.querystring: eni = self.ec2_backend.get_network_interface( - self.querystring['NetworkInterfaceId'][0]) + self._get_param('NetworkInterfaceId')) else: self.ec2_backend.raise_error( "MissingParameter", "Invalid request, expect InstanceId/NetworkId parameter.") reassociate = False if "AllowReassociation" in self.querystring: - reassociate = self.querystring['AllowReassociation'][0] == "true" + reassociate = self._get_param('AllowReassociation') == "true" if self.is_not_dryrun('AssociateAddress'): if instance or eni: if "PublicIp" in self.querystring: - eip = self.ec2_backend.associate_address(instance=instance, eni=eni, address=self.querystring[ - 'PublicIp'][0], reassociate=reassociate) + eip = self.ec2_backend.associate_address( + instance=instance, eni=eni, + address=self._get_param('PublicIp'), reassociate=reassociate) elif "AllocationId" in self.querystring: - eip = 
self.ec2_backend.associate_address(instance=instance, eni=eni, allocation_id=self.querystring[ - 'AllocationId'][0], reassociate=reassociate) + eip = self.ec2_backend.associate_address( + instance=instance, eni=eni, + allocation_id=self._get_param('AllocationId'), reassociate=reassociate) else: self.ec2_backend.raise_error( "MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") @@ -63,10 +62,10 @@ class ElasticIPAddresses(BaseResponse): if self.is_not_dryrun('DisAssociateAddress'): if "PublicIp" in self.querystring: self.ec2_backend.disassociate_address( - address=self.querystring['PublicIp'][0]) + address=self._get_param('PublicIp')) elif "AssociationId" in self.querystring: self.ec2_backend.disassociate_address( - association_id=self.querystring['AssociationId'][0]) + association_id=self._get_param('AssociationId')) else: self.ec2_backend.raise_error( "MissingParameter", "Invalid request, expect PublicIp/AssociationId parameter.") @@ -77,10 +76,10 @@ class ElasticIPAddresses(BaseResponse): if self.is_not_dryrun('ReleaseAddress'): if "PublicIp" in self.querystring: self.ec2_backend.release_address( - address=self.querystring['PublicIp'][0]) + address=self._get_param('PublicIp')) elif "AllocationId" in self.querystring: self.ec2_backend.release_address( - allocation_id=self.querystring['AllocationId'][0]) + allocation_id=self._get_param('AllocationId')) else: self.ec2_backend.raise_error( "MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") diff --git a/moto/ec2/responses/elastic_network_interfaces.py b/moto/ec2/responses/elastic_network_interfaces.py index ec99f2da9..dc8b92df8 100644 --- a/moto/ec2/responses/elastic_network_interfaces.py +++ b/moto/ec2/responses/elastic_network_interfaces.py @@ -6,9 +6,8 @@ from moto.ec2.utils import filters_from_querystring class ElasticNetworkInterfaces(BaseResponse): def create_network_interface(self): - subnet_id = self.querystring.get('SubnetId')[0] - private_ip_address = 
self.querystring.get( - 'PrivateIpAddress', [None])[0] + subnet_id = self._get_param('SubnetId') + private_ip_address = self._get_param('PrivateIpAddress') groups = self._get_multi_param('SecurityGroupId') subnet = self.ec2_backend.get_subnet(subnet_id) if self.is_not_dryrun('CreateNetworkInterface'): @@ -19,7 +18,7 @@ class ElasticNetworkInterfaces(BaseResponse): return template.render(eni=eni) def delete_network_interface(self): - eni_id = self.querystring.get('NetworkInterfaceId')[0] + eni_id = self._get_param('NetworkInterfaceId') if self.is_not_dryrun('DeleteNetworkInterface'): self.ec2_backend.delete_network_interface(eni_id) template = self.response_template( @@ -38,9 +37,9 @@ class ElasticNetworkInterfaces(BaseResponse): return template.render(enis=enis) def attach_network_interface(self): - eni_id = self.querystring.get('NetworkInterfaceId')[0] - instance_id = self.querystring.get('InstanceId')[0] - device_index = self.querystring.get('DeviceIndex')[0] + eni_id = self._get_param('NetworkInterfaceId') + instance_id = self._get_param('InstanceId') + device_index = self._get_param('DeviceIndex') if self.is_not_dryrun('AttachNetworkInterface'): attachment_id = self.ec2_backend.attach_network_interface( eni_id, instance_id, device_index) @@ -49,7 +48,7 @@ class ElasticNetworkInterfaces(BaseResponse): return template.render(attachment_id=attachment_id) def detach_network_interface(self): - attachment_id = self.querystring.get('AttachmentId')[0] + attachment_id = self._get_param('AttachmentId') if self.is_not_dryrun('DetachNetworkInterface'): self.ec2_backend.detach_network_interface(attachment_id) template = self.response_template( @@ -58,8 +57,8 @@ class ElasticNetworkInterfaces(BaseResponse): def modify_network_interface_attribute(self): # Currently supports modifying one and only one security group - eni_id = self.querystring.get('NetworkInterfaceId')[0] - group_id = self.querystring.get('SecurityGroupId.1')[0] + eni_id = self._get_param('NetworkInterfaceId') 
+ group_id = self._get_param('SecurityGroupId.1') if self.is_not_dryrun('ModifyNetworkInterface'): self.ec2_backend.modify_network_interface_attribute( eni_id, group_id) diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index b4bf531a8..532d703c9 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -3,7 +3,7 @@ from boto.ec2.instancetype import InstanceType from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from moto.ec2.utils import filters_from_querystring, \ - dict_from_querystring, optional_from_querystring + dict_from_querystring class InstanceResponse(BaseResponse): @@ -33,20 +33,18 @@ class InstanceResponse(BaseResponse): return template.render(reservations=reservations_resp, next_token=next_token) def run_instances(self): - min_count = int(self.querystring.get('MinCount', ['1'])[0]) - image_id = self.querystring.get('ImageId')[0] - user_data = self.querystring.get('UserData') + min_count = int(self._get_param('MinCount', if_none='1')) + image_id = self._get_param('ImageId') + user_data = self._get_param('UserData') security_group_names = self._get_multi_param('SecurityGroup') security_group_ids = self._get_multi_param('SecurityGroupId') nics = dict_from_querystring("NetworkInterface", self.querystring) - instance_type = self.querystring.get("InstanceType", ["m1.small"])[0] - placement = self.querystring.get( - "Placement.AvailabilityZone", [None])[0] - subnet_id = self.querystring.get("SubnetId", [None])[0] - private_ip = self.querystring.get("PrivateIpAddress", [None])[0] - associate_public_ip = self.querystring.get( - "AssociatePublicIpAddress", [None])[0] - key_name = self.querystring.get("KeyName", [None])[0] + instance_type = self._get_param('InstanceType', if_none='m1.small') + placement = self._get_param('Placement.AvailabilityZone') + subnet_id = self._get_param('SubnetId') + private_ip = self._get_param('PrivateIpAddress') + 
associate_public_ip = self._get_param('AssociatePublicIpAddress') + key_name = self._get_param('KeyName') tags = self._parse_tag_specification("TagSpecification") region_name = self.region @@ -91,8 +89,7 @@ class InstanceResponse(BaseResponse): def describe_instance_status(self): instance_ids = self._get_multi_param('InstanceId') - include_all_instances = optional_from_querystring('IncludeAllInstances', - self.querystring) == 'true' + include_all_instances = self._get_param('IncludeAllInstances') == 'true' if instance_ids: instances = self.ec2_backend.get_multi_instances_by_id( @@ -114,7 +111,7 @@ class InstanceResponse(BaseResponse): def describe_instance_attribute(self): # TODO this and modify below should raise IncorrectInstanceState if # instance not in stopped state - attribute = self.querystring.get("Attribute")[0] + attribute = self._get_param('Attribute') key = camelcase_to_underscores(attribute) instance_id = self._get_param('InstanceId') instance, value = self.ec2_backend.describe_instance_attribute( diff --git a/moto/ec2/responses/internet_gateways.py b/moto/ec2/responses/internet_gateways.py index d6a0b19de..ebea14adf 100644 --- a/moto/ec2/responses/internet_gateways.py +++ b/moto/ec2/responses/internet_gateways.py @@ -8,8 +8,8 @@ from moto.ec2.utils import ( class InternetGateways(BaseResponse): def attach_internet_gateway(self): - igw_id = self.querystring.get("InternetGatewayId", [None])[0] - vpc_id = self.querystring.get("VpcId", [None])[0] + igw_id = self._get_param('InternetGatewayId') + vpc_id = self._get_param('VpcId') if self.is_not_dryrun('AttachInternetGateway'): self.ec2_backend.attach_internet_gateway(igw_id, vpc_id) template = self.response_template(ATTACH_INTERNET_GATEWAY_RESPONSE) @@ -22,7 +22,7 @@ class InternetGateways(BaseResponse): return template.render(internet_gateway=igw) def delete_internet_gateway(self): - igw_id = self.querystring.get("InternetGatewayId", [None])[0] + igw_id = self._get_param('InternetGatewayId') if 
self.is_not_dryrun('DeleteInternetGateway'): self.ec2_backend.delete_internet_gateway(igw_id) template = self.response_template(DELETE_INTERNET_GATEWAY_RESPONSE) @@ -44,8 +44,8 @@ class InternetGateways(BaseResponse): def detach_internet_gateway(self): # TODO validate no instances with EIPs in VPC before detaching # raise else DependencyViolationError() - igw_id = self.querystring.get("InternetGatewayId", [None])[0] - vpc_id = self.querystring.get("VpcId", [None])[0] + igw_id = self._get_param('InternetGatewayId') + vpc_id = self._get_param('VpcId') if self.is_not_dryrun('DetachInternetGateway'): self.ec2_backend.detach_internet_gateway(igw_id, vpc_id) template = self.response_template(DETACH_INTERNET_GATEWAY_RESPONSE) diff --git a/moto/ec2/responses/key_pairs.py b/moto/ec2/responses/key_pairs.py index 2dab918a7..d927bddda 100644 --- a/moto/ec2/responses/key_pairs.py +++ b/moto/ec2/responses/key_pairs.py @@ -7,14 +7,14 @@ from moto.ec2.utils import filters_from_querystring class KeyPairs(BaseResponse): def create_key_pair(self): - name = self.querystring.get('KeyName')[0] + name = self._get_param('KeyName') if self.is_not_dryrun('CreateKeyPair'): keypair = self.ec2_backend.create_key_pair(name) template = self.response_template(CREATE_KEY_PAIR_RESPONSE) return template.render(keypair=keypair) def delete_key_pair(self): - name = self.querystring.get('KeyName')[0] + name = self._get_param('KeyName') if self.is_not_dryrun('DeleteKeyPair'): success = six.text_type( self.ec2_backend.delete_key_pair(name)).lower() @@ -28,8 +28,8 @@ class KeyPairs(BaseResponse): return template.render(keypairs=keypairs) def import_key_pair(self): - name = self.querystring.get('KeyName')[0] - material = self.querystring.get('PublicKeyMaterial')[0] + name = self._get_param('KeyName') + material = self._get_param('PublicKeyMaterial') if self.is_not_dryrun('ImportKeyPair'): keypair = self.ec2_backend.import_key_pair(name, material) template = self.response_template(IMPORT_KEYPAIR_RESPONSE) 
diff --git a/moto/ec2/responses/network_acls.py b/moto/ec2/responses/network_acls.py index a0598c73b..97f370306 100644 --- a/moto/ec2/responses/network_acls.py +++ b/moto/ec2/responses/network_acls.py @@ -6,22 +6,22 @@ from moto.ec2.utils import filters_from_querystring class NetworkACLs(BaseResponse): def create_network_acl(self): - vpc_id = self.querystring.get('VpcId')[0] + vpc_id = self._get_param('VpcId') network_acl = self.ec2_backend.create_network_acl(vpc_id) template = self.response_template(CREATE_NETWORK_ACL_RESPONSE) return template.render(network_acl=network_acl) def create_network_acl_entry(self): - network_acl_id = self.querystring.get('NetworkAclId')[0] - rule_number = self.querystring.get('RuleNumber')[0] - protocol = self.querystring.get('Protocol')[0] - rule_action = self.querystring.get('RuleAction')[0] - egress = self.querystring.get('Egress')[0] - cidr_block = self.querystring.get('CidrBlock')[0] - icmp_code = self.querystring.get('Icmp.Code', [None])[0] - icmp_type = self.querystring.get('Icmp.Type', [None])[0] - port_range_from = self.querystring.get('PortRange.From')[0] - port_range_to = self.querystring.get('PortRange.To')[0] + network_acl_id = self._get_param('NetworkAclId') + rule_number = self._get_param('RuleNumber') + protocol = self._get_param('Protocol') + rule_action = self._get_param('RuleAction') + egress = self._get_param('Egress') + cidr_block = self._get_param('CidrBlock') + icmp_code = self._get_param('Icmp.Code') + icmp_type = self._get_param('Icmp.Type') + port_range_from = self._get_param('PortRange.From') + port_range_to = self._get_param('PortRange.To') network_acl_entry = self.ec2_backend.create_network_acl_entry( network_acl_id, rule_number, protocol, rule_action, @@ -32,30 +32,30 @@ class NetworkACLs(BaseResponse): return template.render(network_acl_entry=network_acl_entry) def delete_network_acl(self): - network_acl_id = self.querystring.get('NetworkAclId')[0] + network_acl_id = self._get_param('NetworkAclId') 
self.ec2_backend.delete_network_acl(network_acl_id) template = self.response_template(DELETE_NETWORK_ACL_ASSOCIATION) return template.render() def delete_network_acl_entry(self): - network_acl_id = self.querystring.get('NetworkAclId')[0] - rule_number = self.querystring.get('RuleNumber')[0] - egress = self.querystring.get('Egress')[0] + network_acl_id = self._get_param('NetworkAclId') + rule_number = self._get_param('RuleNumber') + egress = self._get_param('Egress') self.ec2_backend.delete_network_acl_entry(network_acl_id, rule_number, egress) template = self.response_template(DELETE_NETWORK_ACL_ENTRY_RESPONSE) return template.render() def replace_network_acl_entry(self): - network_acl_id = self.querystring.get('NetworkAclId')[0] - rule_number = self.querystring.get('RuleNumber')[0] - protocol = self.querystring.get('Protocol')[0] - rule_action = self.querystring.get('RuleAction')[0] - egress = self.querystring.get('Egress')[0] - cidr_block = self.querystring.get('CidrBlock')[0] - icmp_code = self.querystring.get('Icmp.Code', [None])[0] - icmp_type = self.querystring.get('Icmp.Type', [None])[0] - port_range_from = self.querystring.get('PortRange.From')[0] - port_range_to = self.querystring.get('PortRange.To')[0] + network_acl_id = self._get_param('NetworkAclId') + rule_number = self._get_param('RuleNumber') + protocol = self._get_param('Protocol') + rule_action = self._get_param('RuleAction') + egress = self._get_param('Egress') + cidr_block = self._get_param('CidrBlock') + icmp_code = self._get_param('Icmp.Code') + icmp_type = self._get_param('Icmp.Type') + port_range_from = self._get_param('PortRange.From') + port_range_to = self._get_param('PortRange.To') self.ec2_backend.replace_network_acl_entry( network_acl_id, rule_number, protocol, rule_action, @@ -74,8 +74,8 @@ class NetworkACLs(BaseResponse): return template.render(network_acls=network_acls) def replace_network_acl_association(self): - association_id = self.querystring.get('AssociationId')[0] - 
network_acl_id = self.querystring.get('NetworkAclId')[0] + association_id = self._get_param('AssociationId') + network_acl_id = self._get_param('NetworkAclId') association = self.ec2_backend.replace_network_acl_association( association_id, diff --git a/moto/ec2/responses/route_tables.py b/moto/ec2/responses/route_tables.py index 30eef8159..3878f325d 100644 --- a/moto/ec2/responses/route_tables.py +++ b/moto/ec2/responses/route_tables.py @@ -1,29 +1,25 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.ec2.utils import filters_from_querystring, optional_from_querystring +from moto.ec2.utils import filters_from_querystring class RouteTables(BaseResponse): def associate_route_table(self): - route_table_id = self.querystring.get('RouteTableId')[0] - subnet_id = self.querystring.get('SubnetId')[0] + route_table_id = self._get_param('RouteTableId') + subnet_id = self._get_param('SubnetId') association_id = self.ec2_backend.associate_route_table( route_table_id, subnet_id) template = self.response_template(ASSOCIATE_ROUTE_TABLE_RESPONSE) return template.render(association_id=association_id) def create_route(self): - route_table_id = self.querystring.get('RouteTableId')[0] - destination_cidr_block = self.querystring.get( - 'DestinationCidrBlock')[0] - - gateway_id = optional_from_querystring('GatewayId', self.querystring) - instance_id = optional_from_querystring('InstanceId', self.querystring) - interface_id = optional_from_querystring( - 'NetworkInterfaceId', self.querystring) - pcx_id = optional_from_querystring( - 'VpcPeeringConnectionId', self.querystring) + route_table_id = self._get_param('RouteTableId') + destination_cidr_block = self._get_param('DestinationCidrBlock') + gateway_id = self._get_param('GatewayId') + instance_id = self._get_param('InstanceId') + interface_id = self._get_param('NetworkInterfaceId') + pcx_id = self._get_param('VpcPeeringConnectionId') self.ec2_backend.create_route(route_table_id, 
destination_cidr_block, gateway_id=gateway_id, @@ -35,21 +31,20 @@ class RouteTables(BaseResponse): return template.render() def create_route_table(self): - vpc_id = self.querystring.get('VpcId')[0] + vpc_id = self._get_param('VpcId') route_table = self.ec2_backend.create_route_table(vpc_id) template = self.response_template(CREATE_ROUTE_TABLE_RESPONSE) return template.render(route_table=route_table) def delete_route(self): - route_table_id = self.querystring.get('RouteTableId')[0] - destination_cidr_block = self.querystring.get( - 'DestinationCidrBlock')[0] + route_table_id = self._get_param('RouteTableId') + destination_cidr_block = self._get_param('DestinationCidrBlock') self.ec2_backend.delete_route(route_table_id, destination_cidr_block) template = self.response_template(DELETE_ROUTE_RESPONSE) return template.render() def delete_route_table(self): - route_table_id = self.querystring.get('RouteTableId')[0] + route_table_id = self._get_param('RouteTableId') self.ec2_backend.delete_route_table(route_table_id) template = self.response_template(DELETE_ROUTE_TABLE_RESPONSE) return template.render() @@ -63,22 +58,18 @@ class RouteTables(BaseResponse): return template.render(route_tables=route_tables) def disassociate_route_table(self): - association_id = self.querystring.get('AssociationId')[0] + association_id = self._get_param('AssociationId') self.ec2_backend.disassociate_route_table(association_id) template = self.response_template(DISASSOCIATE_ROUTE_TABLE_RESPONSE) return template.render() def replace_route(self): - route_table_id = self.querystring.get('RouteTableId')[0] - destination_cidr_block = self.querystring.get( - 'DestinationCidrBlock')[0] - - gateway_id = optional_from_querystring('GatewayId', self.querystring) - instance_id = optional_from_querystring('InstanceId', self.querystring) - interface_id = optional_from_querystring( - 'NetworkInterfaceId', self.querystring) - pcx_id = optional_from_querystring( - 'VpcPeeringConnectionId', self.querystring) + 
route_table_id = self._get_param('RouteTableId') + destination_cidr_block = self._get_param('DestinationCidrBlock') + gateway_id = self._get_param('GatewayId') + instance_id = self._get_param('InstanceId') + interface_id = self._get_param('NetworkInterfaceId') + pcx_id = self._get_param('VpcPeeringConnectionId') self.ec2_backend.replace_route(route_table_id, destination_cidr_block, gateway_id=gateway_id, @@ -90,8 +81,8 @@ class RouteTables(BaseResponse): return template.render() def replace_route_table_association(self): - route_table_id = self.querystring.get('RouteTableId')[0] - association_id = self.querystring.get('AssociationId')[0] + route_table_id = self._get_param('RouteTableId') + association_id = self._get_param('AssociationId') new_association_id = self.ec2_backend.replace_route_table_association( association_id, route_table_id) template = self.response_template( diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index b8cd87de8..9118c01b3 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -11,69 +11,66 @@ def try_parse_int(value, default=None): return default -def process_rules_from_querystring(querystring): - try: - group_name_or_id = querystring.get('GroupName')[0] - except: - group_name_or_id = querystring.get('GroupId')[0] - - querytree = {} - for key, value in querystring.items(): - key_splitted = key.split('.') - key_splitted = [try_parse_int(e, e) for e in key_splitted] - - d = querytree - for subkey in key_splitted[:-1]: - if subkey not in d: - d[subkey] = {} - d = d[subkey] - d[key_splitted[-1]] = value - - ip_permissions = querytree.get('IpPermissions') or {} - for ip_permission_idx in sorted(ip_permissions.keys()): - ip_permission = ip_permissions[ip_permission_idx] - - ip_protocol = ip_permission.get('IpProtocol', [None])[0] - from_port = ip_permission.get('FromPort', [None])[0] - to_port = ip_permission.get('ToPort', [None])[0] - - ip_ranges = [] - 
ip_ranges_tree = ip_permission.get('IpRanges') or {} - for ip_range_idx in sorted(ip_ranges_tree.keys()): - ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0]) - - source_groups = [] - source_group_ids = [] - groups_tree = ip_permission.get('Groups') or {} - for group_idx in sorted(groups_tree.keys()): - group_dict = groups_tree[group_idx] - if 'GroupId' in group_dict: - source_group_ids.append(group_dict['GroupId'][0]) - elif 'GroupName' in group_dict: - source_groups.append(group_dict['GroupName'][0]) - - yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges, - source_groups, source_group_ids) - - class SecurityGroups(BaseResponse): + def _process_rules_from_querystring(self): + group_name_or_id = (self._get_param('GroupName') or + self._get_param('GroupId')) + + querytree = {} + for key, value in self.querystring.items(): + key_splitted = key.split('.') + key_splitted = [try_parse_int(e, e) for e in key_splitted] + + d = querytree + for subkey in key_splitted[:-1]: + if subkey not in d: + d[subkey] = {} + d = d[subkey] + d[key_splitted[-1]] = value + + ip_permissions = querytree.get('IpPermissions') or {} + for ip_permission_idx in sorted(ip_permissions.keys()): + ip_permission = ip_permissions[ip_permission_idx] + + ip_protocol = ip_permission.get('IpProtocol', [None])[0] + from_port = ip_permission.get('FromPort', [None])[0] + to_port = ip_permission.get('ToPort', [None])[0] + + ip_ranges = [] + ip_ranges_tree = ip_permission.get('IpRanges') or {} + for ip_range_idx in sorted(ip_ranges_tree.keys()): + ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0]) + + source_groups = [] + source_group_ids = [] + groups_tree = ip_permission.get('Groups') or {} + for group_idx in sorted(groups_tree.keys()): + group_dict = groups_tree[group_idx] + if 'GroupId' in group_dict: + source_group_ids.append(group_dict['GroupId'][0]) + elif 'GroupName' in group_dict: + source_groups.append(group_dict['GroupName'][0]) + + yield (group_name_or_id, 
ip_protocol, from_port, to_port, ip_ranges, + source_groups, source_group_ids) + def authorize_security_group_egress(self): if self.is_not_dryrun('GrantSecurityGroupEgress'): - for args in process_rules_from_querystring(self.querystring): + for args in self._process_rules_from_querystring(): self.ec2_backend.authorize_security_group_egress(*args) return AUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE def authorize_security_group_ingress(self): if self.is_not_dryrun('GrantSecurityGroupIngress'): - for args in process_rules_from_querystring(self.querystring): + for args in self._process_rules_from_querystring(): self.ec2_backend.authorize_security_group_ingress(*args) return AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE def create_security_group(self): - name = self.querystring.get('GroupName')[0] - description = self.querystring.get('GroupDescription', [None])[0] - vpc_id = self.querystring.get("VpcId", [None])[0] + name = self._get_param('GroupName') + description = self._get_param('GroupDescription') + vpc_id = self._get_param('VpcId') if self.is_not_dryrun('CreateSecurityGroup'): group = self.ec2_backend.create_security_group( @@ -86,14 +83,14 @@ class SecurityGroups(BaseResponse): # See # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html - name = self.querystring.get('GroupName') - sg_id = self.querystring.get('GroupId') + name = self._get_param('GroupName') + sg_id = self._get_param('GroupId') if self.is_not_dryrun('DeleteSecurityGroup'): if name: - self.ec2_backend.delete_security_group(name[0]) + self.ec2_backend.delete_security_group(name) elif sg_id: - self.ec2_backend.delete_security_group(group_id=sg_id[0]) + self.ec2_backend.delete_security_group(group_id=sg_id) return DELETE_GROUP_RESPONSE @@ -113,7 +110,7 @@ class SecurityGroups(BaseResponse): def revoke_security_group_egress(self): if self.is_not_dryrun('RevokeSecurityGroupEgress'): - for args in process_rules_from_querystring(self.querystring): + for args in 
self._process_rules_from_querystring(): success = self.ec2_backend.revoke_security_group_egress(*args) if not success: return "Could not find a matching egress rule", dict(status=404) @@ -121,7 +118,7 @@ class SecurityGroups(BaseResponse): def revoke_security_group_ingress(self): if self.is_not_dryrun('RevokeSecurityGroupIngress'): - for args in process_rules_from_querystring(self.querystring): + for args in self._process_rules_from_querystring(): self.ec2_backend.revoke_security_group_ingress(*args) return REVOKE_SECURITY_GROUP_INGRESS_REPONSE diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py index 67fd09a14..ba4f78a5e 100644 --- a/moto/ec2/responses/subnets.py +++ b/moto/ec2/responses/subnets.py @@ -7,14 +7,11 @@ from moto.ec2.utils import filters_from_querystring class Subnets(BaseResponse): def create_subnet(self): - vpc_id = self.querystring.get('VpcId')[0] - cidr_block = self.querystring.get('CidrBlock')[0] - if 'AvailabilityZone' in self.querystring: - availability_zone = self.querystring['AvailabilityZone'][0] - else: - zone = random.choice( - self.ec2_backend.describe_availability_zones()) - availability_zone = zone.name + vpc_id = self._get_param('VpcId') + cidr_block = self._get_param('CidrBlock') + availability_zone = self._get_param( + 'AvailabilityZone', if_none=random.choice( + self.ec2_backend.describe_availability_zones()).name) subnet = self.ec2_backend.create_subnet( vpc_id, cidr_block, @@ -24,30 +21,21 @@ class Subnets(BaseResponse): return template.render(subnet=subnet) def delete_subnet(self): - subnet_id = self.querystring.get('SubnetId')[0] + subnet_id = self._get_param('SubnetId') subnet = self.ec2_backend.delete_subnet(subnet_id) template = self.response_template(DELETE_SUBNET_RESPONSE) return template.render(subnet=subnet) def describe_subnets(self): + subnet_ids = self._get_multi_param('SubnetId') filters = filters_from_querystring(self.querystring) - - subnet_ids = [] - idx = 1 - key = 'SubnetId.{0}'.format(idx) 
- while key in self.querystring: - v = self.querystring[key] - subnet_ids.append(v[0]) - idx += 1 - key = 'SubnetId.{0}'.format(idx) - subnets = self.ec2_backend.get_all_subnets(subnet_ids, filters) template = self.response_template(DESCRIBE_SUBNETS_RESPONSE) return template.render(subnets=subnets) def modify_subnet_attribute(self): - subnet_id = self.querystring.get('SubnetId')[0] - map_public_ip = self.querystring.get('MapPublicIpOnLaunch.Value')[0] + subnet_id = self._get_param('SubnetId') + map_public_ip = self._get_param('MapPublicIpOnLaunch.Value') self.ec2_backend.modify_subnet_attribute(subnet_id, map_public_ip) return MODIFY_SUBNET_ATTRIBUTE_RESPONSE diff --git a/moto/ec2/responses/virtual_private_gateways.py b/moto/ec2/responses/virtual_private_gateways.py index 2a677d36c..75de31b93 100644 --- a/moto/ec2/responses/virtual_private_gateways.py +++ b/moto/ec2/responses/virtual_private_gateways.py @@ -6,8 +6,8 @@ from moto.ec2.utils import filters_from_querystring class VirtualPrivateGateways(BaseResponse): def attach_vpn_gateway(self): - vpn_gateway_id = self.querystring.get('VpnGatewayId')[0] - vpc_id = self.querystring.get('VpcId')[0] + vpn_gateway_id = self._get_param('VpnGatewayId') + vpc_id = self._get_param('VpcId') attachment = self.ec2_backend.attach_vpn_gateway( vpn_gateway_id, vpc_id @@ -16,13 +16,13 @@ class VirtualPrivateGateways(BaseResponse): return template.render(attachment=attachment) def create_vpn_gateway(self): - type = self.querystring.get('Type', None)[0] + type = self._get_param('Type') vpn_gateway = self.ec2_backend.create_vpn_gateway(type) template = self.response_template(CREATE_VPN_GATEWAY_RESPONSE) return template.render(vpn_gateway=vpn_gateway) def delete_vpn_gateway(self): - vpn_gateway_id = self.querystring.get('VpnGatewayId')[0] + vpn_gateway_id = self._get_param('VpnGatewayId') vpn_gateway = self.ec2_backend.delete_vpn_gateway(vpn_gateway_id) template = self.response_template(DELETE_VPN_GATEWAY_RESPONSE) return 
template.render(vpn_gateway=vpn_gateway) @@ -34,8 +34,8 @@ class VirtualPrivateGateways(BaseResponse): return template.render(vpn_gateways=vpn_gateways) def detach_vpn_gateway(self): - vpn_gateway_id = self.querystring.get('VpnGatewayId')[0] - vpc_id = self.querystring.get('VpcId')[0] + vpn_gateway_id = self._get_param('VpnGatewayId') + vpc_id = self._get_param('VpcId') attachment = self.ec2_backend.detach_vpn_gateway( vpn_gateway_id, vpc_id diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index deedcc0e6..1bccce4f6 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -5,16 +5,15 @@ from moto.core.responses import BaseResponse class VPCPeeringConnections(BaseResponse): def create_vpc_peering_connection(self): - vpc = self.ec2_backend.get_vpc(self.querystring.get('VpcId')[0]) - peer_vpc = self.ec2_backend.get_vpc( - self.querystring.get('PeerVpcId')[0]) + vpc = self.ec2_backend.get_vpc(self._get_param('VpcId')) + peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId')) vpc_pcx = self.ec2_backend.create_vpc_peering_connection(vpc, peer_vpc) template = self.response_template( CREATE_VPC_PEERING_CONNECTION_RESPONSE) return template.render(vpc_pcx=vpc_pcx) def delete_vpc_peering_connection(self): - vpc_pcx_id = self.querystring.get('VpcPeeringConnectionId')[0] + vpc_pcx_id = self._get_param('VpcPeeringConnectionId') vpc_pcx = self.ec2_backend.delete_vpc_peering_connection(vpc_pcx_id) template = self.response_template( DELETE_VPC_PEERING_CONNECTION_RESPONSE) @@ -27,14 +26,14 @@ class VPCPeeringConnections(BaseResponse): return template.render(vpc_pcxs=vpc_pcxs) def accept_vpc_peering_connection(self): - vpc_pcx_id = self.querystring.get('VpcPeeringConnectionId')[0] + vpc_pcx_id = self._get_param('VpcPeeringConnectionId') vpc_pcx = self.ec2_backend.accept_vpc_peering_connection(vpc_pcx_id) template = self.response_template( 
ACCEPT_VPC_PEERING_CONNECTION_RESPONSE) return template.render(vpc_pcx=vpc_pcx) def reject_vpc_peering_connection(self): - vpc_pcx_id = self.querystring.get('VpcPeeringConnectionId')[0] + vpc_pcx_id = self._get_param('VpcPeeringConnectionId') self.ec2_backend.reject_vpc_peering_connection(vpc_pcx_id) template = self.response_template( REJECT_VPC_PEERING_CONNECTION_RESPONSE) diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 17309fe05..8a53151e0 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -7,15 +7,14 @@ from moto.ec2.utils import filters_from_querystring class VPCs(BaseResponse): def create_vpc(self): - cidr_block = self.querystring.get('CidrBlock')[0] - instance_tenancy = self.querystring.get( - 'InstanceTenancy', ['default'])[0] + cidr_block = self._get_param('CidrBlock') + instance_tenancy = self._get_param('InstanceTenancy', if_none='default') vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy) template = self.response_template(CREATE_VPC_RESPONSE) return template.render(vpc=vpc) def delete_vpc(self): - vpc_id = self.querystring.get('VpcId')[0] + vpc_id = self._get_param('VpcId') vpc = self.ec2_backend.delete_vpc(vpc_id) template = self.response_template(DELETE_VPC_RESPONSE) return template.render(vpc=vpc) @@ -28,15 +27,15 @@ class VPCs(BaseResponse): return template.render(vpcs=vpcs) def describe_vpc_attribute(self): - vpc_id = self.querystring.get('VpcId')[0] - attribute = self.querystring.get('Attribute')[0] + vpc_id = self._get_param('VpcId') + attribute = self._get_param('Attribute') attr_name = camelcase_to_underscores(attribute) value = self.ec2_backend.describe_vpc_attribute(vpc_id, attr_name) template = self.response_template(DESCRIBE_VPC_ATTRIBUTE_RESPONSE) return template.render(vpc_id=vpc_id, attribute=attribute, value=value) def modify_vpc_attribute(self): - vpc_id = self.querystring.get('VpcId')[0] + vpc_id = self._get_param('VpcId') for attribute in ('EnableDnsSupport', 
'EnableDnsHostnames'): if self.querystring.get('%s.Value' % attribute): diff --git a/moto/ec2/responses/vpn_connections.py b/moto/ec2/responses/vpn_connections.py index e24515ae6..276e3ca99 100644 --- a/moto/ec2/responses/vpn_connections.py +++ b/moto/ec2/responses/vpn_connections.py @@ -6,17 +6,17 @@ from moto.ec2.utils import filters_from_querystring class VPNConnections(BaseResponse): def create_vpn_connection(self): - type = self.querystring.get("Type", [None])[0] - cgw_id = self.querystring.get("CustomerGatewayId", [None])[0] - vgw_id = self.querystring.get("VPNGatewayId", [None])[0] - static_routes = self.querystring.get("StaticRoutesOnly", [None])[0] + type = self._get_param('Type') + cgw_id = self._get_param('CustomerGatewayId') + vgw_id = self._get_param('VPNGatewayId') + static_routes = self._get_param('StaticRoutesOnly') vpn_connection = self.ec2_backend.create_vpn_connection( type, cgw_id, vgw_id, static_routes_only=static_routes) template = self.response_template(CREATE_VPN_CONNECTION_RESPONSE) return template.render(vpn_connection=vpn_connection) def delete_vpn_connection(self): - vpn_connection_id = self.querystring.get('VpnConnectionId')[0] + vpn_connection_id = self._get_param('VpnConnectionId') vpn_connection = self.ec2_backend.delete_vpn_connection( vpn_connection_id) template = self.response_template(DELETE_VPN_CONNECTION_RESPONSE) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 543ac4a99..ab54ea3a8 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -230,11 +230,6 @@ def dhcp_configuration_from_querystring(querystring, option=u'DhcpConfiguration' return response_values -def optional_from_querystring(parameter, querystring): - parameter_array = querystring.get(parameter) - return parameter_array[0] if parameter_array else None - - def filters_from_querystring(querystring_dict): response_values = {} for key, value in querystring_dict.items(): From 2055bb62f505f376fc049efdd8418f82af01ee86 Mon Sep 17 00:00:00 2001 From: Jack Danger 
Date: Sat, 16 Sep 2017 06:08:27 -0700 Subject: [PATCH 206/412] enforce s3 acls --- moto/s3/models.py | 24 ++++++++++++++++++++++++ moto/s3/responses.py | 21 ++++++++++++++++++--- tests/test_s3/test_s3.py | 30 ++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 3 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index abe92bdf1..ae05292f2 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -201,10 +201,18 @@ class FakeGrantee(BaseModel): self.uri = uri self.display_name = display_name + def __eq__(self, other): + if not isinstance(other, FakeGrantee): + return False + return self.id == other.id and self.uri == other.uri and self.display_name == other.display_name + @property def type(self): return 'Group' if self.uri else 'CanonicalUser' + def __repr__(self): + return "FakeGrantee(display_name: '{}', id: '{}', uri: '{}')".format(self.display_name, self.id, self.uri) + ALL_USERS_GRANTEE = FakeGrantee( uri='http://acs.amazonaws.com/groups/global/AllUsers') @@ -226,12 +234,28 @@ class FakeGrant(BaseModel): self.grantees = grantees self.permissions = permissions + def __repr__(self): + return "FakeGrant(grantees: {}, permissions: {})".format(self.grantees, self.permissions) + class FakeAcl(BaseModel): def __init__(self, grants=[]): self.grants = grants + @property + def public_read(self): + for grant in self.grants: + if ALL_USERS_GRANTEE in grant.grantees: + if PERMISSION_READ in grant.permissions: + return True + if PERMISSION_FULL_CONTROL in grant.permissions: + return True + return False + + def __repr__(self): + return "FakeAcl(grants: {})".format(self.grants) + def get_canned_acl(acl): owner_grantee = FakeGrantee( diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 4da888de5..fbd142a34 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -373,9 +373,8 @@ class ResponseObject(_TemplateEnvironmentMixin): self.backend.set_bucket_policy(bucket_name, body) return 'True' elif 'acl' in querystring: - acl = 
self._acl_from_headers(request.headers) # TODO: Support the XML-based ACL format - self.backend.set_bucket_acl(bucket_name, acl) + self.backend.set_bucket_acl(bucket_name, self._acl_from_headers(request.headers)) return "" elif "tagging" in querystring: tagging = self._bucket_tagging_from_xml(body) @@ -407,6 +406,11 @@ class ResponseObject(_TemplateEnvironmentMixin): new_bucket = self.backend.get_bucket(bucket_name) else: raise + + if 'x-amz-acl' in request.headers: + # TODO: Support the XML-based ACL format + self.backend.set_bucket_acl(bucket_name, self._acl_from_headers(request.headers)) + template = self.response_template(S3_BUCKET_CREATE_RESPONSE) return 200, {}, template.render(bucket=new_bucket) @@ -536,6 +540,17 @@ class ResponseObject(_TemplateEnvironmentMixin): key_name = self.parse_key_name(request, parsed_url.path) bucket_name = self.parse_bucket_name_from_url(request, full_url) + # Because we patch the requests library the boto/boto3 API + # requests go through this method but so do + # `requests.get("https://bucket-name.s3.amazonaws.com/file-name")` + # Here we deny public access to private files by checking the + # ACL and checking for the mere presence of an Authorization + # header. 
+ if 'Authorization' not in request.headers: + key = self.backend.get_key(bucket_name, key_name) + if key and not key.acl.public_read: + return 403, {}, "" + if hasattr(request, 'body'): # Boto body = request.body @@ -725,7 +740,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if grants: return FakeAcl(grants) else: - return None + return get_canned_acl('private') def _tagging_from_headers(self, headers): if headers.get('x-amz-tagging'): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 331452a7d..f7898fcb4 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -864,6 +864,36 @@ def test_bucket_acl_switching(): g.permission == 'READ' for g in grants), grants +@mock_s3 +def test_s3_object_in_public_bucket(): + s3 = boto3.resource('s3') + bucket = s3.Bucket('test-bucket') + bucket.create(ACL='public-read') + bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt') + direct_url = 'https://test-bucket.s3.amazonaws.com/file.txt' + response = requests.get(direct_url) + response.status_code.should.equal(200) + response.content.should.equal(b'ABCD') + bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') + response = requests.get(direct_url) + response.status_code.should.equal(403) + + +@mock_s3 +def test_s3_object_in_private_bucket(): + s3 = boto3.resource('s3') + bucket = s3.Bucket('test-bucket') + bucket.create(ACL='private') + bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') + direct_url = 'https://test-bucket.s3.amazonaws.com/file.txt' + response = requests.get(direct_url) + response.status_code.should.equal(403) + bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt') + response = requests.get(direct_url) + response.status_code.should.equal(200) + response.content.should.equal(b'ABCD') + + @mock_s3_deprecated def test_unicode_key(): conn = boto.connect_s3() From 802279d7c464926f176a930d53ad4c5945e761b8 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sat, 16 Sep 2017 06:38:40 
-0700 Subject: [PATCH 207/412] Authenticating to S3 in tests --- tests/test_s3/test_server.py | 30 ++++++++++++------- .../test_bucket_path_server.py | 29 +++++++++++------- 2 files changed, 38 insertions(+), 21 deletions(-) diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index c3ca3c3ff..9c8252a04 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals import sure # noqa +from flask.testing import FlaskClient import moto.server as server ''' @@ -10,18 +11,28 @@ Test the different server responses ''' -def test_s3_server_get(): - backend = server.create_backend_app("s3") - test_client = backend.test_client() +class AuthenticatedClient(FlaskClient): + def open(self, *args, **kwargs): + kwargs['headers'] = kwargs.get('headers', {}) + kwargs['headers']['Authorization'] = "Any authorization header" + return super(AuthenticatedClient, self).open(*args, **kwargs) + +def authenticated_client(): + backend = server.create_backend_app("s3") + backend.test_client_class = AuthenticatedClient + return backend.test_client() + + +def test_s3_server_get(): + test_client = authenticated_client() res = test_client.get('/') res.data.should.contain(b'ListAllMyBucketsResult') def test_s3_server_bucket_create(): - backend = server.create_backend_app("s3") - test_client = backend.test_client() + test_client = authenticated_client() res = test_client.put('/', 'http://foobaz.localhost:5000/') res.status_code.should.equal(200) @@ -44,8 +55,7 @@ def test_s3_server_bucket_create(): def test_s3_server_bucket_versioning(): - backend = server.create_backend_app("s3") - test_client = backend.test_client() + test_client = authenticated_client() # Just enough XML to enable versioning body = 'Enabled' @@ -55,8 +65,7 @@ def test_s3_server_bucket_versioning(): def test_s3_server_post_to_bucket(): - backend = server.create_backend_app("s3") - test_client = backend.test_client() + test_client = 
authenticated_client() res = test_client.put('/', 'http://tester.localhost:5000/') res.status_code.should.equal(200) @@ -72,8 +81,7 @@ def test_s3_server_post_to_bucket(): def test_s3_server_post_without_content_length(): - backend = server.create_backend_app("s3") - test_client = backend.test_client() + test_client = authenticated_client() res = test_client.put('/', 'http://tester.localhost:5000/', environ_overrides={'CONTENT_LENGTH': ''}) res.status_code.should.equal(411) diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_bucket_path_server.py index c67a2bcaa..434110e87 100644 --- a/tests/test_s3bucket_path/test_bucket_path_server.py +++ b/tests/test_s3bucket_path/test_bucket_path_server.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import sure # noqa +from flask.testing import FlaskClient import moto.server as server ''' @@ -8,9 +9,21 @@ Test the different server responses ''' -def test_s3_server_get(): +class AuthenticatedClient(FlaskClient): + def open(self, *args, **kwargs): + kwargs['headers'] = kwargs.get('headers', {}) + kwargs['headers']['Authorization'] = "Any authorization header" + return super(AuthenticatedClient, self).open(*args, **kwargs) + + +def authenticated_client(): backend = server.create_backend_app("s3bucket_path") - test_client = backend.test_client() + backend.test_client_class = AuthenticatedClient + return backend.test_client() + + +def test_s3_server_get(): + test_client = authenticated_client() res = test_client.get('/') @@ -18,8 +31,7 @@ def test_s3_server_get(): def test_s3_server_bucket_create(): - backend = server.create_backend_app("s3bucket_path") - test_client = backend.test_client() + test_client = authenticated_client() res = test_client.put('/foobar', 'http://localhost:5000') res.status_code.should.equal(200) @@ -54,8 +66,7 @@ def test_s3_server_bucket_create(): def test_s3_server_post_to_bucket(): - backend = server.create_backend_app("s3bucket_path") - test_client 
= backend.test_client() + test_client = authenticated_client() res = test_client.put('/foobar2', 'http://localhost:5000/') res.status_code.should.equal(200) @@ -71,8 +82,7 @@ def test_s3_server_post_to_bucket(): def test_s3_server_put_ipv6(): - backend = server.create_backend_app("s3bucket_path") - test_client = backend.test_client() + test_client = authenticated_client() res = test_client.put('/foobar2', 'http://[::]:5000/') res.status_code.should.equal(200) @@ -88,8 +98,7 @@ def test_s3_server_put_ipv6(): def test_s3_server_put_ipv4(): - backend = server.create_backend_app("s3bucket_path") - test_client = backend.test_client() + test_client = authenticated_client() res = test_client.put('/foobar2', 'http://127.0.0.1:5000/') res.status_code.should.equal(200) From e33702fbac00775538d1320500304aef32ae52db Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sat, 16 Sep 2017 12:39:19 -0700 Subject: [PATCH 208/412] using deprecated mock just to patch requests library --- tests/test_s3/test_s3.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index f7898fcb4..74cbfa310 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -864,6 +864,7 @@ def test_bucket_acl_switching(): g.permission == 'READ' for g in grants), grants +@mock_s3_deprecated @mock_s3 def test_s3_object_in_public_bucket(): s3 = boto3.resource('s3') @@ -879,6 +880,7 @@ def test_s3_object_in_public_bucket(): response.status_code.should.equal(403) +@mock_s3_deprecated @mock_s3 def test_s3_object_in_private_bucket(): s3 = boto3.resource('s3') From c8f6fb7738faa0423bfa5c900994e3207e9a509d Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sat, 16 Sep 2017 15:48:20 -0700 Subject: [PATCH 209/412] Creating server-safe anonymous clients for testing --- tests/test_s3/test_s3.py | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 
74cbfa310..4fc698787 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -16,6 +16,7 @@ import boto3 from botocore.client import ClientError import botocore.exceptions from boto.exception import S3CreateError, S3ResponseError +from botocore.handlers import disable_signing from boto.s3.connection import S3Connection from boto.s3.key import Key from freezegun import freeze_time @@ -864,36 +865,43 @@ def test_bucket_acl_switching(): g.permission == 'READ' for g in grants), grants -@mock_s3_deprecated @mock_s3 def test_s3_object_in_public_bucket(): s3 = boto3.resource('s3') bucket = s3.Bucket('test-bucket') bucket.create(ACL='public-read') bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt') - direct_url = 'https://test-bucket.s3.amazonaws.com/file.txt' - response = requests.get(direct_url) - response.status_code.should.equal(200) - response.content.should.equal(b'ABCD') + + s3_anonymous = boto3.resource('s3') + s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) + + contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() + contents.should.equal(b'ABCD') + bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') - response = requests.get(direct_url) - response.status_code.should.equal(403) + + with assert_raises(ClientError) as exc: + s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() + exc.exception.response['Error']['Code'].should.equal('403') -@mock_s3_deprecated @mock_s3 def test_s3_object_in_private_bucket(): s3 = boto3.resource('s3') bucket = s3.Bucket('test-bucket') bucket.create(ACL='private') bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') - direct_url = 'https://test-bucket.s3.amazonaws.com/file.txt' - response = requests.get(direct_url) - response.status_code.should.equal(403) + + s3_anonymous = boto3.resource('s3') + s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) + + with 
assert_raises(ClientError) as exc: + s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() + exc.exception.response['Error']['Code'].should.equal('403') + bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt') - response = requests.get(direct_url) - response.status_code.should.equal(200) - response.content.should.equal(b'ABCD') + contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() + contents.should.equal(b'ABCD') @mock_s3_deprecated From 4b7a1575607dd54843a261ea5845b35b6e902f5d Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sat, 16 Sep 2017 19:53:09 -0700 Subject: [PATCH 210/412] implement target group tagging --- moto/elbv2/models.py | 6 ++++ moto/elbv2/responses.py | 60 ++++++++++++++++++++++------------ tests/test_elbv2/test_elbv2.py | 6 ++++ 3 files changed, 51 insertions(+), 21 deletions(-) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 6bc8f860e..3c6afe7f5 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -65,6 +65,7 @@ class FakeTargetGroup(BaseModel): self.healthy_threshold_count = healthy_threshold_count self.unhealthy_threshold_count = unhealthy_threshold_count self.load_balancer_arns = [] + self.tags = {} self.attributes = { 'deregistration_delay.timeout_seconds': 300, @@ -86,6 +87,11 @@ class FakeTargetGroup(BaseModel): if not t: raise InvalidTargetError() + def add_tag(self, key, value): + if len(self.tags) >= 10 and key not in self.tags: + raise TooManyTagsError() + self.tags[key] = value + def health_for(self, target): t = self.targets.get(target['id']) if t is None: diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 652ecc566..3e8535187 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -266,10 +266,17 @@ class ELBV2Response(BaseResponse): resource_arns = self._get_multi_param('ResourceArns.member') for arn in resource_arns: - load_balancer = self.elbv2_backend.load_balancers.get(arn) - if not load_balancer: + if 
':targetgroup' in arn: + resource = self.elbv2_backend.target_groups.get(arn) + if not resource: + raise TargetGroupNotFoundError() + elif ':loadbalancer' in arn: + resource = self.elbv2_backend.load_balancers.get(arn) + if not resource: + raise LoadBalancerNotFoundError() + else: raise LoadBalancerNotFoundError() - self._add_tags(load_balancer) + self._add_tags(resource) template = self.response_template(ADD_TAGS_TEMPLATE) return template.render() @@ -279,30 +286,41 @@ class ELBV2Response(BaseResponse): tag_keys = self._get_multi_param('TagKeys.member') for arn in resource_arns: - load_balancer = self.elbv2_backend.load_balancers.get(arn) - if not load_balancer: + if ':targetgroup' in arn: + resource = self.elbv2_backend.target_groups.get(arn) + if not resource: + raise TargetGroupNotFoundError() + elif ':loadbalancer' in arn: + resource = self.elbv2_backend.load_balancers.get(arn) + if not resource: + raise LoadBalancerNotFoundError() + else: raise LoadBalancerNotFoundError() - [load_balancer.remove_tag(key) for key in tag_keys] + [resource.remove_tag(key) for key in tag_keys] template = self.response_template(REMOVE_TAGS_TEMPLATE) return template.render() def describe_tags(self): - elbs = [] - for key, value in self.querystring.items(): - if "ResourceArns.member" in key: - number = key.split('.')[2] - load_balancer_arn = self._get_param( - 'ResourceArns.member.{0}'.format(number)) - elb = self.elbv2_backend.load_balancers.get(load_balancer_arn) - if not elb: + resource_arns = self._get_multi_param('ResourceArns.member') + resources = [] + for arn in resource_arns: + if ':targetgroup' in arn: + resource = self.elbv2_backend.target_groups.get(arn) + if not resource: + raise TargetGroupNotFoundError() + elif ':loadbalancer' in arn: + resource = self.elbv2_backend.load_balancers.get(arn) + if not resource: raise LoadBalancerNotFoundError() - elbs.append(elb) + else: + raise LoadBalancerNotFoundError() + resources.append(resource) template = 
self.response_template(DESCRIBE_TAGS_TEMPLATE) - return template.render(load_balancers=elbs) + return template.render(resources=resources) - def _add_tags(self, elb): + def _add_tags(self, resource): tag_values = [] tag_keys = [] @@ -324,7 +342,7 @@ class ELBV2Response(BaseResponse): raise DuplicateTagKeysError(counts[0]) for tag_key, tag_value in zip(tag_keys, tag_values): - elb.add_tag(tag_key, tag_value) + resource.add_tag(tag_key, tag_value) ADD_TAGS_TEMPLATE = """ @@ -344,11 +362,11 @@ REMOVE_TAGS_TEMPLATE = """ - {% for load_balancer in load_balancers %} + {% for resource in resources %} - {{ load_balancer.arn }} + {{ resource.arn }} - {% for key, value in load_balancer.tags.items() %} + {% for key, value in resource.tags.items() %} {{ value }} {{ key }} diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index e84cd0080..21799ddcf 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -251,6 +251,12 @@ def test_create_target_group_and_listeners(): UnhealthyThresholdCount=2, Matcher={'HttpCode': '200'}) target_group = response.get('TargetGroups')[0] + target_group_arn = target_group['TargetGroupArn'] + + # Add tags to the target group + conn.add_tags(ResourceArns=[target_group_arn], Tags=[{'Key': 'target', 'Value': 'group'}]) + conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal( + [{'Key': 'target', 'Value': 'group'}]) # Check it's in the describe_target_groups response response = conn.describe_target_groups() From dc1d0da63cad01ae6093e129c31da70e2f049c25 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sat, 16 Sep 2017 20:09:07 -0700 Subject: [PATCH 211/412] bumping to version 1.1.10 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 9e14bf544..9814a7e28 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.9', + version='1.1.10', description='A library 
that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From cf7e07b728e10ad47d067084f6816c4b7b440c4a Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Mon, 18 Sep 2017 14:13:02 +0100 Subject: [PATCH 212/412] Added GetParameter support --- moto/ssm/models.py | 5 +++++ moto/ssm/responses.py | 19 +++++++++++++++++++ tests/test_ssm/test_ssm_boto3.py | 19 +++++++++++++++++++ 3 files changed, 43 insertions(+) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index bbb84ad91..a0e4a2155 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -75,6 +75,11 @@ class SimpleSystemManagerBackend(BaseBackend): result.append(self._parameters[name]) return result + def get_parameter(self, name, with_decryption): + if name in self._parameters: + return self._parameters[name] + return None + def put_parameter(self, name, description, value, type, keyid, overwrite): if not overwrite and name in self._parameters: return diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 7c31a6dd9..a2673f7e9 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -42,6 +42,25 @@ class SimpleSystemManagerResponse(BaseResponse): response['InvalidParameters'].append(name) return json.dumps(response) + def get_parameter(self): + name = self._get_param('Name') + with_decryption = self._get_param('WithDecryption') + + result = self.ssm_backend.get_parameter(name, with_decryption) + + if result is None: + return '', dict(status=400) + + response = { + 'Parameter': { + 'Name': name, + 'Type': result.type, + 'Value': result.value + } + } + + return json.dumps(response) + def get_parameters(self): names = self._get_param('Names') with_decryption = self._get_param('WithDecryption') diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 7a783299b..859ec7a81 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -66,6 +66,25 @@ def test_put_parameter(): 
response['Parameters'][0]['Type'].should.equal('String') +@mock_ssm +def test_get_parameter(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + response = client.get_parameter( + Name='test', + WithDecryption=False) + + response['Parameter']['Name'].should.equal('test') + response['Parameter']['Value'].should.equal('value') + response['Parameter']['Type'].should.equal('String') + + @mock_ssm def test_describe_parameters(): client = boto3.client('ssm', region_name='us-east-1') From 298772ca927a9b11359e567c3c448adc5cd3e566 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Mon, 18 Sep 2017 19:51:01 +0530 Subject: [PATCH 213/412] Raise InvalidGroup.NotFound in DescribeSecurityGroups --- moto/ec2/models.py | 33 ++++++++++++++------------ tests/test_ec2/test_security_groups.py | 15 ++++++++++++ 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 3a2775237..8e5bcdfbd 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1360,22 +1360,25 @@ class SecurityGroupBackend(object): return group def describe_security_groups(self, group_ids=None, groupnames=None, filters=None): - all_groups = itertools.chain(*[x.values() - for x in self.groups.values()]) - groups = [] + matches = itertools.chain(*[x.values() + for x in self.groups.values()]) + if group_ids: + matches = [grp for grp in matches + if grp.id in group_ids] + if len(group_ids) > len(matches): + unknown_ids = set(group_ids) - set(matches) + raise InvalidSecurityGroupNotFoundError(unknown_ids) + if groupnames: + matches = [grp for grp in matches + if grp.name in groupnames] + if len(groupnames) > len(matches): + unknown_names = set(groupnames) - set(matches) + raise InvalidSecurityGroupNotFoundError(unknown_names) + if filters: + matches = [grp for grp in matches + if grp.matches_filters(filters)] - if group_ids or groupnames or 
filters: - for group in all_groups: - if ((group_ids and group.id not in group_ids) or - (groupnames and group.name not in groupnames)): - continue - if filters and not group.matches_filters(filters): - continue - groups.append(group) - else: - groups = all_groups - - return groups + return matches def _delete_security_group(self, vpc_id, group_id): if self.groups[vpc_id][group_id].enis: diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 21ecad11e..45e6e327d 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -348,6 +348,15 @@ def test_get_all_security_groups(): resp.should.have.length_of(1) resp[0].id.should.equal(sg1.id) + with assert_raises(EC2ResponseError) as cm: + conn.get_all_security_groups(groupnames=['does_not_exist']) + cm.exception.code.should.equal('InvalidGroup.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + resp.should.have.length_of(1) + resp[0].id.should.equal(sg1.id) + resp = conn.get_all_security_groups(filters={'vpc-id': ['vpc-mjm05d27']}) resp.should.have.length_of(1) resp[0].id.should.equal(sg1.id) @@ -681,3 +690,9 @@ def test_get_all_security_groups_filter_with_same_vpc_id(): security_groups = conn.get_all_security_groups( group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) security_groups.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_security_groups(group_ids=['does_not_exist']) + cm.exception.code.should.equal('InvalidGroup.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none From 08c4eff0b26518cf14c7eacaa1eb060bfc17ea50 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Mon, 18 Sep 2017 23:12:39 +0530 Subject: [PATCH 214/412] Added invalid id exceptions when filtering snapshots and volumes --- moto/ec2/models.py | 28 +++++++++++++++------- moto/ec2/responses/elastic_block_store.py | 10 ++------ 
tests/test_ec2/test_elastic_block_store.py | 12 ++++++++++ 3 files changed, 34 insertions(+), 16 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 8e5bcdfbd..4b143eeab 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1775,11 +1775,17 @@ class EBSBackend(object): self.volumes[volume_id] = volume return volume - def describe_volumes(self, filters=None): + def describe_volumes(self, volume_ids=None, filters=None): + matches = self.volumes.values() + if volume_ids: + matches = [vol for vol in matches + if vol.id in volume_ids] + if len(volume_ids) > len(matches): + unknown_ids = set(volume_ids) - set(matches) + raise InvalidVolumeIdError(unknown_ids) if filters: - volumes = self.volumes.values() - return generic_filter(filters, volumes) - return self.volumes.values() + matches = generic_filter(filters, matches) + return matches def get_volume(self, volume_id): volume = self.volumes.get(volume_id, None) @@ -1827,11 +1833,17 @@ class EBSBackend(object): self.snapshots[snapshot_id] = snapshot return snapshot - def describe_snapshots(self, filters=None): + def describe_snapshots(self, snapshot_ids=None, filters=None): + matches = self.snapshots.values() + if snapshot_ids: + matches = [vol for vol in matches + if vol.id in snapshot_ids] + if len(snapshot_ids) > len(matches): + unknown_ids = set(snapshot_ids) - set(matches) + raise InvalidSnapshotIdError(unknown_ids) if filters: - snapshots = self.snapshots.values() - return generic_filter(filters, snapshots) - return self.snapshots.values() + matches = generic_filter(filters, matches) + return matches def get_snapshot(self, snapshot_id): snapshot = self.snapshots.get(snapshot_id, None) diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index 8f12dc918..37b3e9a07 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -54,20 +54,14 @@ class ElasticBlockStore(BaseResponse): def 
describe_snapshots(self): filters = filters_from_querystring(self.querystring) snapshot_ids = self._get_multi_param('SnapshotId') - snapshots = self.ec2_backend.describe_snapshots(filters=filters) - # Describe snapshots to handle filter on snapshot_ids - snapshots = [ - s for s in snapshots if s.id in snapshot_ids] if snapshot_ids else snapshots + snapshots = self.ec2_backend.describe_snapshots(snapshot_ids=snapshot_ids, filters=filters) template = self.response_template(DESCRIBE_SNAPSHOTS_RESPONSE) return template.render(snapshots=snapshots) def describe_volumes(self): filters = filters_from_querystring(self.querystring) volume_ids = self._get_multi_param('VolumeId') - volumes = self.ec2_backend.describe_volumes(filters=filters) - # Describe volumes to handle filter on volume_ids - volumes = [ - v for v in volumes if v.id in volume_ids] if volume_ids else volumes + volumes = self.ec2_backend.describe_volumes(volume_ids=volume_ids, filters=filters) template = self.response_template(DESCRIBE_VOLUMES_RESPONSE) return template.render(volumes=volumes) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index b238e68f9..4427d4843 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -83,6 +83,12 @@ def test_filter_volume_by_id(): vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id]) vol2.should.have.length_of(2) + with assert_raises(EC2ResponseError) as cm: + conn.get_all_volumes(volume_ids=['vol-does_not_exist']) + cm.exception.code.should.equal('InvalidVolume.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_volume_filters(): @@ -302,6 +308,12 @@ def test_filter_snapshot_by_id(): s.volume_id.should.be.within([volume2.id, volume3.id]) s.region.name.should.equal(conn.region.name) + with assert_raises(EC2ResponseError) as cm: + 
conn.get_all_snapshots(snapshot_ids=['snap-does_not_exist']) + cm.exception.code.should.equal('InvalidSnapshot.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_snapshot_filters(): From ca56955a97c98057bffb7b3b9bba9f49fd8212bf Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Mon, 18 Sep 2017 23:38:39 +0530 Subject: [PATCH 215/412] Added invalid id exceptions when filtering vpcs and subnets --- moto/ec2/models.py | 39 ++++++++++++++++++++-------------- tests/test_ec2/test_subnets.py | 26 +++++++++++++++++++++++ tests/test_ec2/test_vpcs.py | 6 ++++++ 3 files changed, 55 insertions(+), 16 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 4b143eeab..e7e8a1dd8 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1836,8 +1836,8 @@ class EBSBackend(object): def describe_snapshots(self, snapshot_ids=None, filters=None): matches = self.snapshots.values() if snapshot_ids: - matches = [vol for vol in matches - if vol.id in snapshot_ids] + matches = [snap for snap in matches + if snap.id in snapshot_ids] if len(snapshot_ids) > len(matches): unknown_ids = set(snapshot_ids) - set(matches) raise InvalidSnapshotIdError(unknown_ids) @@ -1962,12 +1962,16 @@ class VPCBackend(object): return self.vpcs.get(vpc_id) def get_all_vpcs(self, vpc_ids=None, filters=None): + matches = self.vpcs.values() if vpc_ids: - vpcs = [vpc for vpc in self.vpcs.values() if vpc.id in vpc_ids] - else: - vpcs = self.vpcs.values() - - return generic_filter(filters, vpcs) + matches = [vpc for vpc in matches + if vpc.id in vpc_ids] + if len(vpc_ids) > len(matches): + unknown_ids = set(vpc_ids) - set(matches) + raise InvalidVPCIdError(unknown_ids) + if filters: + matches = generic_filter(filters, matches) + return matches def delete_vpc(self, vpc_id): # Delete route table if only main route table remains. 
@@ -2204,16 +2208,19 @@ class SubnetBackend(object): return subnet def get_all_subnets(self, subnet_ids=None, filters=None): - subnets = [] + # Extract a list of all subnets + matches = itertools.chain(*[x.values() + for x in self.subnets.values()]) if subnet_ids: - for subnet_id in subnet_ids: - for items in self.subnets.values(): - if subnet_id in items: - subnets.append(items[subnet_id]) - else: - for items in self.subnets.values(): - subnets.extend(items.values()) - return generic_filter(filters, subnets) + matches = [sn for sn in matches + if sn.id in subnet_ids] + if len(subnet_ids) > len(matches): + unknown_ids = set(subnet_ids) - set(matches) + raise InvalidSubnetIdError(unknown_ids) + if filters: + matches = generic_filter(filters, matches) + + return matches def delete_subnet(self, subnet_id): for subnets in self.subnets.values(): diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 38565a28f..99e6d45d8 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -158,6 +158,32 @@ def test_modify_subnet_attribute_validation(): SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) +@mock_ec2_deprecated +def test_subnet_get_by_id(): + ec2 = boto.ec2.connect_to_region('us-west-1') + conn = boto.vpc.connect_to_region('us-west-1') + vpcA = conn.create_vpc("10.0.0.0/16") + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') + vpcB = conn.create_vpc("10.0.0.0/16") + subnetB1 = conn.create_subnet( + vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetB2 = conn.create_subnet( + vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') + + subnets_by_id = conn.get_all_subnets(subnet_ids=[subnetA.id, subnetB1.id]) + subnets_by_id.should.have.length_of(2) + subnets_by_id = tuple(map(lambda s: s.id, subnets_by_id)) + subnetA.id.should.be.within(subnets_by_id) + subnetB1.id.should.be.within(subnets_by_id) + + with assert_raises(EC2ResponseError) as cm: + 
conn.get_all_subnets(subnet_ids=['subnet-does_not_exist']) + cm.exception.code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + @mock_ec2_deprecated def test_get_subnets_filtering(): ec2 = boto.ec2.connect_to_region('us-west-1') diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 904603f6d..fc0a93cbb 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -113,6 +113,12 @@ def test_vpc_get_by_id(): vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) + with assert_raises(EC2ResponseError) as cm: + conn.get_all_vpcs(vpc_ids=['vpc-does_not_exist']) + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_vpc_get_by_cidr_block(): From 783c287e51c1614cd8341ede263c914aad691449 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Mon, 18 Sep 2017 21:27:56 +0100 Subject: [PATCH 216/412] Added non existant parameter test + needed error responses --- moto/ssm/responses.py | 6 +++++- tests/test_ssm/test_ssm_boto3.py | 15 +++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index a2673f7e9..ca0339693 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -49,7 +49,11 @@ class SimpleSystemManagerResponse(BaseResponse): result = self.ssm_backend.get_parameter(name, with_decryption) if result is None: - return '', dict(status=400) + error = { + '__type': 'ParameterNotFound', + 'message': 'Parameter {0} not found.'.format(name) + } + return json.dumps(error), dict(status=400) response = { 'Parameter': { diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 859ec7a81..2374ebf3a 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals 
import boto3 +import botocore.exceptions import sure # noqa from moto import mock_ssm @@ -85,6 +86,20 @@ def test_get_parameter(): response['Parameter']['Type'].should.equal('String') +@mock_ssm +def test_get_nonexistant_parameter(): + client = boto3.client('ssm', region_name='us-east-1') + + try: + client.get_parameter( + Name='test_noexist', + WithDecryption=False) + raise RuntimeError('Should of failed') + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal('GetParameter') + err.response['Error']['Message'].should.equal('Parameter test_noexist not found.') + + @mock_ssm def test_describe_parameters(): client = boto3.client('ssm', region_name='us-east-1') From 81026dd1d7cce227706459251fb42652c771123a Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Mon, 18 Sep 2017 21:46:07 +0100 Subject: [PATCH 217/412] Newlines for more pep8 compliance --- tests/test_ssm/test_ssm_boto3.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 2374ebf3a..7f4aca533 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -25,6 +25,7 @@ def test_delete_parameter(): response = client.get_parameters(Names=['test']) len(response['Parameters']).should.equal(0) + @mock_ssm def test_delete_parameters(): client = boto3.client('ssm', region_name='us-east-1') @@ -45,6 +46,7 @@ def test_delete_parameters(): response = client.get_parameters(Names=['test']) len(response['Parameters']).should.equal(0) + @mock_ssm def test_put_parameter(): client = boto3.client('ssm', region_name='us-east-1') @@ -313,6 +315,7 @@ def test_put_parameter_secure_custom_kms(): response['Parameters'][0]['Value'].should.equal('value') response['Parameters'][0]['Type'].should.equal('SecureString') + @mock_ssm def test_add_remove_list_tags_for_resource(): client = boto3.client('ssm', region_name='us-east-1') From 39f3e575be5f17b9635a4001b76fd98032fde834 Mon Sep 17 00:00:00 2001 From: Jack 
Danger Date: Mon, 18 Sep 2017 14:00:16 -0700 Subject: [PATCH 218/412] bumping to version 1.1.11 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 9814a7e28..2af39396d 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.10', + version='1.1.11', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 4eec260fa71da4134baa601e7cbfeeae1816146c Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 19 Sep 2017 09:29:05 +0100 Subject: [PATCH 219/412] Switched to using alpine --- Dockerfile | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 72657903e..895831bee 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,15 @@ -FROM python:2 +FROM alpine:3.6 ADD . /moto/ ENV PYTHONUNBUFFERED 1 WORKDIR /moto/ -RUN pip install ".[server]" +RUN apk add --no-cache python3 && \ + python3 -m ensurepip && \ + rm -r /usr/lib/python*/ensurepip && \ + pip3 --no-cache-dir install --upgrade pip setuptools && \ + pip3 --no-cache-dir install ".[server]" -CMD ["moto_server"] +ENTRYPOINT ["/usr/bin/moto_server"] EXPOSE 5000 From 59968760436876a66327bd1949f02711920d52e8 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Wed, 20 Sep 2017 02:10:10 +0900 Subject: [PATCH 220/412] select service and operation in scaffold --- requirements-dev.txt | 2 ++ scaffold.py | 86 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) create mode 100755 scaffold.py diff --git a/requirements-dev.txt b/requirements-dev.txt index 28aaec601..602e6fbbe 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -9,3 +9,5 @@ flask boto3>=1.4.4 botocore>=1.5.77 six>=1.9 +prompt-toolkit==1.0.14 +click==6.7 diff --git a/scaffold.py b/scaffold.py new file mode 100755 index 000000000..e3889e62d --- /dev/null +++ b/scaffold.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python 
+import os + +import click +from prompt_toolkit import ( + prompt +) +from prompt_toolkit.contrib.completers import WordCompleter +from prompt_toolkit.shortcuts import print_tokens + +from botocore import xform_name +from botocore.session import Session +import boto3 + +from implementation_coverage import ( + get_moto_implementation +) + + +def select_service_and_operation(): + service_names = Session().get_available_services() + service_completer = WordCompleter(service_names) + service_name = prompt('Select service: ', completer=service_completer) + if service_name not in service_names: + click.secho('{} is not valid service'.format(service_name), fg='red') + raise click.Abort() + moto_client = get_moto_implementation(service_name) + real_client = boto3.client(service_name, region_name='us-east-1') + implemented = [] + not_implemented = [] + + operation_names = [xform_name(op) for op in real_client.meta.service_model.operation_names] + for op in operation_names: + if moto_client and op in dir(moto_client): + implemented.append(op) + else: + not_implemented.append(op) + operation_completer = WordCompleter(operation_names) + + click.echo('==Current Implementation Status==') + for operation_name in operation_names: + check = 'X' if operation_name in implemented else ' ' + click.secho('[{}] {}'.format(check, operation_name)) + click.echo('=================================') + operation_name = prompt('Select Operation: ', completer=operation_completer) + + if operation_name not in operation_names: + click.secho('{} is not valid operation'.format(operation_name), fg='red') + raise click.Abort() + + if operation_name in implemented: + click.secho('{} is already implemented'.format(operation_name), fg='red') + raise click.Abort() + return service_name, operation_name + + +def create_dirs(service, operation): + """create lib and test dirs if not exist + """ + lib_dir = os.path.join('moto', service) + test_dir = os.path.join('test', 'test_{}'.format(service)) + if 
os.path.exists(lib_dir): + return + + click.secho('\tInitializing service\t', fg='green', nl=False) + click.secho(service) + + click.secho('\tcraeting\t', fg='green', nl=False) + click.echo(lib_dir) + os.mkdirs(lib_dir) + # do init lib dir + + if not os.path.exists(test_dir): + click.secho('\tcraeting\t', fg='green', nl=False) + click.echo(test_dir) + os.mkdirs(test_dir) + # do init test dir + + +@click.command() +def main(): + service, operation = select_service_and_operation() + create_dirs(service, operation) + +if __name__ == '__main__': + main() From 9cdc0d50703ccd8ff83a2a1b0ebcc5c5fb1e5d2a Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Wed, 20 Sep 2017 03:14:14 +0900 Subject: [PATCH 221/412] Create service and test directories when they don't exist --- scaffold.py | 83 ++++++++++++++++++++++++++------ template/lib/__init__.py.j2 | 7 +++ template/lib/exceptions.py.j2 | 4 ++ template/lib/models.py.j2 | 20 ++++++++ template/lib/responses.py.j2 | 15 ++++++ template/test/test_server.py.j2 | 16 ++++++ template/test/test_service.py.j2 | 11 +++++ 7 files changed, 140 insertions(+), 16 deletions(-) create mode 100644 template/lib/__init__.py.j2 create mode 100644 template/lib/exceptions.py.j2 create mode 100644 template/lib/models.py.j2 create mode 100644 template/lib/responses.py.j2 create mode 100644 template/test/test_server.py.j2 create mode 100644 template/test/test_service.py.j2 diff --git a/scaffold.py b/scaffold.py index e3889e62d..77cc997c5 100755 --- a/scaffold.py +++ b/scaffold.py @@ -2,6 +2,7 @@ import os import click +import jinja2 from prompt_toolkit import ( prompt ) @@ -15,6 +16,12 @@ import boto3 from implementation_coverage import ( get_moto_implementation ) +TEMPLATE_DIR = './template' + + +def print_progress(title, body, color): + click.secho('\t{}\t'.format(title), fg=color, nl=False) + click.echo(body) def select_service_and_operation(): @@ -54,33 +61,77 @@ def select_service_and_operation(): return service_name, operation_name -def 
create_dirs(service, operation): +def get_lib_dir(service): + return os.path.join('moto', service) + +def get_test_dir(service): + return os.path.join('tests', 'test_{}'.format(service)) + + +def render_teamplte(tmpl_dir, tmpl_filename, context, service, alt_filename=None): + is_test = True if 'test' in tmpl_dir else False + rendered = jinja2.Environment( + loader=jinja2.FileSystemLoader(tmpl_dir) + ).get_template(tmpl_filename).render(context) + + dirname = get_test_dir(service) if is_test else get_lib_dir(service) + filename = alt_filename or os.path.splitext(tmpl_filename)[0] + filepath = os.path.join(dirname, filename) + + if os.path.exists(filepath): + print_progress('skip creating', filepath, 'yellow') + else: + print_progress('creating', filepath, 'green') + with open(filepath, 'w') as f: + f.write(rendered) + + +def initialize_service(service, operation): """create lib and test dirs if not exist """ lib_dir = os.path.join('moto', service) - test_dir = os.path.join('test', 'test_{}'.format(service)) + test_dir = os.path.join('tests', 'test_{}'.format(service)) + + print_progress('Initializing service', service, 'green') + + service_class = boto3.client(service).__class__.__name__ + + tmpl_context = { + 'service': service, + 'service_class': service_class + } + + # initialize service directory if os.path.exists(lib_dir): - return + print_progress('skip creating', lib_dir, 'yellow') + else: + print_progress('creating', lib_dir, 'green') + os.makedirs(lib_dir) - click.secho('\tInitializing service\t', fg='green', nl=False) - click.secho(service) + tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib') + for tmpl_filename in os.listdir(tmpl_dir): + render_teamplte( + tmpl_dir, tmpl_filename, tmpl_context, service + ) - click.secho('\tcraeting\t', fg='green', nl=False) - click.echo(lib_dir) - os.mkdirs(lib_dir) - # do init lib dir - - if not os.path.exists(test_dir): - click.secho('\tcraeting\t', fg='green', nl=False) - click.echo(test_dir) - os.mkdirs(test_dir) - # do 
init test dir + # initialize test directory + if os.path.exists(test_dir): + print_progress('skip creating', test_dir, 'yellow') + else: + print_progress('creating', test_dir, 'green') + os.makedirs(test_dir) + tmpl_dir = os.path.join(TEMPLATE_DIR, 'test') + for tmpl_filename in os.listdir(tmpl_dir): + alt_filename = 'test_{}.py'.format(service) if tmpl_filename == 'test_service.py.j2' else None + render_teamplte( + tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename + ) @click.command() def main(): service, operation = select_service_and_operation() - create_dirs(service, operation) + initialize_service(service, operation) if __name__ == '__main__': main() diff --git a/template/lib/__init__.py.j2 b/template/lib/__init__.py.j2 new file mode 100644 index 000000000..8e5bf50c7 --- /dev/null +++ b/template/lib/__init__.py.j2 @@ -0,0 +1,7 @@ +from __future__ import unicode_literals +from .models import {{ service }}_backends +from ..core.models import base_decorator + +{{ service }}_backend = {{ service }}_backends['us-east-1'] +mock_{{ service }} = base_decorator({{ service }}_backends) + diff --git a/template/lib/exceptions.py.j2 b/template/lib/exceptions.py.j2 new file mode 100644 index 000000000..2e9a72b1a --- /dev/null +++ b/template/lib/exceptions.py.j2 @@ -0,0 +1,4 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + diff --git a/template/lib/models.py.j2 b/template/lib/models.py.j2 new file mode 100644 index 000000000..2a0097c1d --- /dev/null +++ b/template/lib/models.py.j2 @@ -0,0 +1,20 @@ +from __future__ import unicode_literals +import boto3 +from moto.core import BaseBackend, BaseModel + + +class {{ service_class }}Backend(BaseBackend): + def __init__(self, region_name=None): + super({{ service_class }}Backend, self).__init__() + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + # add methods from here + + 
+available_regions = boto3.session.Session().get_available_regions("{{ service }}") +{{ service }}_backends = {region: {{ service_class }}Backend for region in available_regions} diff --git a/template/lib/responses.py.j2 b/template/lib/responses.py.j2 new file mode 100644 index 000000000..b27da5b9f --- /dev/null +++ b/template/lib/responses.py.j2 @@ -0,0 +1,15 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import {{ service }}_backends + + +class {{ service_class }}Response(BaseResponse): + @property + def {{ service }}_backend(self): + return {{ service }}_backends[self.region] + + # add methods from here + + +# add teampltes from here + diff --git a/template/test/test_server.py.j2 b/template/test/test_server.py.j2 new file mode 100644 index 000000000..f3963a743 --- /dev/null +++ b/template/test/test_server.py.j2 @@ -0,0 +1,16 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_{{ service }} + +''' +Test the different server responses +''' + +@mock_{{ service }} +def test_{{ service }}_list(): + backend = server.create_backend_app("{{ service }}") + test_client = backend.test_client() + # do test diff --git a/template/test/test_service.py.j2 b/template/test/test_service.py.j2 new file mode 100644 index 000000000..076f92e27 --- /dev/null +++ b/template/test/test_service.py.j2 @@ -0,0 +1,11 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_{{ service }} + + +@mock_{{ service }} +def test_list(): + # do test + pass From dc09b18b99b73fdc82bf85136c1bc3e969fbf2fb Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 19 Sep 2017 20:30:54 +0100 Subject: [PATCH 222/412] Added -H 0.0.0.0 to entrypoint --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 895831bee..3c18fb106 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ RUN apk 
add --no-cache python3 && \ pip3 --no-cache-dir install --upgrade pip setuptools && \ pip3 --no-cache-dir install ".[server]" -ENTRYPOINT ["/usr/bin/moto_server"] +ENTRYPOINT ["/usr/bin/moto_server", "-H", "0.0.0.0"] EXPOSE 5000 From 16e0326fe755969744d5d00b822c9fefe8d24d62 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 19 Sep 2017 21:43:55 +0100 Subject: [PATCH 223/412] Fixed #1162 --- moto/dynamodb2/responses.py | 41 +++++++++++++++------------ tests/test_dynamodb2/test_dynamodb.py | 13 +++++++++ 2 files changed, 36 insertions(+), 18 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index c3cb4ef72..12b166ea0 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -21,8 +21,8 @@ class DynamoHandler(BaseResponse): if match: return match.split(".")[1] - def error(self, type_, status=400): - return status, self.response_headers, dynamo_json_dump({'__type': type_}) + def error(self, type_, message, status=400): + return status, self.response_headers, dynamo_json_dump({'__type': type_, 'message': message}) def call_action(self): self.body = json.loads(self.body or '{}') @@ -82,7 +82,7 @@ class DynamoHandler(BaseResponse): return dynamo_json_dump(table.describe()) else: er = 'com.amazonaws.dynamodb.v20111205#ResourceInUseException' - return self.error(er) + return self.error(er, 'Resource in use') def delete_table(self): name = self.body['TableName'] @@ -91,7 +91,7 @@ class DynamoHandler(BaseResponse): return dynamo_json_dump(table.describe()) else: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' - return self.error(er) + return self.error(er, 'Requested resource not found') def tag_resource(self): tags = self.body['Tags'] @@ -120,7 +120,7 @@ class DynamoHandler(BaseResponse): return json.dumps({'Tags': tags_resp}) except AttributeError: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' - return self.error(er) + return self.error(er, 'Requested resource not found') def 
update_table(self): name = self.body['TableName'] @@ -138,7 +138,7 @@ class DynamoHandler(BaseResponse): table = dynamodb_backend2.tables[name] except KeyError: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' - return self.error(er) + return self.error(er, 'Requested resource not found') return dynamo_json_dump(table.describe(base_key='Table')) def put_item(self): @@ -190,7 +190,7 @@ class DynamoHandler(BaseResponse): name, item, expected, overwrite) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' - return self.error(er) + return self.error(er, 'A condition specified in the operation could not be evaluated.') if result: item_dict = result.to_json() @@ -198,7 +198,7 @@ class DynamoHandler(BaseResponse): return dynamo_json_dump(item_dict) else: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' - return self.error(er) + return self.error(er, 'Requested resource not found') def batch_write_item(self): table_batches = self.body['RequestItems'] @@ -235,15 +235,14 @@ class DynamoHandler(BaseResponse): item = dynamodb_backend2.get_item(name, key) except ValueError: er = 'com.amazon.coral.validate#ValidationException' - return self.error(er, status=400) + return self.error(er, 'Validation Exception') if item: item_dict = item.describe_attrs(attributes=None) item_dict['ConsumedCapacityUnits'] = 0.5 return dynamo_json_dump(item_dict) else: # Item not found - er = '{}' - return self.error(er, status=200) + return 200, self.response_headers, '{}' def batch_get_item(self): table_batches = self.body['RequestItems'] @@ -282,6 +281,12 @@ class DynamoHandler(BaseResponse): value_alias_map = self.body['ExpressionAttributeValues'] table = dynamodb_backend2.get_table(name) + + # If table does not exist + if table is None: + return self.error('com.amazonaws.dynamodb.v20120810#ResourceNotFoundException', + 'Requested resource not found') + index_name = self.body.get('IndexName') if index_name: all_indexes = 
(table.global_indexes or []) + \ @@ -350,7 +355,7 @@ class DynamoHandler(BaseResponse): filter_kwargs[key] = value if hash_key_name is None: er = "'com.amazonaws.dynamodb.v20120810#ResourceNotFoundException" - return self.error(er) + return self.error(er, 'Requested resource not found') hash_key = key_conditions[hash_key_name][ 'AttributeValueList'][0] if len(key_conditions) == 1: @@ -359,7 +364,7 @@ class DynamoHandler(BaseResponse): else: if range_key_name is None and not filter_kwargs: er = "com.amazon.coral.validate#ValidationException" - return self.error(er) + return self.error(er, 'Validation Exception') else: range_condition = key_conditions.get(range_key_name) if range_condition: @@ -381,7 +386,7 @@ class DynamoHandler(BaseResponse): exclusive_start_key, scan_index_forward, index_name=index_name, **filter_kwargs) if items is None: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' - return self.error(er) + return self.error(er, 'Requested resource not found') result = { "Count": len(items), @@ -417,7 +422,7 @@ class DynamoHandler(BaseResponse): if items is None: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' - return self.error(er) + return self.error(er, 'Requested resource not found') result = { "Count": len(items), @@ -436,7 +441,7 @@ class DynamoHandler(BaseResponse): table = dynamodb_backend2.get_table(name) if not table: er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException' - return self.error(er) + return self.error(er, 'A condition specified in the operation could not be evaluated.') item = dynamodb_backend2.delete_item(name, keys) if item and return_values == 'ALL_OLD': @@ -496,10 +501,10 @@ class DynamoHandler(BaseResponse): expected) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' - return self.error(er) + return self.error(er, 'A condition specified in the operation could not be evaluated.') except TypeError: er = 
'com.amazonaws.dynamodb.v20111205#ValidationException' - return self.error(er) + return self.error(er, 'Validation Exception') item_dict = item.to_json() item_dict['ConsumedCapacityUnits'] = 0.5 diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 764980fba..3784cf71c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -181,3 +181,16 @@ def test_item_add_empty_string_exception(): ex.exception.response['Error']['Message'].should.equal( 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' ) + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_query_invalid_table(): + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + try: + conn.query(TableName='invalid_table', KeyConditionExpression='index1 = :partitionkeyval', ExpressionAttributeValues={':partitionkeyval': {'S':'test'}}) + except ClientError as exception: + assert exception.response['Error']['Code'] == "ResourceNotFoundException" \ No newline at end of file From 83c40a5bf6efd4ae29148dd8766342155a32e2da Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 19 Sep 2017 13:58:59 -0700 Subject: [PATCH 224/412] moving implemetation_coverage to new scripts dir --- implementation_coverage.py => scripts/implementation_coverage.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename implementation_coverage.py => scripts/implementation_coverage.py (100%) diff --git a/implementation_coverage.py b/scripts/implementation_coverage.py similarity index 100% rename from implementation_coverage.py rename to scripts/implementation_coverage.py From 8f2f7fa20da82040fb8a01752495e4b8ae554036 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 19 Sep 2017 14:01:08 -0700 Subject: [PATCH 225/412] Adding a script to import all of AWS' managed policies fixes #1118 --- Makefile | 3 + moto/iam/aws_managed_policies.py | 12816 
+++++++++++++++++++++++++++ moto/iam/models.py | 123 +- scripts/update_managed_policies.py | 63 + tests/test_iam/test_iam.py | 20 +- 5 files changed, 12912 insertions(+), 113 deletions(-) create mode 100644 moto/iam/aws_managed_policies.py create mode 100755 scripts/update_managed_policies.py diff --git a/Makefile b/Makefile index 3c5582c2d..6e6ad26ce 100644 --- a/Makefile +++ b/Makefile @@ -15,6 +15,9 @@ test: lint test_server: @TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/ +aws_managed_policies: + scripts/update_managed_policies.py + publish: python setup.py sdist bdist_wheel upload git tag `python setup.py --version` diff --git a/moto/iam/aws_managed_policies.py b/moto/iam/aws_managed_policies.py new file mode 100644 index 000000000..277783123 --- /dev/null +++ b/moto/iam/aws_managed_policies.py @@ -0,0 +1,12816 @@ +# Imported via `make aws_managed_policies` +aws_managed_policies_data = """ +{ + "AWSAccountActivityAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSAccountActivityAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:18+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "aws-portal:ViewBilling" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJQRYCWMFX5J3E333K", + "PolicyName": "AWSAccountActivityAccess", + "UpdateDate": "2015-02-06T18:41:18+00:00", + "VersionId": "v1" + }, + "AWSAccountUsageReportAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSAccountUsageReportAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "aws-portal:ViewUsage" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJLIB4VSBVO47ZSBB6", + "PolicyName": 
"AWSAccountUsageReportAccess", + "UpdateDate": "2015-02-06T18:41:19+00:00", + "VersionId": "v1" + }, + "AWSAgentlessDiscoveryService": { + "Arn": "arn:aws:iam::aws:policy/AWSAgentlessDiscoveryService", + "AttachmentCount": 0, + "CreateDate": "2016-08-02T01:35:11+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "awsconnector:RegisterConnector", + "awsconnector:GetConnectorHealth" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:GetUser", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject", + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::connector-platform-upgrade-info/*", + "arn:aws:s3:::connector-platform-upgrade-info", + "arn:aws:s3:::connector-platform-upgrade-bundles/*", + "arn:aws:s3:::connector-platform-upgrade-bundles", + "arn:aws:s3:::connector-platform-release-notes/*", + "arn:aws:s3:::connector-platform-release-notes", + "arn:aws:s3:::prod.agentless.discovery.connector.upgrade/*", + "arn:aws:s3:::prod.agentless.discovery.connector.upgrade" + ] + }, + { + "Action": [ + "s3:PutObject", + "s3:PutObjectAcl" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::import-to-ec2-connector-debug-logs/*" + ] + }, + { + "Action": [ + "SNS:Publish" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:metrics-sns-topic-for-*" + }, + { + "Action": [ + "Discovery:*" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "Discovery" + }, + { + "Action": [ + "arsenal:RegisterOnPremisesAgent" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "arsenal" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIA3DIL7BYQ35ISM4K", + "PolicyName": "AWSAgentlessDiscoveryService", + "UpdateDate": "2016-08-02T01:35:11+00:00", + "VersionId": "v1" + }, + "AWSApplicationDiscoveryAgentAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSApplicationDiscoveryAgentAccess", + "AttachmentCount": 0, + 
"CreateDate": "2016-05-11T21:38:47+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "arsenal:RegisterOnPremisesAgent" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAICZIOVAGC6JPF3WHC", + "PolicyName": "AWSApplicationDiscoveryAgentAccess", + "UpdateDate": "2016-05-11T21:38:47+00:00", + "VersionId": "v1" + }, + "AWSApplicationDiscoveryServiceFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSApplicationDiscoveryServiceFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-05-11T21:30:50+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "discovery:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJBNJEA6ZXM2SBOPDU", + "PolicyName": "AWSApplicationDiscoveryServiceFullAccess", + "UpdateDate": "2016-05-11T21:30:50+00:00", + "VersionId": "v1" + }, + "AWSBatchFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSBatchFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-13T00:38:59+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "batch:*", + "cloudwatch:GetMetricStatistics", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeKeyPairs", + "ecs:DescribeClusters", + "ecs:Describe*", + "ecs:List*", + "logs:Describe*", + "logs:Get*", + "logs:TestMetricFilter", + "logs:FilterLogEvents", + "iam:ListInstanceProfiles", + "iam:ListRoles" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/AWSBatchServiceRole", + "arn:aws:iam::*:role/ecsInstanceRole", + "arn:aws:iam::*:role/iaws-ec2-spot-fleet-role", + "arn:aws:iam::*:role/aws-ec2-spot-fleet-role", + "arn:aws:iam::*:role/AWSBatchJobRole*" + ] + } + ], + 
"Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ7K2KIWB3HZVK3CUO", + "PolicyName": "AWSBatchFullAccess", + "UpdateDate": "2016-12-13T00:38:59+00:00", + "VersionId": "v2" + }, + "AWSBatchServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole", + "AttachmentCount": 0, + "CreateDate": "2017-05-11T20:44:52+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstances", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeKeyPairs", + "ec2:DescribeImages", + "ec2:DescribeImageAttribute", + "ec2:DescribeSpotFleetInstances", + "ec2:DescribeSpotFleetRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:RequestSpotFleet", + "ec2:CancelSpotFleetRequests", + "ec2:ModifySpotFleetRequest", + "ec2:TerminateInstances", + "autoscaling:DescribeAccountLimits", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:CreateLaunchConfiguration", + "autoscaling:CreateAutoScalingGroup", + "autoscaling:UpdateAutoScalingGroup", + "autoscaling:SetDesiredCapacity", + "autoscaling:DeleteLaunchConfiguration", + "autoscaling:DeleteAutoScalingGroup", + "autoscaling:CreateOrUpdateTags", + "autoscaling:SuspendProcesses", + "autoscaling:PutNotificationConfiguration", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ecs:DescribeClusters", + "ecs:DescribeContainerInstances", + "ecs:DescribeTaskDefinition", + "ecs:DescribeTasks", + "ecs:ListClusters", + "ecs:ListContainerInstances", + "ecs:ListTaskDefinitionFamilies", + "ecs:ListTaskDefinitions", + "ecs:ListTasks", + "ecs:CreateCluster", + "ecs:DeleteCluster", + "ecs:RegisterTaskDefinition", + "ecs:DeregisterTaskDefinition", + "ecs:RunTask", + "ecs:StartTask", + "ecs:StopTask", + "ecs:UpdateContainerAgent", + "ecs:DeregisterContainerInstance", + 
"logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogGroups", + "iam:GetInstanceProfile", + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIUETIXPCKASQJURFE", + "PolicyName": "AWSBatchServiceRole", + "UpdateDate": "2017-05-11T20:44:52+00:00", + "VersionId": "v4" + }, + "AWSCertificateManagerFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-01-21T17:02:36+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "acm:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJYCHABBP6VQIVBCBQ", + "PolicyName": "AWSCertificateManagerFullAccess", + "UpdateDate": "2016-01-21T17:02:36+00:00", + "VersionId": "v1" + }, + "AWSCertificateManagerReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly", + "AttachmentCount": 0, + "CreateDate": "2016-04-21T15:08:16+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": { + "Action": [ + "acm:DescribeCertificate", + "acm:ListCertificates", + "acm:GetCertificate", + "acm:ListTagsForCertificate" + ], + "Effect": "Allow", + "Resource": "*" + }, + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI4GSWX6S4MESJ3EWC", + "PolicyName": "AWSCertificateManagerReadOnly", + "UpdateDate": "2016-04-21T15:08:16+00:00", + "VersionId": "v2" + }, + "AWSCloudFormationReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:39:49+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudformation:DescribeStacks", + 
"cloudformation:DescribeStackEvents", + "cloudformation:DescribeStackResource", + "cloudformation:DescribeStackResources", + "cloudformation:GetTemplate", + "cloudformation:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJWVBEE4I2POWLODLW", + "PolicyName": "AWSCloudFormationReadOnlyAccess", + "UpdateDate": "2015-02-06T18:39:49+00:00", + "VersionId": "v1" + }, + "AWSCloudHSMFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudHSMFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:39:51+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "cloudhsm:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIMBQYQZM7F63DA2UU", + "PolicyName": "AWSCloudHSMFullAccess", + "UpdateDate": "2015-02-06T18:39:51+00:00", + "VersionId": "v1" + }, + "AWSCloudHSMReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudHSMReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:39:52+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudhsm:Get*", + "cloudhsm:List*", + "cloudhsm:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAISVCBSY7YDBOT67KE", + "PolicyName": "AWSCloudHSMReadOnlyAccess", + "UpdateDate": "2015-02-06T18:39:52+00:00", + "VersionId": "v1" + }, + "AWSCloudHSMRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSCloudHSMRole", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:23+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:CreateTags", + "ec2:DeleteNetworkInterface", + 
"ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DetachNetworkInterface" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAI7QIUU4GC66SF26WE", + "PolicyName": "AWSCloudHSMRole", + "UpdateDate": "2015-02-06T18:41:23+00:00", + "VersionId": "v1" + }, + "AWSCloudTrailFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudTrailFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-02-16T18:31:28+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "sns:AddPermission", + "sns:CreateTopic", + "sns:DeleteTopic", + "sns:ListTopics", + "sns:SetTopicAttributes", + "sns:GetTopicAttributes" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:ListAllMyBuckets", + "s3:PutBucketPolicy", + "s3:ListBucket", + "s3:GetObject", + "s3:GetBucketLocation", + "s3:GetBucketPolicy" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "cloudtrail:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole", + "iam:ListRoles", + "iam:GetRolePolicy", + "iam:GetUser" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "kms:ListKeys", + "kms:ListAliases" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIQNUJTQYDRJPC3BNK", + "PolicyName": "AWSCloudTrailFullAccess", + "UpdateDate": "2016-02-16T18:31:28+00:00", + "VersionId": "v4" + }, + "AWSCloudTrailReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudTrailReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-14T20:41:52+00:00", + 
"DefaultVersionId": "v6", + "Document": { + "Statement": [ + { + "Action": [ + "s3:GetObject", + "s3:GetBucketLocation" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudtrail:GetTrailStatus", + "cloudtrail:DescribeTrails", + "cloudtrail:LookupEvents", + "cloudtrail:ListTags", + "cloudtrail:ListPublicKeys", + "cloudtrail:GetEventSelectors", + "s3:ListAllMyBuckets", + "kms:ListAliases" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJDU7KJADWBSEQ3E7S", + "PolicyName": "AWSCloudTrailReadOnlyAccess", + "UpdateDate": "2016-12-14T20:41:52+00:00", + "VersionId": "v6" + }, + "AWSCodeBuildAdminAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeBuildAdminAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-01T19:04:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "codebuild:*", + "codecommit:GetBranch", + "codecommit:GetCommit", + "codecommit:GetRepository", + "codecommit:ListBranches", + "codecommit:ListRepositories", + "ecr:DescribeRepositories", + "ecr:ListImages", + "s3:GetBucketLocation", + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/codebuild/*:log-stream:*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJQJGIOIE3CD2TQXDS", + "PolicyName": "AWSCodeBuildAdminAccess", + "UpdateDate": "2016-12-01T19:04:44+00:00", + "VersionId": "v1" + }, + "AWSCodeBuildDeveloperAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeBuildDeveloperAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-01T19:02:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "codebuild:StartBuild", + "codebuild:StopBuild", + "codebuild:BatchGet*", + 
"codebuild:Get*", + "codebuild:List*", + "codecommit:GetBranch", + "codecommit:GetCommit", + "codecommit:GetRepository", + "codecommit:ListBranches", + "s3:GetBucketLocation", + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/codebuild/*:log-stream:*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIMKTMR34XSBQW45HS", + "PolicyName": "AWSCodeBuildDeveloperAccess", + "UpdateDate": "2016-12-01T19:02:32+00:00", + "VersionId": "v1" + }, + "AWSCodeBuildReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeBuildReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-01T19:03:41+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "codebuild:BatchGet*", + "codebuild:Get*", + "codebuild:List*", + "codecommit:GetBranch", + "codecommit:GetCommit", + "codecommit:GetRepository" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/codebuild/*:log-stream:*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJIZZWN6557F5HVP2K", + "PolicyName": "AWSCodeBuildReadOnlyAccess", + "UpdateDate": "2016-12-01T19:03:41+00:00", + "VersionId": "v1" + }, + "AWSCodeCommitFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeCommitFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-07-09T17:02:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "codecommit:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI4VCZ3XPIZLQ5NZV2", + "PolicyName": "AWSCodeCommitFullAccess", + "UpdateDate": 
"2015-07-09T17:02:19+00:00", + "VersionId": "v1" + }, + "AWSCodeCommitPowerUser": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeCommitPowerUser", + "AttachmentCount": 0, + "CreateDate": "2017-05-22T21:12:48+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "codecommit:BatchGetRepositories", + "codecommit:CreateBranch", + "codecommit:CreateRepository", + "codecommit:DeleteBranch", + "codecommit:Get*", + "codecommit:GitPull", + "codecommit:GitPush", + "codecommit:List*", + "codecommit:Put*", + "codecommit:Test*", + "codecommit:Update*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI4UIINUVGB5SEC57G", + "PolicyName": "AWSCodeCommitPowerUser", + "UpdateDate": "2017-05-22T21:12:48+00:00", + "VersionId": "v3" + }, + "AWSCodeCommitReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeCommitReadOnly", + "AttachmentCount": 0, + "CreateDate": "2015-07-09T17:05:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "codecommit:BatchGetRepositories", + "codecommit:Get*", + "codecommit:GitPull", + "codecommit:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJACNSXR7Z2VLJW3D6", + "PolicyName": "AWSCodeCommitReadOnly", + "UpdateDate": "2015-07-09T17:05:06+00:00", + "VersionId": "v1" + }, + "AWSCodeDeployDeployerAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeDeployDeployerAccess", + "AttachmentCount": 0, + "CreateDate": "2015-05-19T18:18:43+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "codedeploy:Batch*", + "codedeploy:CreateDeployment", + "codedeploy:Get*", + "codedeploy:List*", + "codedeploy:RegisterApplicationRevision" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + 
"IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJUWEPOMGLMVXJAPUI", + "PolicyName": "AWSCodeDeployDeployerAccess", + "UpdateDate": "2015-05-19T18:18:43+00:00", + "VersionId": "v1" + }, + "AWSCodeDeployFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeDeployFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-05-19T18:13:23+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "codedeploy:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIONKN3TJZUKXCHXWC", + "PolicyName": "AWSCodeDeployFullAccess", + "UpdateDate": "2015-05-19T18:13:23+00:00", + "VersionId": "v1" + }, + "AWSCodeDeployReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeDeployReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-05-19T18:21:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "codedeploy:Batch*", + "codedeploy:Get*", + "codedeploy:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAILZHHKCKB4NE7XOIQ", + "PolicyName": "AWSCodeDeployReadOnlyAccess", + "UpdateDate": "2015-05-19T18:21:32+00:00", + "VersionId": "v1" + }, + "AWSCodeDeployRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSCodeDeployRole", + "AttachmentCount": 0, + "CreateDate": "2017-09-11T19:09:51+00:00", + "DefaultVersionId": "v6", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:CompleteLifecycleAction", + "autoscaling:DeleteLifecycleHook", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLifecycleHooks", + "autoscaling:PutLifecycleHook", + "autoscaling:RecordLifecycleActionHeartbeat", + "autoscaling:CreateAutoScalingGroup", + "autoscaling:UpdateAutoScalingGroup", + "autoscaling:EnableMetricsCollection", + 
"autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribePolicies", + "autoscaling:DescribeScheduledActions", + "autoscaling:DescribeNotificationConfigurations", + "autoscaling:DescribeLifecycleHooks", + "autoscaling:SuspendProcesses", + "autoscaling:ResumeProcesses", + "autoscaling:AttachLoadBalancers", + "autoscaling:PutScalingPolicy", + "autoscaling:PutScheduledUpdateGroupAction", + "autoscaling:PutNotificationConfiguration", + "autoscaling:PutLifecycleHook", + "autoscaling:DescribeScalingActivities", + "autoscaling:DeleteAutoScalingGroup", + "ec2:DescribeInstances", + "ec2:DescribeInstanceStatus", + "ec2:TerminateInstances", + "tag:GetTags", + "tag:GetResources", + "sns:Publish", + "cloudwatch:DescribeAlarms", + "cloudwatch:PutMetricAlarm", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJ2NKMKD73QS5NBFLA", + "PolicyName": "AWSCodeDeployRole", + "UpdateDate": "2017-09-11T19:09:51+00:00", + "VersionId": "v6" + }, + "AWSCodePipelineApproverAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodePipelineApproverAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-02T17:24:58+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "codepipeline:GetPipeline", + "codepipeline:GetPipelineState", + "codepipeline:GetPipelineExecution", + "codepipeline:ListPipelineExecutions", + "codepipeline:ListPipelines", + "codepipeline:PutApprovalResult" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": 
"2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAICXNWK42SQ6LMDXM2", + "PolicyName": "AWSCodePipelineApproverAccess", + "UpdateDate": "2017-08-02T17:24:58+00:00", + "VersionId": "v3" + }, + "AWSCodePipelineCustomActionAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodePipelineCustomActionAccess", + "AttachmentCount": 0, + "CreateDate": "2015-07-09T17:02:54+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "codepipeline:AcknowledgeJob", + "codepipeline:GetJobDetails", + "codepipeline:PollForJobs", + "codepipeline:PutJobFailureResult", + "codepipeline:PutJobSuccessResult" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJFW5Z32BTVF76VCYC", + "PolicyName": "AWSCodePipelineCustomActionAccess", + "UpdateDate": "2015-07-09T17:02:54+00:00", + "VersionId": "v1" + }, + "AWSCodePipelineFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodePipelineFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-11-01T19:59:46+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "codepipeline:*", + "iam:ListRoles", + "iam:PassRole", + "s3:CreateBucket", + "s3:GetBucketPolicy", + "s3:GetObject", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:PutBucketPolicy", + "codecommit:ListBranches", + "codecommit:ListRepositories", + "codedeploy:GetApplication", + "codedeploy:GetDeploymentGroup", + "codedeploy:ListApplications", + "codedeploy:ListDeploymentGroups", + "elasticbeanstalk:DescribeApplications", + "elasticbeanstalk:DescribeEnvironments", + "lambda:GetFunctionConfiguration", + "lambda:ListFunctions", + "opsworks:DescribeApps", + "opsworks:DescribeLayers", + "opsworks:DescribeStacks", + "cloudformation:DescribeStacks", + "cloudformation:ListChangeSets" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + 
}, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJP5LH77KSAT2KHQGG", + "PolicyName": "AWSCodePipelineFullAccess", + "UpdateDate": "2016-11-01T19:59:46+00:00", + "VersionId": "v5" + }, + "AWSCodePipelineReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodePipelineReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-02T17:25:18+00:00", + "DefaultVersionId": "v6", + "Document": { + "Statement": [ + { + "Action": [ + "codepipeline:GetPipeline", + "codepipeline:GetPipelineState", + "codepipeline:GetPipelineExecution", + "codepipeline:ListPipelineExecutions", + "codepipeline:ListActionTypes", + "codepipeline:ListPipelines", + "iam:ListRoles", + "s3:GetBucketPolicy", + "s3:GetObject", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "codecommit:ListBranches", + "codecommit:ListRepositories", + "codedeploy:GetApplication", + "codedeploy:GetDeploymentGroup", + "codedeploy:ListApplications", + "codedeploy:ListDeploymentGroups", + "elasticbeanstalk:DescribeApplications", + "elasticbeanstalk:DescribeEnvironments", + "lambda:GetFunctionConfiguration", + "lambda:ListFunctions", + "opsworks:DescribeApps", + "opsworks:DescribeLayers", + "opsworks:DescribeStacks" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAILFKZXIBOTNC5TO2Q", + "PolicyName": "AWSCodePipelineReadOnlyAccess", + "UpdateDate": "2017-08-02T17:25:18+00:00", + "VersionId": "v6" + }, + "AWSCodeStarFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeStarFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-04-19T16:23:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "codestar:*", + "ec2:DescribeKeyPairs", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "CodeStarEC2" + }, + { + "Action": [ + "cloudformation:DescribeStack*", + 
"cloudformation:GetTemplateSummary" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/awscodestar-*" + ], + "Sid": "CodeStarCF" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIXI233TFUGLZOJBEC", + "PolicyName": "AWSCodeStarFullAccess", + "UpdateDate": "2017-04-19T16:23:19+00:00", + "VersionId": "v1" + }, + "AWSCodeStarServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSCodeStarServiceRole", + "AttachmentCount": 0, + "CreateDate": "2017-07-13T19:53:22+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "cloudformation:*Stack*", + "cloudformation:GetTemplate" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/awscodestar-*", + "arn:aws:cloudformation:*:*:stack/awseb-*" + ], + "Sid": "ProjectStack" + }, + { + "Action": [ + "cloudformation:GetTemplateSummary", + "cloudformation:DescribeChangeSet" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "ProjectStackTemplate" + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::awscodestar-*/*" + ], + "Sid": "ProjectQuickstarts" + }, + { + "Action": [ + "s3:*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-codestar-*", + "arn:aws:s3:::aws-codestar-*/*", + "arn:aws:s3:::elasticbeanstalk-*", + "arn:aws:s3:::elasticbeanstalk-*/*" + ], + "Sid": "ProjectS3Buckets" + }, + { + "Action": [ + "codestar:*Project", + "codestar:*Resource*", + "codestar:List*", + "codestar:Describe*", + "codestar:Get*", + "codestar:AssociateTeamMember", + "codecommit:*", + "codepipeline:*", + "codedeploy:*", + "codebuild:*", + "ec2:RunInstances", + "autoscaling:*", + "cloudwatch:Put*", + "ec2:*", + "elasticbeanstalk:*", + "elasticloadbalancing:*", + "iam:ListRoles", + "logs:*", + "sns:*" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "ProjectServices" + }, + { + "Action": [ + "iam:AttachRolePolicy", + 
"iam:CreateRole", + "iam:DeleteRole", + "iam:DeleteRolePolicy", + "iam:DetachRolePolicy", + "iam:GetRole", + "iam:PassRole", + "iam:PutRolePolicy", + "iam:SetDefaultPolicyVersion", + "iam:CreatePolicy", + "iam:DeletePolicy", + "iam:AddRoleToInstanceProfile", + "iam:CreateInstanceProfile", + "iam:DeleteInstanceProfile", + "iam:RemoveRoleFromInstanceProfile" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/CodeStarWorker*", + "arn:aws:iam::*:policy/CodeStarWorker*", + "arn:aws:iam::*:instance-profile/awscodestar-*" + ], + "Sid": "ProjectWorkerRoles" + }, + { + "Action": [ + "iam:AttachUserPolicy", + "iam:DetachUserPolicy" + ], + "Condition": { + "ArnEquals": { + "iam:PolicyArn": [ + "arn:aws:iam::*:policy/CodeStar_*" + ] + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "ProjectTeamMembers" + }, + { + "Action": [ + "iam:CreatePolicy", + "iam:DeletePolicy", + "iam:CreatePolicyVersion", + "iam:DeletePolicyVersion", + "iam:ListEntitiesForPolicy", + "iam:ListPolicyVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:policy/CodeStar_*" + ], + "Sid": "ProjectRoles" + }, + { + "Action": [ + "iam:ListAttachedRolePolicies" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-codestar-service-role", + "arn:aws:iam::*:role/service-role/aws-codestar-service-role" + ], + "Sid": "InspectServiceRole" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIN6D4M2KD3NBOC4M4", + "PolicyName": "AWSCodeStarServiceRole", + "UpdateDate": "2017-07-13T19:53:22+00:00", + "VersionId": "v2" + }, + "AWSConfigRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSConfigRole", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T19:04:46+00:00", + "DefaultVersionId": "v10", + "Document": { + "Statement": [ + { + "Action": [ + "cloudtrail:DescribeTrails", + "ec2:Describe*", + "config:Put*", + "config:Get*", + "config:List*", + "config:Describe*", + 
"cloudtrail:GetTrailStatus", + "s3:GetObject", + "iam:GetAccountAuthorizationDetails", + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary", + "iam:GetGroup", + "iam:GetGroupPolicy", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:GetUser", + "iam:GetUserPolicy", + "iam:ListAttachedGroupPolicies", + "iam:ListAttachedRolePolicies", + "iam:ListAttachedUserPolicies", + "iam:ListEntitiesForPolicy", + "iam:ListGroupPolicies", + "iam:ListGroupsForUser", + "iam:ListInstanceProfilesForRole", + "iam:ListPolicyVersions", + "iam:ListRolePolicies", + "iam:ListUserPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTags", + "acm:DescribeCertificate", + "acm:ListCertificates", + "acm:ListTagsForCertificate", + "rds:DescribeDBInstances", + "rds:DescribeDBSecurityGroups", + "rds:DescribeDBSnapshotAttributes", + "rds:DescribeDBSnapshots", + "rds:DescribeDBSubnetGroups", + "rds:DescribeEventSubscriptions", + "rds:ListTagsForResource", + "rds:DescribeDBClusters", + "s3:GetAccelerateConfiguration", + "s3:GetBucketAcl", + "s3:GetBucketCORS", + "s3:GetBucketLocation", + "s3:GetBucketLogging", + "s3:GetBucketNotification", + "s3:GetBucketPolicy", + "s3:GetBucketRequestPayment", + "s3:GetBucketTagging", + "s3:GetBucketVersioning", + "s3:GetBucketWebsite", + "s3:GetLifecycleConfiguration", + "s3:GetReplicationConfiguration", + "s3:ListAllMyBuckets", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeClusterParameters", + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterSnapshots", + "redshift:DescribeClusterSubnetGroups", + "redshift:DescribeClusters", + "redshift:DescribeEventSubscriptions", + "redshift:DescribeLoggingStatus", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTable", + "dynamodb:ListTables", + "dynamodb:ListTagsOfResource", + 
"cloudwatch:DescribeAlarms", + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingPolicies", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeLifecycleHooks", + "autoscaling:DescribePolicies", + "autoscaling:DescribeScheduledActions", + "autoscaling:DescribeTags" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIQRXRDRGJUA33ELIO", + "PolicyName": "AWSConfigRole", + "UpdateDate": "2017-08-14T19:04:46+00:00", + "VersionId": "v10" + }, + "AWSConfigRulesExecutionRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSConfigRulesExecutionRole", + "AttachmentCount": 0, + "CreateDate": "2016-03-25T17:59:36+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::*/AWSLogs/*/Config/*" + }, + { + "Action": [ + "config:Put*", + "config:Get*", + "config:List*", + "config:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJUB3KIKTA4PU4OYAA", + "PolicyName": "AWSConfigRulesExecutionRole", + "UpdateDate": "2016-03-25T17:59:36+00:00", + "VersionId": "v1" + }, + "AWSConfigUserAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSConfigUserAccess", + "AttachmentCount": 0, + "CreateDate": "2016-08-30T19:15:19+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "config:Get*", + "config:Describe*", + "config:Deliver*", + "config:List*", + "tag:GetResources", + "tag:GetTagKeys", + "cloudtrail:DescribeTrails", + "cloudtrail:GetTrailStatus", + "cloudtrail:LookupEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + 
"IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIWTTSFJ7KKJE3MWGA", + "PolicyName": "AWSConfigUserAccess", + "UpdateDate": "2016-08-30T19:15:19+00:00", + "VersionId": "v3" + }, + "AWSConnector": { + "Arn": "arn:aws:iam::aws:policy/AWSConnector", + "AttachmentCount": 0, + "CreateDate": "2015-09-28T19:50:38+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": "iam:GetUser", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:DeleteObject", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:PutObject", + "s3:PutObjectAcl", + "s3:AbortMultipartUpload", + "s3:ListBucketMultipartUploads", + "s3:ListMultipartUploadParts" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::import-to-ec2-*" + }, + { + "Action": [ + "ec2:CancelConversionTask", + "ec2:CancelExportTask", + "ec2:CreateImage", + "ec2:CreateInstanceExportTask", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeConversionTasks", + "ec2:DescribeExportTasks", + "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeTags", + "ec2:DetachVolume", + "ec2:ImportInstance", + "ec2:ImportVolume", + "ec2:ModifyInstanceAttribute", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:TerminateInstances", + "ec2:ImportImage", + "ec2:DescribeImportImageTasks", + "ec2:DeregisterImage", + "ec2:DescribeSnapshots", + "ec2:DeleteSnapshot", + "ec2:CancelImportTask", + "ec2:ImportSnapshot", + "ec2:DescribeImportSnapshotTasks" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "SNS:Publish" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:metrics-sns-topic-for-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + 
"IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ6YATONJHICG3DJ3U", + "PolicyName": "AWSConnector", + "UpdateDate": "2015-09-28T19:50:38+00:00", + "VersionId": "v3" + }, + "AWSDataPipelineRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSDataPipelineRole", + "AttachmentCount": 0, + "CreateDate": "2016-02-22T17:17:38+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:*", + "datapipeline:DescribeObjects", + "datapipeline:EvaluateExpression", + "dynamodb:BatchGetItem", + "dynamodb:DescribeTable", + "dynamodb:GetItem", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:UpdateTable", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CancelSpotInstanceRequests", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:Describe*", + "ec2:ModifyImageAttribute", + "ec2:ModifyInstanceAttribute", + "ec2:RequestSpotInstances", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:TerminateInstances", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:DeleteSecurityGroup", + "ec2:RevokeSecurityGroupEgress", + "ec2:DescribeNetworkInterfaces", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DetachNetworkInterface", + "elasticmapreduce:*", + "iam:GetInstanceProfile", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:ListAttachedRolePolicies", + "iam:ListRolePolicies", + "iam:ListInstanceProfiles", + "iam:PassRole", + "rds:DescribeDBInstances", + "rds:DescribeDBSecurityGroups", + "redshift:DescribeClusters", + "redshift:DescribeClusterSecurityGroups", + "s3:CreateBucket", + "s3:DeleteObject", + "s3:Get*", + "s3:List*", + "s3:Put*", + "sdb:BatchPutAttributes", + "sdb:Select*", + "sns:GetTopicAttributes", + "sns:ListTopics", + "sns:Publish", + "sns:Subscribe", + "sns:Unsubscribe", + "sqs:CreateQueue", + "sqs:Delete*", + "sqs:GetQueue*", + "sqs:PurgeQueue", + "sqs:ReceiveMessage" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" 
+ }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIKCP6XS3ESGF4GLO2", + "PolicyName": "AWSDataPipelineRole", + "UpdateDate": "2016-02-22T17:17:38+00:00", + "VersionId": "v5" + }, + "AWSDataPipeline_FullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSDataPipeline_FullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-17T18:48:39+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "s3:List*", + "dynamodb:DescribeTable", + "rds:DescribeDBInstances", + "rds:DescribeDBSecurityGroups", + "redshift:DescribeClusters", + "redshift:DescribeClusterSecurityGroups", + "sns:ListTopics", + "sns:Subscribe", + "iam:ListRoles", + "iam:GetRolePolicy", + "iam:GetInstanceProfile", + "iam:ListInstanceProfiles", + "datapipeline:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:PassRole", + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/DataPipelineDefaultResourceRole", + "arn:aws:iam::*:role/DataPipelineDefaultRole" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIXOFIG7RSBMRPHXJ4", + "PolicyName": "AWSDataPipeline_FullAccess", + "UpdateDate": "2017-08-17T18:48:39+00:00", + "VersionId": "v2" + }, + "AWSDataPipeline_PowerUser": { + "Arn": "arn:aws:iam::aws:policy/AWSDataPipeline_PowerUser", + "AttachmentCount": 0, + "CreateDate": "2017-08-17T18:49:42+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "s3:List*", + "dynamodb:DescribeTable", + "rds:DescribeDBInstances", + "rds:DescribeDBSecurityGroups", + "redshift:DescribeClusters", + "redshift:DescribeClusterSecurityGroups", + "sns:ListTopics", + "iam:ListRoles", + "iam:GetRolePolicy", + "iam:GetInstanceProfile", + "iam:ListInstanceProfiles", + "datapipeline:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:PassRole", + "Effect": "Allow", + "Resource": 
[ + "arn:aws:iam::*:role/DataPipelineDefaultResourceRole", + "arn:aws:iam::*:role/DataPipelineDefaultRole" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIMXGLVY6DVR24VTYS", + "PolicyName": "AWSDataPipeline_PowerUser", + "UpdateDate": "2017-08-17T18:49:42+00:00", + "VersionId": "v2" + }, + "AWSDeviceFarmFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSDeviceFarmFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-07-13T16:37:38+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "devicefarm:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJO7KEDP4VYJPNT5UW", + "PolicyName": "AWSDeviceFarmFullAccess", + "UpdateDate": "2015-07-13T16:37:38+00:00", + "VersionId": "v1" + }, + "AWSDirectConnectFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSDirectConnectFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:07+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "directconnect:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJQF2QKZSK74KTIHOW", + "PolicyName": "AWSDirectConnectFullAccess", + "UpdateDate": "2015-02-06T18:40:07+00:00", + "VersionId": "v1" + }, + "AWSDirectConnectReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSDirectConnectReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:08+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "directconnect:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI23HZ27SI6FQMGNQ2", + "PolicyName": 
"AWSDirectConnectReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:08+00:00", + "VersionId": "v1" + }, + "AWSDirectoryServiceFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSDirectoryServiceFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-02-24T23:10:36+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ds:*", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "sns:GetTopicAttributes", + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic", + "sns:ListTopics" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "sns:CreateTopic", + "sns:DeleteTopic", + "sns:SetTopicAttributes", + "sns:Subscribe", + "sns:Unsubscribe" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:DirectoryMonitoring*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAINAW5ANUWTH3R4ANI", + "PolicyName": "AWSDirectoryServiceFullAccess", + "UpdateDate": "2016-02-24T23:10:36+00:00", + "VersionId": "v2" + }, + "AWSDirectoryServiceReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSDirectoryServiceReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-02-24T23:11:18+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ds:Check*", + "ds:Describe*", + "ds:Get*", + "ds:List*", + "ds:Verify*", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "sns:ListTopics", + "sns:GetTopicAttributes", + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": 
true, + "Path": "/", + "PolicyId": "ANPAIHWYO6WSDNCG64M2W", + "PolicyName": "AWSDirectoryServiceReadOnlyAccess", + "UpdateDate": "2016-02-24T23:11:18+00:00", + "VersionId": "v3" + }, + "AWSEC2SpotServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSEC2SpotServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-09-18T18:51:54+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeInstances", + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": "ec2.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PolicyId": "ANPAIZJJBQNXQYVKTEXGM", + "PolicyName": "AWSEC2SpotServiceRolePolicy", + "UpdateDate": "2017-09-18T18:51:54+00:00", + "VersionId": "v1" + }, + "AWSElasticBeanstalkCustomPlatformforEC2Role": { + "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkCustomPlatformforEC2Role", + "AttachmentCount": 0, + "CreateDate": "2017-02-21T22:50:30+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CopyImage", + "ec2:CreateImage", + "ec2:CreateKeypair", + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteKeypair", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSnapshot", + "ec2:DeleteVolume", + "ec2:DeregisterImage", + "ec2:DescribeImageAttribute", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSnapshots", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ec2:GetPasswordData", + "ec2:ModifyImageAttribute", + 
"ec2:ModifyInstanceAttribute", + "ec2:ModifySnapshotAttribute", + "ec2:RegisterImage", + "ec2:RunInstances", + "ec2:StopInstances", + "ec2:TerminateInstances" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "EC2Access" + }, + { + "Action": [ + "s3:Get*", + "s3:List*", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::elasticbeanstalk-*", + "arn:aws:s3:::elasticbeanstalk-*/*" + ], + "Sid": "BucketAccess" + }, + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk/platform/*", + "Sid": "CloudWatchLogsAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJRVFXSS6LEIQGBKDY", + "PolicyName": "AWSElasticBeanstalkCustomPlatformforEC2Role", + "UpdateDate": "2017-02-21T22:50:30+00:00", + "VersionId": "v1" + }, + "AWSElasticBeanstalkEnhancedHealth": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkEnhancedHealth", + "AttachmentCount": 0, + "CreateDate": "2016-08-22T20:28:36+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetHealth", + "ec2:DescribeInstances", + "ec2:DescribeInstanceStatus", + "ec2:GetConsoleOutput", + "ec2:AssociateAddress", + "ec2:DescribeAddresses", + "ec2:DescribeSecurityGroups", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeNotificationConfigurations" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": 
"ANPAIH5EFJNMOGUUTKLFE", + "PolicyName": "AWSElasticBeanstalkEnhancedHealth", + "UpdateDate": "2016-08-22T20:28:36+00:00", + "VersionId": "v2" + }, + "AWSElasticBeanstalkFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-21T01:00:13+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "elasticbeanstalk:*", + "ec2:*", + "ecs:*", + "ecr:*", + "elasticloadbalancing:*", + "autoscaling:*", + "cloudwatch:*", + "s3:*", + "sns:*", + "cloudformation:*", + "dynamodb:*", + "rds:*", + "sqs:*", + "logs:*", + "iam:GetPolicyVersion", + "iam:GetRole", + "iam:PassRole", + "iam:ListRolePolicies", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfiles", + "iam:ListRoles", + "iam:ListServerCertificates", + "acm:DescribeCertificate", + "acm:ListCertificates", + "codebuild:CreateProject", + "codebuild:DeleteProject", + "codebuild:BatchGetBuilds", + "codebuild:StartBuild" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:AddRoleToInstanceProfile", + "iam:CreateInstanceProfile", + "iam:CreateRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-elasticbeanstalk*", + "arn:aws:iam::*:instance-profile/aws-elasticbeanstalk*" + ] + }, + { + "Action": [ + "iam:AttachRolePolicy" + ], + "Condition": { + "StringLike": { + "iam:PolicyArn": [ + "arn:aws:iam::aws:policy/AWSElasticBeanstalk*", + "arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalk*" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIZYX2YLLBW2LJVUFW", + "PolicyName": "AWSElasticBeanstalkFullAccess", + "UpdateDate": "2016-12-21T01:00:13+00:00", + "VersionId": "v5" + }, + "AWSElasticBeanstalkMulticontainerDocker": { + "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkMulticontainerDocker", + "AttachmentCount": 0, + "CreateDate": 
"2016-06-06T23:45:37+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ecs:Poll", + "ecs:StartTask", + "ecs:StopTask", + "ecs:DiscoverPollEndpoint", + "ecs:StartTelemetrySession", + "ecs:RegisterContainerInstance", + "ecs:DeregisterContainerInstance", + "ecs:DescribeContainerInstances", + "ecs:Submit*", + "ecs:DescribeTasks" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "ECSAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ45SBYG72SD6SHJEY", + "PolicyName": "AWSElasticBeanstalkMulticontainerDocker", + "UpdateDate": "2016-06-06T23:45:37+00:00", + "VersionId": "v2" + }, + "AWSElasticBeanstalkReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "elasticbeanstalk:Check*", + "elasticbeanstalk:Describe*", + "elasticbeanstalk:List*", + "elasticbeanstalk:RequestEnvironmentInfo", + "elasticbeanstalk:RetrieveEnvironmentInfo", + "ec2:Describe*", + "elasticloadbalancing:Describe*", + "autoscaling:Describe*", + "cloudwatch:Describe*", + "cloudwatch:List*", + "cloudwatch:Get*", + "s3:Get*", + "s3:List*", + "sns:Get*", + "sns:List*", + "cloudformation:Describe*", + "cloudformation:Get*", + "cloudformation:List*", + "cloudformation:Validate*", + "cloudformation:Estimate*", + "rds:Describe*", + "sqs:Get*", + "sqs:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI47KNGXDAXFD4SDHG", + "PolicyName": "AWSElasticBeanstalkReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:19+00:00", + "VersionId": "v1" + }, + "AWSElasticBeanstalkService": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkService", + "AttachmentCount": 0, + 
"CreateDate": "2017-06-21T16:49:23+00:00", + "DefaultVersionId": "v11", + "Document": { + "Statement": [ + { + "Action": [ + "cloudformation:*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/awseb-*", + "arn:aws:cloudformation:*:*:stack/eb-*" + ], + "Sid": "AllowCloudformationOperationsOnElasticBeanstalkStacks" + }, + { + "Action": [ + "logs:DeleteLogGroup" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*" + ], + "Sid": "AllowDeleteCloudwatchLogGroups" + }, + { + "Action": [ + "s3:*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::elasticbeanstalk-*", + "arn:aws:s3:::elasticbeanstalk-*/*" + ], + "Sid": "AllowS3OperationsOnElasticBeanstalkBuckets" + }, + { + "Action": [ + "autoscaling:AttachInstances", + "autoscaling:CreateAutoScalingGroup", + "autoscaling:CreateLaunchConfiguration", + "autoscaling:DeleteLaunchConfiguration", + "autoscaling:DeleteAutoScalingGroup", + "autoscaling:DeleteScheduledAction", + "autoscaling:DescribeAccountLimits", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeLoadBalancers", + "autoscaling:DescribeNotificationConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeScheduledActions", + "autoscaling:DetachInstances", + "autoscaling:PutScheduledUpdateGroupAction", + "autoscaling:ResumeProcesses", + "autoscaling:SetDesiredCapacity", + "autoscaling:SuspendProcesses", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:UpdateAutoScalingGroup", + "cloudwatch:PutMetricAlarm", + "ec2:AssociateAddress", + "ec2:AllocateAddress", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:DeleteSecurityGroup", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeKeyPairs", + 
"ec2:DescribeSecurityGroups", + "ec2:DescribeSnapshots", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DisassociateAddress", + "ec2:ReleaseAddress", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:TerminateInstances", + "ecs:CreateCluster", + "ecs:DeleteCluster", + "ecs:DescribeClusters", + "ecs:RegisterTaskDefinition", + "elasticbeanstalk:*", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets", + "iam:ListRoles", + "iam:PassRole", + "logs:CreateLogGroup", + "logs:PutRetentionPolicy", + "rds:DescribeDBEngineVersions", + "rds:DescribeDBInstances", + "rds:DescribeOrderableDBInstanceOptions", + "s3:CopyObject", + "s3:GetObject", + "s3:GetObjectAcl", + "s3:GetObjectMetadata", + "s3:ListBucket", + "s3:listBuckets", + "s3:ListObjects", + "sns:CreateTopic", + "sns:GetTopicAttributes", + "sns:ListSubscriptionsByTopic", + "sns:Subscribe", + "sns:SetTopicAttributes", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "codebuild:CreateProject", + "codebuild:DeleteProject", + "codebuild:BatchGetBuilds", + "codebuild:StartBuild" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "AllowOperations" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJKQ5SN74ZQ4WASXBM", + "PolicyName": "AWSElasticBeanstalkService", + "UpdateDate": "2017-06-21T16:49:23+00:00", + "VersionId": "v11" + }, + 
"AWSElasticBeanstalkServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSElasticBeanstalkServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-09-13T23:46:37+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLikeIfExists": { + "iam:PassedToService": "elasticbeanstalk.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "AllowPassRoleToElasticBeanstalk" + }, + { + "Action": [ + "cloudformation:*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/awseb-*", + "arn:aws:cloudformation:*:*:stack/eb-*" + ], + "Sid": "AllowCloudformationOperationsOnElasticBeanstalkStacks" + }, + { + "Action": [ + "logs:DeleteLogGroup" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*" + ], + "Sid": "AllowDeleteCloudwatchLogGroups" + }, + { + "Action": [ + "s3:*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::elasticbeanstalk-*", + "arn:aws:s3:::elasticbeanstalk-*/*" + ], + "Sid": "AllowS3OperationsOnElasticBeanstalkBuckets" + }, + { + "Action": [ + "autoscaling:AttachInstances", + "autoscaling:CreateAutoScalingGroup", + "autoscaling:CreateLaunchConfiguration", + "autoscaling:DeleteLaunchConfiguration", + "autoscaling:DeleteAutoScalingGroup", + "autoscaling:DeleteScheduledAction", + "autoscaling:DescribeAccountLimits", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeLoadBalancers", + "autoscaling:DescribeNotificationConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeScheduledActions", + "autoscaling:DetachInstances", + "autoscaling:PutScheduledUpdateGroupAction", + "autoscaling:ResumeProcesses", + "autoscaling:SetDesiredCapacity", + "autoscaling:SuspendProcesses", + "autoscaling:TerminateInstanceInAutoScalingGroup", + 
"autoscaling:UpdateAutoScalingGroup", + "cloudwatch:PutMetricAlarm", + "ec2:AssociateAddress", + "ec2:AllocateAddress", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:DeleteSecurityGroup", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeKeyPairs", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DisassociateAddress", + "ec2:ReleaseAddress", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:TerminateInstances", + "ecs:CreateCluster", + "ecs:DeleteCluster", + "ecs:DescribeClusters", + "ecs:RegisterTaskDefinition", + "elasticbeanstalk:*", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets", + "iam:ListRoles", + "logs:CreateLogGroup", + "logs:PutRetentionPolicy", + "rds:DescribeDBInstances", + "rds:DescribeOrderableDBInstanceOptions", + "rds:DescribeDBEngineVersions", + "sns:ListTopics", + "sns:GetTopicAttributes", + "sns:ListSubscriptionsByTopic", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "codebuild:CreateProject", + "codebuild:DeleteProject", + "codebuild:BatchGetBuilds", + "codebuild:StartBuild" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "AllowOperations" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PolicyId": 
"ANPAIID62QSI3OSIPQXTM", + "PolicyName": "AWSElasticBeanstalkServiceRolePolicy", + "UpdateDate": "2017-09-13T23:46:37+00:00", + "VersionId": "v1" + }, + "AWSElasticBeanstalkWebTier": { + "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkWebTier", + "AttachmentCount": 0, + "CreateDate": "2016-12-21T02:06:25+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "s3:Get*", + "s3:List*", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::elasticbeanstalk-*", + "arn:aws:s3:::elasticbeanstalk-*/*" + ], + "Sid": "BucketAccess" + }, + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "XRayAccess" + }, + { + "Action": [ + "logs:PutLogEvents", + "logs:CreateLogStream" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*" + ], + "Sid": "CloudWatchLogsAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIUF4325SJYOREKW3A", + "PolicyName": "AWSElasticBeanstalkWebTier", + "UpdateDate": "2016-12-21T02:06:25+00:00", + "VersionId": "v4" + }, + "AWSElasticBeanstalkWorkerTier": { + "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkWorkerTier", + "AttachmentCount": 0, + "CreateDate": "2016-12-21T02:01:55+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:PutMetricData" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "MetricsAccess" + }, + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "XRayAccess" + }, + { + "Action": [ + "sqs:ChangeMessageVisibility", + "sqs:DeleteMessage", + "sqs:ReceiveMessage", + "sqs:SendMessage" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "QueueAccess" + }, + { + "Action": [ + "s3:Get*", + "s3:List*", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + 
"arn:aws:s3:::elasticbeanstalk-*", + "arn:aws:s3:::elasticbeanstalk-*/*" + ], + "Sid": "BucketAccess" + }, + { + "Action": [ + "dynamodb:BatchGetItem", + "dynamodb:BatchWriteItem", + "dynamodb:DeleteItem", + "dynamodb:GetItem", + "dynamodb:PutItem", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:UpdateItem" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:dynamodb:*:*:table/*-stack-AWSEBWorkerCronLeaderRegistry*" + ], + "Sid": "DynamoPeriodicTasks" + }, + { + "Action": [ + "logs:PutLogEvents", + "logs:CreateLogStream" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*" + ], + "Sid": "CloudWatchLogsAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJQDLBRSJVKVF4JMSK", + "PolicyName": "AWSElasticBeanstalkWorkerTier", + "UpdateDate": "2016-12-21T02:01:55+00:00", + "VersionId": "v4" + }, + "AWSGlueConsoleFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSGlueConsoleFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-09-13T00:12:54+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "glue:*", + "redshift:DescribeClusters", + "redshift:DescribeClusterSubnetGroups", + "iam:ListRoles", + "iam:ListRolePolicies", + "iam:GetRole", + "iam:GetRolePolicy", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeRouteTables", + "ec2:DescribeVpcAttribute", + "ec2:DescribeKeyPairs", + "ec2:DescribeInstances", + "rds:DescribeDBInstances", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:GetBucketAcl", + "s3:GetBucketLocation", + "cloudformation:DescribeStacks", + "cloudformation:GetTemplateSummary" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:GetObject", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-glue-*/*", + "arn:aws:s3:::*/*aws-glue-*/*", + "arn:aws:s3:::aws-glue-*" 
+ ] + }, + { + "Action": [ + "s3:CreateBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-glue-*" + ] + }, + { + "Action": [ + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:/aws-glue/*" + ] + }, + { + "Action": [ + "cloudformation:CreateStack", + "cloudformation:DeleteStack" + ], + "Effect": "Allow", + "Resource": "arn:aws:cloudformation:*:*:stack/aws-glue*/*" + }, + { + "Action": [ + "ec2:TerminateInstances", + "ec2:RunInstances", + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "ForAllValues:StringEquals": { + "aws:TagKeys": [ + "aws-glue-dev-endpoint" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "glue.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/AWSGlueServiceRole*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "ec2.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/AWSGlueServiceNotebookRole*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJNZGDEOD7MISOVSVI", + "PolicyName": "AWSGlueConsoleFullAccess", + "UpdateDate": "2017-09-13T00:12:54+00:00", + "VersionId": "v2" + }, + "AWSGlueServiceNotebookRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSGlueServiceNotebookRole", + "AttachmentCount": 0, + "CreateDate": "2017-08-17T18:08:29+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "glue:CreateDatabase", + "glue:CreatePartition", + "glue:CreateTable", + "glue:DeleteDatabase", + "glue:DeletePartition", + "glue:DeleteTable", + "glue:GetDatabase", + "glue:GetDatabases", + "glue:GetPartition", + "glue:GetPartitions", + "glue:GetTable", + "glue:GetTableVersions", + "glue:GetTables", + "glue:UpdateDatabase", + 
"glue:UpdatePartition", + "glue:UpdateTable", + "glue:CreateBookmark", + "glue:GetBookmark", + "glue:UpdateBookmark", + "glue:GetMetric", + "glue:PutMetric", + "glue:CreateConnection", + "glue:CreateJob", + "glue:DeleteConnection", + "glue:DeleteJob", + "glue:GetConnection", + "glue:GetConnections", + "glue:GetDevEndpoint", + "glue:GetDevEndpoints", + "glue:GetJob", + "glue:GetJobs", + "glue:UpdateJob", + "glue:BatchDeleteConnection", + "glue:UpdateConnection", + "glue:GetUserDefinedFunction", + "glue:UpdateUserDefinedFunction", + "glue:GetUserDefinedFunctions", + "glue:DeleteUserDefinedFunction", + "glue:CreateUserDefinedFunction", + "glue:BatchGetPartition", + "glue:BatchDeletePartition", + "glue:BatchCreatePartition", + "glue:BatchDeleteTable", + "glue:UpdateDevEndpoint", + "s3:GetBucketLocation", + "s3:ListBucket", + "s3:ListAllMyBuckets", + "s3:GetBucketAcl" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::crawler-public*", + "arn:aws:s3:::aws-glue*" + ] + }, + { + "Action": [ + "s3:PutObject", + "s3:DeleteObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-glue*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "ForAllValues:StringEquals": { + "aws:TagKeys": [ + "aws-glue-service-resource" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:network-interface/*", + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:instance/*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIMRC6VZUHJYCTKWFI", + "PolicyName": "AWSGlueServiceNotebookRole", + "UpdateDate": "2017-08-17T18:08:29+00:00", + "VersionId": "v2" + }, + "AWSGlueServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole", + "AttachmentCount": 0, + "CreateDate": "2017-08-23T21:35:25+00:00", + "DefaultVersionId": 
"v3", + "Document": { + "Statement": [ + { + "Action": [ + "glue:*", + "s3:GetBucketLocation", + "s3:ListBucket", + "s3:ListAllMyBuckets", + "s3:GetBucketAcl", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeRouteTables", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "iam:ListRolePolicies", + "iam:GetRole", + "iam:GetRolePolicy" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:CreateBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-glue-*" + ] + }, + { + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-glue-*/*", + "arn:aws:s3:::*/*aws-glue-*/*" + ] + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::crawler-public*", + "arn:aws:s3:::aws-glue-*" + ] + }, + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:/aws-glue/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "ForAllValues:StringEquals": { + "aws:TagKeys": [ + "aws-glue-service-resource" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:network-interface/*", + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:instance/*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIRUJCPEBPMEZFAS32", + "PolicyName": "AWSGlueServiceRole", + "UpdateDate": "2017-08-23T21:35:25+00:00", + "VersionId": "v3" + }, + "AWSGreengrassFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSGreengrassFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-05-03T00:47:37+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + 
"greengrass:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJWPV6OBK4QONH4J3O", + "PolicyName": "AWSGreengrassFullAccess", + "UpdateDate": "2017-05-03T00:47:37+00:00", + "VersionId": "v1" + }, + "AWSGreengrassResourceAccessRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSGreengrassResourceAccessRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-05-26T23:10:54+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "iot:DeleteThingShadow", + "iot:GetThingShadow", + "iot:UpdateThingShadow" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iot:*:*:thing/GG_*", + "arn:aws:iot:*:*:thing/*-gcm", + "arn:aws:iot:*:*:thing/*-gda", + "arn:aws:iot:*:*:thing/*-gci" + ], + "Sid": "AllowGreengrassAccessToShadows" + }, + { + "Action": [ + "iot:DescribeThing" + ], + "Effect": "Allow", + "Resource": "arn:aws:iot:*:*:thing/*", + "Sid": "AllowGreengrassToDescribeThings" + }, + { + "Action": [ + "iot:DescribeCertificate" + ], + "Effect": "Allow", + "Resource": "arn:aws:iot:*:*:cert/*", + "Sid": "AllowGreengrassToDescribeCertificates" + }, + { + "Action": [ + "greengrass:*" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "AllowGreengrassToCallGreengrassServices" + }, + { + "Action": [ + "lambda:GetFunction", + "lambda:GetFunctionConfiguration" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "AllowGreengrassToGetLambdaFunctions" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJPKEIMB6YMXDEVRTM", + "PolicyName": "AWSGreengrassResourceAccessRolePolicy", + "UpdateDate": "2017-05-26T23:10:54+00:00", + "VersionId": "v3" + }, + "AWSHealthFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSHealthFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-06T12:30:31+00:00", + "DefaultVersionId": "v1", + 
"Document": { + "Statement": [ + { + "Action": [ + "health:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI3CUMPCPEUPCSXC4Y", + "PolicyName": "AWSHealthFullAccess", + "UpdateDate": "2016-12-06T12:30:31+00:00", + "VersionId": "v1" + }, + "AWSImportExportFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSImportExportFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:43+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "importexport:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJCQCT4JGTLC6722MQ", + "PolicyName": "AWSImportExportFullAccess", + "UpdateDate": "2015-02-06T18:40:43+00:00", + "VersionId": "v1" + }, + "AWSImportExportReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSImportExportReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:42+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "importexport:ListJobs", + "importexport:GetStatus" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJNTV4OG52ESYZHCNK", + "PolicyName": "AWSImportExportReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:42+00:00", + "VersionId": "v1" + }, + "AWSIoTConfigAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTConfigAccess", + "AttachmentCount": 0, + "CreateDate": "2016-07-27T20:41:18+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "iot:AcceptCertificateTransfer", + "iot:AttachPrincipalPolicy", + "iot:AttachThingPrincipal", + "iot:CancelCertificateTransfer", + "iot:CreateCertificateFromCsr", + "iot:CreateKeysAndCertificate", + "iot:CreatePolicy", + 
"iot:CreatePolicyVersion", + "iot:CreateThing", + "iot:CreateThingType", + "iot:CreateTopicRule", + "iot:DeleteCertificate", + "iot:DeleteCACertificate", + "iot:DeletePolicy", + "iot:DeletePolicyVersion", + "iot:DeleteRegistrationCode", + "iot:DeleteThing", + "iot:DeleteThingType", + "iot:DeleteTopicRule", + "iot:DeprecateThingType", + "iot:DescribeCertificate", + "iot:DescribeCACertificate", + "iot:DescribeEndpoint", + "iot:DescribeThing", + "iot:DescribeThingType", + "iot:DetachPrincipalPolicy", + "iot:DetachThingPrincipal", + "iot:GetLoggingOptions", + "iot:GetPolicy", + "iot:GetPolicyVersion", + "iot:GetRegistrationCode", + "iot:GetTopicRule", + "iot:ListCertificates", + "iot:ListCACertificates", + "iot:ListCertificatesByCA", + "iot:ListPolicies", + "iot:ListPolicyPrincipals", + "iot:ListPolicyVersions", + "iot:ListPrincipalPolicies", + "iot:ListPrincipalThings", + "iot:ListThingPrincipals", + "iot:ListThings", + "iot:ListThingTypes", + "iot:ListTopicRules", + "iot:RegisterCertificate", + "iot:RegisterCACertificate", + "iot:RejectCertificateTransfer", + "iot:ReplaceTopicRule", + "iot:SetDefaultPolicyVersion", + "iot:SetLoggingOptions", + "iot:TransferCertificate", + "iot:UpdateCertificate", + "iot:UpdateCACertificate", + "iot:UpdateThing" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIWWGD4LM4EMXNRL7I", + "PolicyName": "AWSIoTConfigAccess", + "UpdateDate": "2016-07-27T20:41:18+00:00", + "VersionId": "v4" + }, + "AWSIoTConfigReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTConfigReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-07-27T20:41:36+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "iot:DescribeCertificate", + "iot:DescribeCACertificate", + "iot:DescribeEndpoint", + "iot:DescribeThing", + "iot:DescribeThingType", + "iot:GetLoggingOptions", + "iot:GetPolicy", + 
"iot:GetPolicyVersion", + "iot:GetRegistrationCode", + "iot:GetTopicRule", + "iot:ListCertificates", + "iot:ListCertificatesByCA", + "iot:ListCACertificates", + "iot:ListPolicies", + "iot:ListPolicyPrincipals", + "iot:ListPolicyVersions", + "iot:ListPrincipalPolicies", + "iot:ListPrincipalThings", + "iot:ListThingPrincipals", + "iot:ListThings", + "iot:ListThingTypes", + "iot:ListTopicRules" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJHENEMXGX4XMFOIOI", + "PolicyName": "AWSIoTConfigReadOnlyAccess", + "UpdateDate": "2016-07-27T20:41:36+00:00", + "VersionId": "v4" + }, + "AWSIoTDataAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTDataAccess", + "AttachmentCount": 0, + "CreateDate": "2015-10-27T21:51:18+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iot:Connect", + "iot:Publish", + "iot:Subscribe", + "iot:Receive", + "iot:GetThingShadow", + "iot:UpdateThingShadow" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJM2KI2UJDR24XPS2K", + "PolicyName": "AWSIoTDataAccess", + "UpdateDate": "2015-10-27T21:51:18+00:00", + "VersionId": "v1" + }, + "AWSIoTFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-10-08T15:19:49+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iot:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJU2FPGG6PQWN72V2G", + "PolicyName": "AWSIoTFullAccess", + "UpdateDate": "2015-10-08T15:19:49+00:00", + "VersionId": "v1" + }, + "AWSIoTLogging": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSIoTLogging", + "AttachmentCount": 0, + "CreateDate": 
"2015-10-08T15:17:25+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:PutMetricFilter", + "logs:PutRetentionPolicy", + "logs:GetLogEvents", + "logs:DeleteLogStream" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAI6R6Z2FHHGS454W7W", + "PolicyName": "AWSIoTLogging", + "UpdateDate": "2015-10-08T15:17:25+00:00", + "VersionId": "v1" + }, + "AWSIoTRuleActions": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSIoTRuleActions", + "AttachmentCount": 0, + "CreateDate": "2015-10-08T15:14:51+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": { + "Action": [ + "dynamodb:PutItem", + "kinesis:PutRecord", + "iot:Publish", + "s3:PutObject", + "sns:Publish", + "sqs:SendMessage*" + ], + "Effect": "Allow", + "Resource": "*" + }, + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJEZ6FS7BUZVUHMOKY", + "PolicyName": "AWSIoTRuleActions", + "UpdateDate": "2015-10-08T15:14:51+00:00", + "VersionId": "v1" + }, + "AWSKeyManagementServicePowerUser": { + "Arn": "arn:aws:iam::aws:policy/AWSKeyManagementServicePowerUser", + "AttachmentCount": 1, + "CreateDate": "2017-03-07T00:55:11+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "kms:CreateAlias", + "kms:CreateKey", + "kms:DeleteAlias", + "kms:Describe*", + "kms:GenerateRandom", + "kms:Get*", + "kms:List*", + "kms:TagResource", + "kms:UntagResource", + "iam:ListGroups", + "iam:ListRoles", + "iam:ListUsers" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJNPP7PPPPMJRV2SA4", + "PolicyName": "AWSKeyManagementServicePowerUser", + 
"UpdateDate": "2017-03-07T00:55:11+00:00", + "VersionId": "v2" + }, + "AWSLambdaBasicExecutionRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", + "AttachmentCount": 0, + "CreateDate": "2015-04-09T15:03:43+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJNCQGXC42545SKXIK", + "PolicyName": "AWSLambdaBasicExecutionRole", + "UpdateDate": "2015-04-09T15:03:43+00:00", + "VersionId": "v1" + }, + "AWSLambdaDynamoDBExecutionRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole", + "AttachmentCount": 0, + "CreateDate": "2015-04-09T15:09:29+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "dynamodb:DescribeStream", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:ListStreams", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIP7WNAGMIPYNW4WQG", + "PolicyName": "AWSLambdaDynamoDBExecutionRole", + "UpdateDate": "2015-04-09T15:09:29+00:00", + "VersionId": "v1" + }, + "AWSLambdaENIManagementAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaENIManagementAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-06T00:37:27+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DeleteNetworkInterface" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": 
"/service-role/", + "PolicyId": "ANPAJXAW2Q3KPTURUT2QC", + "PolicyName": "AWSLambdaENIManagementAccess", + "UpdateDate": "2016-12-06T00:37:27+00:00", + "VersionId": "v1" + }, + "AWSLambdaExecute": { + "Arn": "arn:aws:iam::aws:policy/AWSLambdaExecute", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:46+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:*" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:*" + }, + { + "Action": [ + "s3:GetObject", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJE5FX7FQZSU5XAKGO", + "PolicyName": "AWSLambdaExecute", + "UpdateDate": "2015-02-06T18:40:46+00:00", + "VersionId": "v1" + }, + "AWSLambdaFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSLambdaFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-05-25T19:08:45+00:00", + "DefaultVersionId": "v7", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:*", + "cognito-identity:ListIdentityPools", + "cognito-sync:GetCognitoEvents", + "cognito-sync:SetCognitoEvents", + "dynamodb:*", + "events:*", + "iam:ListAttachedRolePolicies", + "iam:ListRolePolicies", + "iam:ListRoles", + "iam:PassRole", + "kinesis:DescribeStream", + "kinesis:ListStreams", + "kinesis:PutRecord", + "lambda:*", + "logs:*", + "s3:*", + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic", + "sns:ListTopics", + "sns:Subscribe", + "sns:Unsubscribe", + "sns:Publish", + "sqs:ListQueues", + "sqs:SendMessage", + "tag:GetResources", + "kms:ListAliases", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "iot:GetTopicRule", + "iot:ListTopicRules", + "iot:CreateTopicRule", + "iot:ReplaceTopicRule", + "iot:AttachPrincipalPolicy", + "iot:AttachThingPrincipal", + "iot:CreateKeysAndCertificate", + "iot:CreatePolicy", + "iot:CreateThing", + "iot:ListPolicies", + 
"iot:ListThings", + "iot:DescribeEndpoint", + "xray:PutTraceSegments", + "xray:PutTelemetryRecords" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI6E2CYYMI4XI7AA5K", + "PolicyName": "AWSLambdaFullAccess", + "UpdateDate": "2017-05-25T19:08:45+00:00", + "VersionId": "v7" + }, + "AWSLambdaInvocation-DynamoDB": { + "Arn": "arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:47+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "lambda:InvokeFunction" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "dynamodb:DescribeStream", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:ListStreams" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJTHQ3EKCQALQDYG5G", + "PolicyName": "AWSLambdaInvocation-DynamoDB", + "UpdateDate": "2015-02-06T18:40:47+00:00", + "VersionId": "v1" + }, + "AWSLambdaKinesisExecutionRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole", + "AttachmentCount": 0, + "CreateDate": "2015-04-09T15:14:16+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListStreams", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJHOLKJPXV4GBRMJUQ", + "PolicyName": "AWSLambdaKinesisExecutionRole", + "UpdateDate": "2015-04-09T15:14:16+00:00", + "VersionId": "v1" + }, + "AWSLambdaReadOnlyAccess": { + "Arn": 
"arn:aws:iam::aws:policy/AWSLambdaReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-05-04T18:22:29+00:00", + "DefaultVersionId": "v6", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:Describe*", + "cloudwatch:Get*", + "cloudwatch:List*", + "cognito-identity:ListIdentityPools", + "cognito-sync:GetCognitoEvents", + "dynamodb:BatchGetItem", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:GetItem", + "dynamodb:ListStreams", + "dynamodb:ListTables", + "dynamodb:Query", + "dynamodb:Scan", + "events:List*", + "events:Describe*", + "iam:ListRoles", + "kinesis:DescribeStream", + "kinesis:ListStreams", + "lambda:List*", + "lambda:Get*", + "logs:DescribeMetricFilters", + "logs:GetLogEvents", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "s3:Get*", + "s3:List*", + "sns:ListTopics", + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic", + "sqs:ListQueues", + "tag:GetResources", + "kms:ListAliases", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "iot:GetTopicRules", + "iot:ListTopicRules", + "iot:ListPolicies", + "iot:ListThings", + "iot:DescribeEndpoint" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJLDG7J3CGUHFN4YN6", + "PolicyName": "AWSLambdaReadOnlyAccess", + "UpdateDate": "2017-05-04T18:22:29+00:00", + "VersionId": "v6" + }, + "AWSLambdaRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaRole", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:28+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "lambda:InvokeFunction" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJX4DPCRGTC4NFDUXI", + "PolicyName": "AWSLambdaRole", + "UpdateDate": 
"2015-02-06T18:41:28+00:00", + "VersionId": "v1" + }, + "AWSLambdaVPCAccessExecutionRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole", + "AttachmentCount": 0, + "CreateDate": "2016-02-11T23:15:26+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DeleteNetworkInterface" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJVTME3YLVNL72YR2K", + "PolicyName": "AWSLambdaVPCAccessExecutionRole", + "UpdateDate": "2016-02-11T23:15:26+00:00", + "VersionId": "v1" + }, + "AWSMarketplaceFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-11T17:21:45+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "aws-marketplace:*", + "cloudformation:CreateStack", + "cloudformation:DescribeStackResource", + "cloudformation:DescribeStackResources", + "cloudformation:DescribeStacks", + "cloudformation:List*", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DeleteSecurityGroup", + "ec2:DescribeAccountAttributes", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeKeyPairs", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVpcs", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:TerminateInstances" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI2DV5ULJSO2FYVPYG", + "PolicyName": 
"AWSMarketplaceFullAccess", + "UpdateDate": "2015-02-11T17:21:45+00:00", + "VersionId": "v1" + }, + "AWSMarketplaceGetEntitlements": { + "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceGetEntitlements", + "AttachmentCount": 0, + "CreateDate": "2017-03-27T19:37:24+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "aws-marketplace:GetEntitlements" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJLPIMQE4WMHDC2K7C", + "PolicyName": "AWSMarketplaceGetEntitlements", + "UpdateDate": "2017-03-27T19:37:24+00:00", + "VersionId": "v1" + }, + "AWSMarketplaceManageSubscriptions": { + "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceManageSubscriptions", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "aws-marketplace:ViewSubscriptions", + "aws-marketplace:Subscribe", + "aws-marketplace:Unsubscribe" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJRDW2WIFN7QLUAKBQ", + "PolicyName": "AWSMarketplaceManageSubscriptions", + "UpdateDate": "2015-02-06T18:40:32+00:00", + "VersionId": "v1" + }, + "AWSMarketplaceMeteringFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceMeteringFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-03-17T22:39:22+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "aws-marketplace:MeterUsage" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ65YJPG7CC7LDXNA6", + "PolicyName": "AWSMarketplaceMeteringFullAccess", + "UpdateDate": "2016-03-17T22:39:22+00:00", + "VersionId": "v1" + }, + "AWSMarketplaceRead-only": { 
+ "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceRead-only", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:31+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "aws-marketplace:ViewSubscriptions", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeKeyPairs", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJOOM6LETKURTJ3XZ2", + "PolicyName": "AWSMarketplaceRead-only", + "UpdateDate": "2015-02-06T18:40:31+00:00", + "VersionId": "v1" + }, + "AWSMigrationHubDMSAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSMigrationHubDMSAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T14:00:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "mgh:CreateProgressUpdateStream" + ], + "Effect": "Allow", + "Resource": "arn:aws:mgh:*:*:progressUpdateStream/DMS" + }, + { + "Action": [ + "mgh:AssociateCreatedArtifact", + "mgh:DescribeMigrationTask", + "mgh:DisassociateCreatedArtifact", + "mgh:ImportMigrationTask", + "mgh:ListCreatedArtifacts", + "mgh:NotifyMigrationTaskState", + "mgh:PutResourceAttributes", + "mgh:NotifyApplicationState", + "mgh:DescribeApplicationState", + "mgh:AssociateDiscoveredResource", + "mgh:DisassociateDiscoveredResource", + "mgh:ListDiscoveredResources" + ], + "Effect": "Allow", + "Resource": "arn:aws:mgh:*:*:progressUpdateStream/DMS/*" + }, + { + "Action": [ + "mgh:ListMigrationTasks" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIUQB56VA4JHLN7G2W", + "PolicyName": "AWSMigrationHubDMSAccess", + "UpdateDate": "2017-08-14T14:00:06+00:00", + 
"VersionId": "v1" + }, + "AWSMigrationHubDiscoveryAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSMigrationHubDiscoveryAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T13:30:51+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "discovery:ListConfigurations", + "discovery:DescribeConfigurations" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAITRMRLSV7JAL6YIGG", + "PolicyName": "AWSMigrationHubDiscoveryAccess", + "UpdateDate": "2017-08-14T13:30:51+00:00", + "VersionId": "v1" + }, + "AWSMigrationHubFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSMigrationHubFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T14:09:27+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "mgh:*", + "discovery:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:GetRole" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ4A2SZKHUYHDYIGOK", + "PolicyName": "AWSMigrationHubFullAccess", + "UpdateDate": "2017-08-14T14:09:27+00:00", + "VersionId": "v2" + }, + "AWSMigrationHubSMSAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSMigrationHubSMSAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T13:57:54+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "mgh:CreateProgressUpdateStream" + ], + "Effect": "Allow", + "Resource": "arn:aws:mgh:*:*:progressUpdateStream/SMS" + }, + { + "Action": [ + "mgh:AssociateCreatedArtifact", + "mgh:DescribeMigrationTask", + "mgh:DisassociateCreatedArtifact", + "mgh:ImportMigrationTask", + "mgh:ListCreatedArtifacts", + "mgh:NotifyMigrationTaskState", + "mgh:PutResourceAttributes", + 
"mgh:NotifyApplicationState", + "mgh:DescribeApplicationState", + "mgh:AssociateDiscoveredResource", + "mgh:DisassociateDiscoveredResource", + "mgh:ListDiscoveredResources" + ], + "Effect": "Allow", + "Resource": "arn:aws:mgh:*:*:progressUpdateStream/SMS/*" + }, + { + "Action": [ + "mgh:ListMigrationTasks" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIWQYYT6TSVIRJO4TY", + "PolicyName": "AWSMigrationHubSMSAccess", + "UpdateDate": "2017-08-14T13:57:54+00:00", + "VersionId": "v1" + }, + "AWSMobileHub_FullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSMobileHub_FullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-10T22:23:47+00:00", + "DefaultVersionId": "v10", + "Document": { + "Statement": [ + { + "Action": [ + "apigateway:GET", + "apigateway:GetRestApis", + "apigateway:GetResources", + "apigateway:POST", + "apigateway:TestInvokeMethod", + "dynamodb:DescribeTable", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "iam:ListSAMLProviders", + "lambda:ListFunctions", + "sns:ListTopics", + "lex:GetIntent", + "lex:GetIntents", + "lex:GetSlotType", + "lex:GetSlotTypes", + "lex:GetBot", + "lex:GetBots", + "lex:GetBotAlias", + "lex:GetBotAliases", + "mobilehub:CreateProject", + "mobilehub:DeleteProject", + "mobilehub:UpdateProject", + "mobilehub:ExportProject", + "mobilehub:ImportProject", + "mobilehub:SynchronizeProject", + "mobilehub:GenerateProjectParameters", + "mobilehub:GetProject", + "mobilehub:GetProjectSnapshot", + "mobilehub:ListAvailableConnectors", + "mobilehub:ListAvailableFeatures", + "mobilehub:ListAvailableRegions", + "mobilehub:ListProjects", + "mobilehub:ValidateProject", + "mobilehub:VerifyServiceRole", + "mobilehub:DescribeBundle", + "mobilehub:ExportBundle", + "mobilehub:ListBundles" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject" + ], + 
"Effect": "Allow", + "Resource": "arn:aws:s3:::*/aws-my-sample-app*.zip" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIJLU43R6AGRBK76DM", + "PolicyName": "AWSMobileHub_FullAccess", + "UpdateDate": "2017-08-10T22:23:47+00:00", + "VersionId": "v10" + }, + "AWSMobileHub_ReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSMobileHub_ReadOnly", + "AttachmentCount": 0, + "CreateDate": "2017-08-10T22:08:23+00:00", + "DefaultVersionId": "v8", + "Document": { + "Statement": [ + { + "Action": [ + "dynamodb:DescribeTable", + "iam:ListSAMLProviders", + "lambda:ListFunctions", + "sns:ListTopics", + "lex:GetIntent", + "lex:GetIntents", + "lex:GetSlotType", + "lex:GetSlotTypes", + "lex:GetBot", + "lex:GetBots", + "lex:GetBotAlias", + "lex:GetBotAliases", + "mobilehub:ExportProject", + "mobilehub:GenerateProjectParameters", + "mobilehub:GetProject", + "mobilehub:GetProjectSnapshot", + "mobilehub:ListAvailableConnectors", + "mobilehub:ListAvailableFeatures", + "mobilehub:ListAvailableRegions", + "mobilehub:ListProjects", + "mobilehub:ValidateProject", + "mobilehub:VerifyServiceRole", + "mobilehub:DescribeBundle", + "mobilehub:ExportBundle", + "mobilehub:ListBundles" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::*/aws-my-sample-app*.zip" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIBXVYVL3PWQFBZFGW", + "PolicyName": "AWSMobileHub_ReadOnly", + "UpdateDate": "2017-08-10T22:08:23+00:00", + "VersionId": "v8" + }, + "AWSMobileHub_ServiceUseOnly": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSMobileHub_ServiceUseOnly", + "AttachmentCount": 0, + "CreateDate": "2017-06-02T23:35:49+00:00", + "DefaultVersionId": "v23", + "Document": { + "Statement": [ + { + "Action": [ + "cloudformation:CreateUploadBucket", + 
"cloudformation:ValidateTemplate", + "cloudfront:CreateDistribution", + "cloudfront:DeleteDistribution", + "cloudfront:GetDistribution", + "cloudfront:GetDistributionConfig", + "cloudfront:UpdateDistribution", + "cognito-identity:CreateIdentityPool", + "cognito-identity:UpdateIdentityPool", + "cognito-identity:DeleteIdentityPool", + "cognito-identity:SetIdentityPoolRoles", + "cognito-idp:CreateUserPool", + "dynamodb:CreateTable", + "dynamodb:DeleteTable", + "dynamodb:DescribeTable", + "dynamodb:UpdateTable", + "iam:AddClientIDToOpenIDConnectProvider", + "iam:CreateOpenIDConnectProvider", + "iam:GetOpenIDConnectProvider", + "iam:ListOpenIDConnectProviders", + "iam:CreateSAMLProvider", + "iam:GetSAMLProvider", + "iam:ListSAMLProvider", + "iam:UpdateSAMLProvider", + "lambda:CreateFunction", + "lambda:DeleteFunction", + "lambda:GetFunction", + "mobileanalytics:CreateApp", + "mobileanalytics:DeleteApp", + "sns:CreateTopic", + "sns:DeleteTopic", + "sns:ListPlatformApplications", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "lex:PutIntent", + "lex:GetIntent", + "lex:GetIntents", + "lex:PutSlotType", + "lex:GetSlotType", + "lex:GetSlotTypes", + "lex:PutBot", + "lex:GetBot", + "lex:GetBots", + "lex:GetBotAlias", + "lex:GetBotAliases" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "sns:CreatePlatformApplication", + "sns:DeletePlatformApplication", + "sns:GetPlatformApplicationAttributes", + "sns:SetPlatformApplicationAttributes" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sns:*:*:app/*_MOBILEHUB_*" + ] + }, + { + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:DeleteBucketPolicy", + "s3:DeleteBucketWebsite", + "s3:ListBucket", + "s3:ListBucketVersions", + "s3:GetBucketLocation", + "s3:GetBucketVersioning", + "s3:PutBucketVersioning", + "s3:PutBucketWebsite", + "s3:PutBucketPolicy", + "s3:SetBucketCrossOriginConfiguration" + ], + "Effect": "Allow", + "Resource": [ + 
"arn:aws:s3:::*-userfiles-mobilehub-*", + "arn:aws:s3:::*-contentdelivery-mobilehub-*", + "arn:aws:s3:::*-hosting-mobilehub-*", + "arn:aws:s3:::*-deployments-mobilehub-*" + ] + }, + { + "Action": [ + "s3:DeleteObject", + "s3:DeleteVersion", + "s3:DeleteObjectVersion", + "s3:GetObject", + "s3:GetObjectVersion", + "s3:PutObject", + "s3:PutObjectAcl" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*-userfiles-mobilehub-*/*", + "arn:aws:s3:::*-contentdelivery-mobilehub-*/*", + "arn:aws:s3:::*-hosting-mobilehub-*/*", + "arn:aws:s3:::*-deployments-mobilehub-*/*" + ] + }, + { + "Action": [ + "lambda:AddPermission", + "lambda:CreateAlias", + "lambda:DeleteAlias", + "lambda:UpdateAlias", + "lambda:GetFunctionConfiguration", + "lambda:GetPolicy", + "lambda:RemovePermission", + "lambda:UpdateFunctionCode", + "lambda:UpdateFunctionConfiguration" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:lambda:*:*:function:*-mobilehub-*" + ] + }, + { + "Action": [ + "iam:CreateRole", + "iam:DeleteRole", + "iam:DeleteRolePolicy", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:ListRolePolicies", + "iam:PassRole", + "iam:PutRolePolicy", + "iam:UpdateAssumeRolePolicy", + "iam:AttachRolePolicy", + "iam:DetachRolePolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/*_unauth_MOBILEHUB_*", + "arn:aws:iam::*:role/*_auth_MOBILEHUB_*", + "arn:aws:iam::*:role/*_consolepush_MOBILEHUB_*", + "arn:aws:iam::*:role/*_lambdaexecutionrole_MOBILEHUB_*", + "arn:aws:iam::*:role/*_smsverification_MOBILEHUB_*", + "arn:aws:iam::*:role/*_botexecutionrole_MOBILEHUB_*", + "arn:aws:iam::*:role/pinpoint-events", + "arn:aws:iam::*:role/MOBILEHUB-*-lambdaexecution*", + "arn:aws:iam::*:role/MobileHub_Service_Role" + ] + }, + { + "Action": [ + "iam:CreateServiceLinkedRole", + "iam:GetRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots" + ] + }, + { + "Action": [ + "logs:CreateLogGroup", + 
"logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/mobilehub/*:log-stream:*" + ] + }, + { + "Action": [ + "iam:ListAttachedRolePolicies" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/MobileHub_Service_Role" + ] + }, + { + "Action": [ + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStacks", + "cloudformation:DescribeStackEvents", + "cloudformation:DescribeStackResource", + "cloudformation:GetTemplate", + "cloudformation:ListStackResources", + "cloudformation:ListStacks", + "cloudformation:UpdateStack" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/MOBILEHUB-*" + ] + }, + { + "Action": [ + "apigateway:DELETE", + "apigateway:GET", + "apigateway:HEAD", + "apigateway:OPTIONS", + "apigateway:PATCH", + "apigateway:POST", + "apigateway:PUT" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:apigateway:*::/restapis*" + ] + }, + { + "Action": [ + "cognito-idp:DeleteUserPool", + "cognito-idp:DescribeUserPool", + "cognito-idp:CreateUserPoolClient", + "cognito-idp:DescribeUserPoolClient", + "cognito-idp:DeleteUserPoolClient" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cognito-idp:*:*:userpool/*" + ] + }, + { + "Action": [ + "mobiletargeting:UpdateApnsChannel", + "mobiletargeting:UpdateApnsSandboxChannel", + "mobiletargeting:UpdateEmailChannel", + "mobiletargeting:UpdateGcmChannel", + "mobiletargeting:UpdateSmsChannel", + "mobiletargeting:DeleteApnsChannel", + "mobiletargeting:DeleteApnsSandboxChannel", + "mobiletargeting:DeleteEmailChannel", + "mobiletargeting:DeleteGcmChannel", + "mobiletargeting:DeleteSmsChannel" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:mobiletargeting:*:*:apps/*/channels/*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIUHPQXBDZUWOP3PSK", + "PolicyName": 
"AWSMobileHub_ServiceUseOnly", + "UpdateDate": "2017-06-02T23:35:49+00:00", + "VersionId": "v23" + }, + "AWSOpsWorksCMInstanceProfileRole": { + "Arn": "arn:aws:iam::aws:policy/AWSOpsWorksCMInstanceProfileRole", + "AttachmentCount": 0, + "CreateDate": "2016-11-24T09:48:22+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:AbortMultipartUpload", + "s3:DeleteObject", + "s3:GetObject", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:ListMultipartUploadParts", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::aws-opsworks-cm-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAICSU3OSHCURP2WIZW", + "PolicyName": "AWSOpsWorksCMInstanceProfileRole", + "UpdateDate": "2016-11-24T09:48:22+00:00", + "VersionId": "v1" + }, + "AWSOpsWorksCMServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSOpsWorksCMServiceRole", + "AttachmentCount": 0, + "CreateDate": "2017-04-03T12:00:07+00:00", + "DefaultVersionId": "v6", + "Document": { + "Statement": [ + { + "Action": [ + "s3:CreateBucket", + "s3:DeleteObject", + "s3:DeleteBucket", + "s3:GetObject", + "s3:HeadBucket", + "s3:ListBucket", + "s3:ListObjects", + "s3:PutBucketPolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-opsworks-cm-*" + ] + }, + { + "Action": [ + "ssm:DescribeInstanceInformation", + "ssm:GetCommandInvocation", + "ssm:ListCommandInvocations", + "ssm:ListCommands" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ssm:SendCommand" + ], + "Condition": { + "StringLike": { + "ssm:resourceTag/aws:cloudformation:stack-name": "aws-opsworks-cm-*" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ssm:SendCommand" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ssm:*::document/*", + "arn:aws:s3:::aws-opsworks-cm-*" + ] + }, + { + "Action": [ + "ec2:AllocateAddress", + "ec2:AssociateAddress", + 
"ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateImage", + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateTags", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSnapshot", + "ec2:DeregisterImage", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeImages", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSnapshots", + "ec2:DescribeSubnets", + "ec2:DisassociateAddress", + "ec2:ReleaseAddress", + "ec2:RunInstances", + "ec2:StopInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:TerminateInstances" + ], + "Condition": { + "StringLike": { + "ec2:ResourceTag/aws:cloudformation:stack-name": "aws-opsworks-cm-*" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStackEvents", + "cloudformation:DescribeStackResources", + "cloudformation:DescribeStacks", + "cloudformation:UpdateStack" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/aws-opsworks-cm-*" + ] + }, + { + "Action": [ + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-opsworks-cm-*", + "arn:aws:iam::*:role/service-role/aws-opsworks-cm-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJ6I6MPGJE62URSHCO", + "PolicyName": "AWSOpsWorksCMServiceRole", + "UpdateDate": "2017-04-03T12:00:07+00:00", + "VersionId": "v6" + }, + "AWSOpsWorksCloudWatchLogs": { + "Arn": "arn:aws:iam::aws:policy/AWSOpsWorksCloudWatchLogs", + "AttachmentCount": 0, + "CreateDate": "2017-03-30T17:47:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ], + "Effect": "Allow", + "Resource": [ + 
"arn:aws:logs:*:*:*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJXFIK7WABAY5CPXM4", + "PolicyName": "AWSOpsWorksCloudWatchLogs", + "UpdateDate": "2017-03-30T17:47:19+00:00", + "VersionId": "v1" + }, + "AWSOpsWorksFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSOpsWorksFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:48+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "opsworks:*", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeKeyPairs", + "ec2:DescribeSecurityGroups", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancers", + "iam:GetRolePolicy", + "iam:ListInstanceProfiles", + "iam:ListRoles", + "iam:ListUsers", + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAICN26VXMXASXKOQCG", + "PolicyName": "AWSOpsWorksFullAccess", + "UpdateDate": "2015-02-06T18:40:48+00:00", + "VersionId": "v1" + }, + "AWSOpsWorksInstanceRegistration": { + "Arn": "arn:aws:iam::aws:policy/AWSOpsWorksInstanceRegistration", + "AttachmentCount": 0, + "CreateDate": "2016-06-03T14:23:15+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "opsworks:DescribeStackProvisioningParameters", + "opsworks:DescribeStacks", + "opsworks:RegisterInstance" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJG3LCPVNI4WDZCIMU", + "PolicyName": "AWSOpsWorksInstanceRegistration", + "UpdateDate": "2016-06-03T14:23:15+00:00", + "VersionId": "v1" + }, + "AWSOpsWorksRegisterCLI": { + 
"Arn": "arn:aws:iam::aws:policy/AWSOpsWorksRegisterCLI", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:49+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "opsworks:AssignInstance", + "opsworks:CreateStack", + "opsworks:CreateLayer", + "opsworks:DeregisterInstance", + "opsworks:DescribeInstances", + "opsworks:DescribeStackProvisioningParameters", + "opsworks:DescribeStacks", + "opsworks:UnassignInstance" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:DescribeInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:AddUserToGroup", + "iam:CreateAccessKey", + "iam:CreateGroup", + "iam:CreateUser", + "iam:ListInstanceProfiles", + "iam:PassRole", + "iam:PutUserPolicy" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ3AB5ZBFPCQGTVDU4", + "PolicyName": "AWSOpsWorksRegisterCLI", + "UpdateDate": "2015-02-06T18:40:49+00:00", + "VersionId": "v1" + }, + "AWSOpsWorksRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSOpsWorksRole", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:27+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInstances", + "ec2:DescribeKeyPairs", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancers", + "iam:GetRolePolicy", + "iam:ListInstanceProfiles", + "iam:ListRoles", + "iam:ListUsers", + "iam:PassRole", + "opsworks:*", + "rds:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": 
"ANPAIDUTMOKHJFAPJV45W", + "PolicyName": "AWSOpsWorksRole", + "UpdateDate": "2015-02-06T18:41:27+00:00", + "VersionId": "v1" + }, + "AWSQuickSightDescribeRDS": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSQuickSightDescribeRDS", + "AttachmentCount": 0, + "CreateDate": "2015-11-10T23:24:50+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "rds:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJU5J6OAMCJD3OO76O", + "PolicyName": "AWSQuickSightDescribeRDS", + "UpdateDate": "2015-11-10T23:24:50+00:00", + "VersionId": "v1" + }, + "AWSQuickSightDescribeRedshift": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSQuickSightDescribeRedshift", + "AttachmentCount": 0, + "CreateDate": "2015-11-10T23:25:01+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "redshift:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJFEM6MLSLTW4ZNBW2", + "PolicyName": "AWSQuickSightDescribeRedshift", + "UpdateDate": "2015-11-10T23:25:01+00:00", + "VersionId": "v1" + }, + "AWSQuickSightListIAM": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSQuickSightListIAM", + "AttachmentCount": 0, + "CreateDate": "2015-11-10T23:25:07+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iam:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAI3CH5UUWZN4EKGILO", + "PolicyName": "AWSQuickSightListIAM", + "UpdateDate": "2015-11-10T23:25:07+00:00", + "VersionId": "v1" + }, + "AWSQuicksightAthenaAccess": { + "Arn": 
"arn:aws:iam::aws:policy/service-role/AWSQuicksightAthenaAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-11T23:37:32+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "athena:BatchGetQueryExecution", + "athena:CancelQueryExecution", + "athena:GetCatalogs", + "athena:GetExecutionEngine", + "athena:GetExecutionEngines", + "athena:GetNamespace", + "athena:GetNamespaces", + "athena:GetQueryExecution", + "athena:GetQueryExecutions", + "athena:GetQueryResults", + "athena:GetTable", + "athena:GetTables", + "athena:ListQueryExecutions", + "athena:RunQuery", + "athena:StartQueryExecution", + "athena:StopQueryExecution" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "glue:CreateDatabase", + "glue:DeleteDatabase", + "glue:GetDatabase", + "glue:GetDatabases", + "glue:UpdateDatabase", + "glue:CreateTable", + "glue:DeleteTable", + "glue:BatchDeleteTable", + "glue:UpdateTable", + "glue:GetTable", + "glue:GetTables", + "glue:BatchCreatePartition", + "glue:CreatePartition", + "glue:DeletePartition", + "glue:BatchDeletePartition", + "glue:UpdatePartition", + "glue:GetPartition", + "glue:GetPartitions", + "glue:BatchGetPartition" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:ListMultipartUploadParts", + "s3:AbortMultipartUpload", + "s3:CreateBucket", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-athena-query-results-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAI4JB77JXFQXDWNRPM", + "PolicyName": "AWSQuicksightAthenaAccess", + "UpdateDate": "2017-08-11T23:37:32+00:00", + "VersionId": "v3" + }, + "AWSStepFunctionsConsoleFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSStepFunctionsConsoleFullAccess", + "AttachmentCount": 0, + "CreateDate": 
"2017-01-12T00:19:34+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": "states:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:ListRoles", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/service-role/StatesExecutionRole*" + }, + { + "Action": "lambda:ListFunctions", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJIYC52YWRX6OSMJWK", + "PolicyName": "AWSStepFunctionsConsoleFullAccess", + "UpdateDate": "2017-01-12T00:19:34+00:00", + "VersionId": "v2" + }, + "AWSStepFunctionsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSStepFunctionsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-01-11T21:51:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "states:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJXKA6VP3UFBVHDPPA", + "PolicyName": "AWSStepFunctionsFullAccess", + "UpdateDate": "2017-01-11T21:51:32+00:00", + "VersionId": "v1" + }, + "AWSStepFunctionsReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSStepFunctionsReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-01-11T21:46:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "states:ListStateMachines", + "states:ListActivities", + "states:DescribeStateMachine", + "states:ListExecutions", + "states:DescribeExecution", + "states:GetExecutionHistory", + "states:DescribeActivity" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJONHB2TJQDJPFW5TM", + "PolicyName": "AWSStepFunctionsReadOnlyAccess", + "UpdateDate": 
"2017-01-11T21:46:19+00:00", + "VersionId": "v1" + }, + "AWSStorageGatewayFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSStorageGatewayFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:09+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "storagegateway:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:DescribeSnapshots", + "ec2:DeleteSnapshot" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJG5SSPAVOGK3SIDGU", + "PolicyName": "AWSStorageGatewayFullAccess", + "UpdateDate": "2015-02-06T18:41:09+00:00", + "VersionId": "v1" + }, + "AWSStorageGatewayReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSStorageGatewayReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:10+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "storagegateway:List*", + "storagegateway:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:DescribeSnapshots" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIFKCTUVOPD5NICXJK", + "PolicyName": "AWSStorageGatewayReadOnlyAccess", + "UpdateDate": "2015-02-06T18:41:10+00:00", + "VersionId": "v1" + }, + "AWSSupportAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSSupportAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:11+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "support:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJSNKQX2OW67GF4S7E", + "PolicyName": "AWSSupportAccess", + "UpdateDate": "2015-02-06T18:41:11+00:00", + "VersionId": "v1" + }, + 
"AWSWAFFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSWAFFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-07T21:33:25+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "waf:*", + "waf-regional:*", + "elasticloadbalancing:SetWebACL" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJMIKIAFXZEGOLRH7C", + "PolicyName": "AWSWAFFullAccess", + "UpdateDate": "2016-12-07T21:33:25+00:00", + "VersionId": "v2" + }, + "AWSWAFReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSWAFReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-07T21:30:54+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "waf:Get*", + "waf:List*", + "waf-regional:Get*", + "waf-regional:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAINZVDMX2SBF7EU2OC", + "PolicyName": "AWSWAFReadOnlyAccess", + "UpdateDate": "2016-12-07T21:30:54+00:00", + "VersionId": "v2" + }, + "AWSXrayFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSXrayFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-01T18:30:55+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "xray:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJQBYG45NSJMVQDB2K", + "PolicyName": "AWSXrayFullAccess", + "UpdateDate": "2016-12-01T18:30:55+00:00", + "VersionId": "v1" + }, + "AWSXrayReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSXrayReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-01T18:27:02+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "xray:BatchGetTraces", + 
"xray:GetServiceGraph", + "xray:GetTraceGraph", + "xray:GetTraceSummaries" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIH4OFXWPS6ZX6OPGQ", + "PolicyName": "AWSXrayReadOnlyAccess", + "UpdateDate": "2016-12-01T18:27:02+00:00", + "VersionId": "v1" + }, + "AWSXrayWriteOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-01T18:19:53+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIAACM4LMYSRGBCTM6", + "PolicyName": "AWSXrayWriteOnlyAccess", + "UpdateDate": "2016-12-01T18:19:53+00:00", + "VersionId": "v1" + }, + "AdministratorAccess": { + "Arn": "arn:aws:iam::aws:policy/AdministratorAccess", + "AttachmentCount": 3, + "CreateDate": "2015-02-06T18:39:46+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIWMBCKSKIEE64ZLYK", + "PolicyName": "AdministratorAccess", + "UpdateDate": "2015-02-06T18:39:46+00:00", + "VersionId": "v1" + }, + "AmazonAPIGatewayAdministrator": { + "Arn": "arn:aws:iam::aws:policy/AmazonAPIGatewayAdministrator", + "AttachmentCount": 0, + "CreateDate": "2015-07-09T17:34:45+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "apigateway:*" + ], + "Effect": "Allow", + "Resource": "arn:aws:apigateway:*::/*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ4PT6VY5NLKTNUYSI", + 
"PolicyName": "AmazonAPIGatewayAdministrator", + "UpdateDate": "2015-07-09T17:34:45+00:00", + "VersionId": "v1" + }, + "AmazonAPIGatewayInvokeFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonAPIGatewayInvokeFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-07-09T17:36:12+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "execute-api:Invoke" + ], + "Effect": "Allow", + "Resource": "arn:aws:execute-api:*:*:*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIIWAX2NOOQJ4AIEQ6", + "PolicyName": "AmazonAPIGatewayInvokeFullAccess", + "UpdateDate": "2015-07-09T17:36:12+00:00", + "VersionId": "v1" + }, + "AmazonAPIGatewayPushToCloudWatchLogs": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonAPIGatewayPushToCloudWatchLogs", + "AttachmentCount": 0, + "CreateDate": "2015-11-11T23:41:46+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "logs:GetLogEvents", + "logs:FilterLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIK4GFO7HLKYN64ASK", + "PolicyName": "AmazonAPIGatewayPushToCloudWatchLogs", + "UpdateDate": "2015-11-11T23:41:46+00:00", + "VersionId": "v1" + }, + "AmazonAppStreamFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonAppStreamFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-09-07T23:56:23+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "appstream:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingPolicies", 
+ "application-autoscaling:PutScalingPolicy", + "application-autoscaling:RegisterScalableTarget" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:DeleteAlarms", + "cloudwatch:DescribeAlarms", + "cloudwatch:GetMetricStatistics", + "cloudwatch:PutMetricAlarm" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:ListRoles", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": "application-autoscaling.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/service-role/ApplicationAutoScalingForAmazonAppStreamAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJLZZXU2YQVGL4QDNC", + "PolicyName": "AmazonAppStreamFullAccess", + "UpdateDate": "2017-09-07T23:56:23+00:00", + "VersionId": "v2" + }, + "AmazonAppStreamReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonAppStreamReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-07T21:00:06+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "appstream:Get*", + "appstream:List*", + "appstream:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJXIFDGB4VBX23DX7K", + "PolicyName": "AmazonAppStreamReadOnlyAccess", + "UpdateDate": "2016-12-07T21:00:06+00:00", + "VersionId": "v2" + }, + "AmazonAppStreamServiceAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonAppStreamServiceAccess", + "AttachmentCount": 0, + "CreateDate": "2017-05-23T23:00:47+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + 
"ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeAvailabilityZones", + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DeleteNetworkInterface", + "ec2:DescribeSubnets", + "ec2:AssociateAddress", + "ec2:DisassociateAddress", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:CreateBucket", + "s3:ListBucket", + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:GetObjectVersion", + "s3:DeleteObjectVersion", + "s3:PutBucketPolicy" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::appstream2-36fb080bb8-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAISBRZ7LMMCBYEF3SE", + "PolicyName": "AmazonAppStreamServiceAccess", + "UpdateDate": "2017-05-23T23:00:47+00:00", + "VersionId": "v3" + }, + "AmazonAthenaFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonAthenaFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-09-13T00:13:48+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "athena:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "glue:CreateDatabase", + "glue:DeleteDatabase", + "glue:GetDatabase", + "glue:GetDatabases", + "glue:UpdateDatabase", + "glue:CreateTable", + "glue:DeleteTable", + "glue:BatchDeleteTable", + "glue:UpdateTable", + "glue:GetTable", + "glue:GetTables", + "glue:BatchCreatePartition", + "glue:CreatePartition", + "glue:DeletePartition", + "glue:BatchDeletePartition", + "glue:UpdatePartition", + "glue:GetPartition", + "glue:GetPartitions", + "glue:BatchGetPartition" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:ListMultipartUploadParts", + "s3:AbortMultipartUpload", + "s3:CreateBucket", + "s3:PutObject" + ], + "Effect": 
"Allow", + "Resource": [ + "arn:aws:s3:::aws-athena-query-results-*" + ] + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::athena-examples*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIPJMLMD4C7RYZ6XCK", + "PolicyName": "AmazonAthenaFullAccess", + "UpdateDate": "2017-09-13T00:13:48+00:00", + "VersionId": "v3" + }, + "AmazonCloudDirectoryFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonCloudDirectoryFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-02-25T00:41:39+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "clouddirectory:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJG3XQK77ATFLCF2CK", + "PolicyName": "AmazonCloudDirectoryFullAccess", + "UpdateDate": "2017-02-25T00:41:39+00:00", + "VersionId": "v1" + }, + "AmazonCloudDirectoryReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonCloudDirectoryReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-02-28T23:42:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "clouddirectory:List*", + "clouddirectory:Get*", + "clouddirectory:LookupPolicy", + "clouddirectory:BatchRead" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAICMSZQGR3O62KMD6M", + "PolicyName": "AmazonCloudDirectoryReadOnlyAccess", + "UpdateDate": "2017-02-28T23:42:06+00:00", + "VersionId": "v1" + }, + "AmazonCognitoDeveloperAuthenticatedIdentities": { + "Arn": "arn:aws:iam::aws:policy/AmazonCognitoDeveloperAuthenticatedIdentities", + "AttachmentCount": 0, + "CreateDate": "2015-03-24T17:22:23+00:00", + "DefaultVersionId": "v1", + "Document": { + 
"Statement": [ + { + "Action": [ + "cognito-identity:GetOpenIdTokenForDeveloperIdentity", + "cognito-identity:LookupDeveloperIdentity", + "cognito-identity:MergeDeveloperIdentities", + "cognito-identity:UnlinkDeveloperIdentity" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIQOKZ5BGKLCMTXH4W", + "PolicyName": "AmazonCognitoDeveloperAuthenticatedIdentities", + "UpdateDate": "2015-03-24T17:22:23+00:00", + "VersionId": "v1" + }, + "AmazonCognitoPowerUser": { + "Arn": "arn:aws:iam::aws:policy/AmazonCognitoPowerUser", + "AttachmentCount": 0, + "CreateDate": "2016-06-02T16:57:56+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "cognito-identity:*", + "cognito-idp:*", + "cognito-sync:*", + "iam:ListRoles", + "iam:ListOpenIdConnectProviders", + "sns:ListPlatformApplications" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJKW5H2HNCPGCYGR6Y", + "PolicyName": "AmazonCognitoPowerUser", + "UpdateDate": "2016-06-02T16:57:56+00:00", + "VersionId": "v2" + }, + "AmazonCognitoReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AmazonCognitoReadOnly", + "AttachmentCount": 0, + "CreateDate": "2016-06-02T17:30:24+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "cognito-identity:Describe*", + "cognito-identity:Get*", + "cognito-identity:List*", + "cognito-idp:Describe*", + "cognito-idp:AdminGetUser", + "cognito-idp:List*", + "cognito-sync:Describe*", + "cognito-sync:Get*", + "cognito-sync:List*", + "iam:ListOpenIdConnectProviders", + "iam:ListRoles", + "sns:ListPlatformApplications" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJBFTRZD2GQGJHSVQK", + 
"PolicyName": "AmazonCognitoReadOnly", + "UpdateDate": "2016-06-02T17:30:24+00:00", + "VersionId": "v2" + }, + "AmazonDMSCloudWatchLogsRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonDMSCloudWatchLogsRole", + "AttachmentCount": 0, + "CreateDate": "2016-01-07T23:44:53+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:DescribeLogGroups" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "AllowDescribeOnAllLogGroups" + }, + { + "Action": [ + "logs:DescribeLogStreams" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:dms-tasks-*" + ], + "Sid": "AllowDescribeOfAllLogStreamsOnDmsTasksLogGroup" + }, + { + "Action": [ + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:dms-tasks-*" + ], + "Sid": "AllowCreationOfDmsTasksLogGroups" + }, + { + "Action": [ + "logs:CreateLogStream" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:dms-tasks-*:log-stream:dms-task-*" + ], + "Sid": "AllowCreationOfDmsTaskLogStream" + }, + { + "Action": [ + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:dms-tasks-*:log-stream:dms-task-*" + ], + "Sid": "AllowUploadOfLogEventsToDmsTaskLogStream" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJBG7UXZZXUJD3TDJE", + "PolicyName": "AmazonDMSCloudWatchLogsRole", + "UpdateDate": "2016-01-07T23:44:53+00:00", + "VersionId": "v1" + }, + "AmazonDMSRedshiftS3Role": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonDMSRedshiftS3Role", + "AttachmentCount": 0, + "CreateDate": "2016-04-20T17:05:56+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:CreateBucket", + "s3:ListBucket", + "s3:DeleteBucket", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:GetObjectVersion", + 
"s3:GetBucketPolicy", + "s3:PutBucketPolicy", + "s3:DeleteBucketPolicy" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::dms-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAI3CCUQ4U5WNC5F6B6", + "PolicyName": "AmazonDMSRedshiftS3Role", + "UpdateDate": "2016-04-20T17:05:56+00:00", + "VersionId": "v1" + }, + "AmazonDMSVPCManagementRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonDMSVPCManagementRole", + "AttachmentCount": 0, + "CreateDate": "2016-05-23T16:29:57+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DeleteNetworkInterface", + "ec2:ModifyNetworkInterfaceAttribute" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJHKIGMBQI4AEFFSYO", + "PolicyName": "AmazonDMSVPCManagementRole", + "UpdateDate": "2016-05-23T16:29:57+00:00", + "VersionId": "v3" + }, + "AmazonDRSVPCManagement": { + "Arn": "arn:aws:iam::aws:policy/AmazonDRSVPCManagement", + "AttachmentCount": 0, + "CreateDate": "2015-09-02T00:09:20+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:RevokeSecurityGroupIngress" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + 
"IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJPXIBTTZMBEFEX6UA", + "PolicyName": "AmazonDRSVPCManagement", + "UpdateDate": "2015-09-02T00:09:20+00:00", + "VersionId": "v1" + }, + "AmazonDynamoDBFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-06-28T23:23:34+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "dynamodb:*", + "dax:*", + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:DeregisterScalableTarget", + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingActivities", + "application-autoscaling:DescribeScalingPolicies", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:RegisterScalableTarget", + "cloudwatch:DeleteAlarms", + "cloudwatch:DescribeAlarmHistory", + "cloudwatch:DescribeAlarms", + "cloudwatch:DescribeAlarmsForMetric", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "cloudwatch:PutMetricAlarm", + "datapipeline:ActivatePipeline", + "datapipeline:CreatePipeline", + "datapipeline:DeletePipeline", + "datapipeline:DescribeObjects", + "datapipeline:DescribePipelines", + "datapipeline:GetPipelineDefinition", + "datapipeline:ListPipelines", + "datapipeline:PutPipelineDefinition", + "datapipeline:QueryObjects", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "iam:GetRole", + "iam:ListRoles", + "sns:CreateTopic", + "sns:DeleteTopic", + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic", + "sns:ListTopics", + "sns:Subscribe", + "sns:Unsubscribe", + "sns:SetTopicAttributes", + "lambda:CreateFunction", + "lambda:ListFunctions", + "lambda:ListEventSourceMappings", + "lambda:CreateEventSourceMapping", + "lambda:DeleteEventSourceMapping", + "lambda:GetFunctionConfiguration", + "lambda:DeleteFunction" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + 
"Condition": { + "StringLike": { + "iam:PassedToService": [ + "application-autoscaling.amazonaws.com", + "dax.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAINUGF2JSOSUY76KYA", + "PolicyName": "AmazonDynamoDBFullAccess", + "UpdateDate": "2017-06-28T23:23:34+00:00", + "VersionId": "v5" + }, + "AmazonDynamoDBFullAccesswithDataPipeline": { + "Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccesswithDataPipeline", + "AttachmentCount": 0, + "CreateDate": "2015-11-12T02:17:42+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:DeleteAlarms", + "cloudwatch:DescribeAlarmHistory", + "cloudwatch:DescribeAlarms", + "cloudwatch:DescribeAlarmsForMetric", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "cloudwatch:PutMetricAlarm", + "dynamodb:*", + "sns:CreateTopic", + "sns:DeleteTopic", + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic", + "sns:ListTopics", + "sns:Subscribe", + "sns:Unsubscribe", + "sns:SetTopicAttributes" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "DDBConsole" + }, + { + "Action": [ + "lambda:*", + "iam:ListRoles" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "DDBConsoleTriggers" + }, + { + "Action": [ + "datapipeline:*", + "iam:ListRoles" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "DDBConsoleImportExport" + }, + { + "Action": [ + "iam:GetRolePolicy", + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "IAMEDPRoles" + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DescribeInstances", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:TerminateInstances", + "elasticmapreduce:*", + "datapipeline:*" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "EMR" + }, + { + "Action": [ + "s3:DeleteObject", + "s3:Get*", + "s3:List*", + "s3:Put*" + ], + "Effect": 
"Allow", + "Resource": [ + "*" + ], + "Sid": "S3" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ3ORT7KDISSXGHJXA", + "PolicyName": "AmazonDynamoDBFullAccesswithDataPipeline", + "UpdateDate": "2015-11-12T02:17:42+00:00", + "VersionId": "v2" + }, + "AmazonDynamoDBReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-06-12T21:11:40+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingActivities", + "application-autoscaling:DescribeScalingPolicies", + "cloudwatch:DescribeAlarmHistory", + "cloudwatch:DescribeAlarms", + "cloudwatch:DescribeAlarmsForMetric", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "datapipeline:DescribeObjects", + "datapipeline:DescribePipelines", + "datapipeline:GetPipelineDefinition", + "datapipeline:ListPipelines", + "datapipeline:QueryObjects", + "dynamodb:BatchGetItem", + "dynamodb:DescribeTable", + "dynamodb:GetItem", + "dynamodb:ListTables", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:DescribeReservedCapacity", + "dynamodb:DescribeReservedCapacityOfferings", + "dynamodb:ListTagsOfResource", + "dynamodb:DescribeTimeToLive", + "dynamodb:DescribeLimits", + "iam:GetRole", + "iam:ListRoles", + "sns:ListSubscriptionsByTopic", + "sns:ListTopics", + "lambda:ListFunctions", + "lambda:ListEventSourceMappings", + "lambda:GetFunctionConfiguration" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIY2XFNA232XJ6J7X2", + "PolicyName": "AmazonDynamoDBReadOnlyAccess", + "UpdateDate": "2017-06-12T21:11:40+00:00", + "VersionId": "v5" + }, + "AmazonEC2ContainerRegistryFullAccess": { + "Arn": 
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-12-21T17:06:48+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ecr:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIESRL7KD7IIVF6V4W", + "PolicyName": "AmazonEC2ContainerRegistryFullAccess", + "UpdateDate": "2015-12-21T17:06:48+00:00", + "VersionId": "v1" + }, + "AmazonEC2ContainerRegistryPowerUser": { + "Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPowerUser", + "AttachmentCount": 0, + "CreateDate": "2016-10-11T22:28:07+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:DescribeImages", + "ecr:BatchGetImage", + "ecr:InitiateLayerUpload", + "ecr:UploadLayerPart", + "ecr:CompleteLayerUpload", + "ecr:PutImage" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJDNE5PIHROIBGGDDW", + "PolicyName": "AmazonEC2ContainerRegistryPowerUser", + "UpdateDate": "2016-10-11T22:28:07+00:00", + "VersionId": "v2" + }, + "AmazonEC2ContainerRegistryReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + "AttachmentCount": 0, + "CreateDate": "2016-10-11T22:08:43+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:DescribeImages", + "ecr:BatchGetImage" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": 
"2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIFYZPA37OOHVIH7KQ", + "PolicyName": "AmazonEC2ContainerRegistryReadOnly", + "UpdateDate": "2016-10-11T22:08:43+00:00", + "VersionId": "v2" + }, + "AmazonEC2ContainerServiceAutoscaleRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceAutoscaleRole", + "AttachmentCount": 1, + "CreateDate": "2016-05-12T23:25:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ecs:DescribeServices", + "ecs:UpdateService" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "cloudwatch:DescribeAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIUAP3EGGGXXCPDQKK", + "PolicyName": "AmazonEC2ContainerServiceAutoscaleRole", + "UpdateDate": "2016-05-12T23:25:44+00:00", + "VersionId": "v1" + }, + "AmazonEC2ContainerServiceEventsRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceEventsRole", + "AttachmentCount": 0, + "CreateDate": "2017-05-30T16:51:35+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ecs:RunTask" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAITKFNIUAG27VSYNZ4", + "PolicyName": "AmazonEC2ContainerServiceEventsRole", + "UpdateDate": "2017-05-30T16:51:35+00:00", + "VersionId": "v1" + }, + "AmazonEC2ContainerServiceFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerServiceFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-06-08T00:18:56+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:Describe*", + "autoscaling:UpdateAutoScalingGroup", + 
"cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStack*", + "cloudformation:UpdateStack", + "cloudwatch:GetMetricStatistics", + "ec2:Describe*", + "elasticloadbalancing:*", + "ecs:*", + "events:DescribeRule", + "events:DeleteRule", + "events:ListRuleNamesByTarget", + "events:ListTargetsByRule", + "events:PutRule", + "events:PutTargets", + "events:RemoveTargets", + "iam:ListInstanceProfiles", + "iam:ListRoles", + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJALOYVTPDZEMIACSM", + "PolicyName": "AmazonEC2ContainerServiceFullAccess", + "UpdateDate": "2017-06-08T00:18:56+00:00", + "VersionId": "v4" + }, + "AmazonEC2ContainerServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole", + "AttachmentCount": 1, + "CreateDate": "2016-08-11T13:08:01+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:Describe*", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:Describe*", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJO53W2XHNACG7V77Q", + "PolicyName": "AmazonEC2ContainerServiceRole", + "UpdateDate": "2016-08-11T13:08:01+00:00", + "VersionId": "v2" + }, + "AmazonEC2ContainerServiceforEC2Role": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role", + "AttachmentCount": 1, + "CreateDate": "2017-05-17T23:09:13+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "ecs:CreateCluster", + 
"ecs:DeregisterContainerInstance", + "ecs:DiscoverPollEndpoint", + "ecs:Poll", + "ecs:RegisterContainerInstance", + "ecs:StartTelemetrySession", + "ecs:UpdateContainerInstancesState", + "ecs:Submit*", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJLYJCVHC7TQHCSQDS", + "PolicyName": "AmazonEC2ContainerServiceforEC2Role", + "UpdateDate": "2017-05-17T23:09:13+00:00", + "VersionId": "v5" + }, + "AmazonEC2FullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonEC2FullAccess", + "AttachmentCount": 1, + "CreateDate": "2015-02-06T18:40:15+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "ec2:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "elasticloadbalancing:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "cloudwatch:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "autoscaling:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI3VAJF5ZCRZ7MCQE6", + "PolicyName": "AmazonEC2FullAccess", + "UpdateDate": "2015-02-06T18:40:15+00:00", + "VersionId": "v1" + }, + "AmazonEC2ReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:17+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "ec2:Describe*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "elasticloadbalancing:Describe*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:ListMetrics", + "cloudwatch:GetMetricStatistics", + "cloudwatch:Describe*" + ], + "Effect": "Allow", + 
"Resource": "*" + }, + { + "Action": "autoscaling:Describe*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIGDT4SV4GSETWTBZK", + "PolicyName": "AmazonEC2ReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:17+00:00", + "VersionId": "v1" + }, + "AmazonEC2ReportsAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonEC2ReportsAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:16+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "ec2-reports:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIU6NBZVF2PCRW36ZW", + "PolicyName": "AmazonEC2ReportsAccess", + "UpdateDate": "2015-02-06T18:40:16+00:00", + "VersionId": "v1" + }, + "AmazonEC2RoleforAWSCodeDeploy": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforAWSCodeDeploy", + "AttachmentCount": 0, + "CreateDate": "2017-03-20T17:14:10+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "s3:GetObject", + "s3:GetObjectVersion", + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIAZKXZ27TAJ4PVWGK", + "PolicyName": "AmazonEC2RoleforAWSCodeDeploy", + "UpdateDate": "2017-03-20T17:14:10+00:00", + "VersionId": "v2" + }, + "AmazonEC2RoleforDataPipelineRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforDataPipelineRole", + "AttachmentCount": 0, + "CreateDate": "2016-02-22T17:24:05+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:*", + "datapipeline:*", + "dynamodb:*", + "ec2:Describe*", + "elasticmapreduce:AddJobFlowSteps", + "elasticmapreduce:Describe*", + 
"elasticmapreduce:ListInstance*", + "elasticmapreduce:ModifyInstanceGroups", + "rds:Describe*", + "redshift:DescribeClusters", + "redshift:DescribeClusterSecurityGroups", + "s3:*", + "sdb:*", + "sns:*", + "sqs:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJ3Z5I2WAJE5DN2J36", + "PolicyName": "AmazonEC2RoleforDataPipelineRole", + "UpdateDate": "2016-02-22T17:24:05+00:00", + "VersionId": "v3" + }, + "AmazonEC2RoleforSSM": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM", + "AttachmentCount": 0, + "CreateDate": "2017-08-10T20:49:08+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:PutMetricData" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:DescribeInstanceStatus" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ds:CreateComputer", + "ds:DescribeDirectories" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:PutObject", + "s3:GetObject", + 
"s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts", + "s3:ListBucketMultipartUploads" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::amazon-ssm-packages-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAI6TL3SMY22S4KMMX6", + "PolicyName": "AmazonEC2RoleforSSM", + "UpdateDate": "2017-08-10T20:49:08+00:00", + "VersionId": "v4" + }, + "AmazonEC2SpotFleetAutoscaleRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetAutoscaleRole", + "AttachmentCount": 0, + "CreateDate": "2016-08-19T18:27:22+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeSpotFleetRequests", + "ec2:ModifySpotFleetRequest" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "cloudwatch:DescribeAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIMFFRMIOBGDP2TAVE", + "PolicyName": "AmazonEC2SpotFleetAutoscaleRole", + "UpdateDate": "2016-08-19T18:27:22+00:00", + "VersionId": "v1" + }, + "AmazonEC2SpotFleetRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetRole", + "AttachmentCount": 0, + "CreateDate": "2016-11-10T21:19:35+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:RequestSpotInstances", + "ec2:TerminateInstances", + "ec2:DescribeInstanceStatus", + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIMRTKHWK7ESSNETSW", + "PolicyName": "AmazonEC2SpotFleetRole", + "UpdateDate": "2016-11-10T21:19:35+00:00", 
+ "VersionId": "v3" + }, + "AmazonEC2SpotFleetTaggingRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetTaggingRole", + "AttachmentCount": 0, + "CreateDate": "2017-07-26T19:10:35+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:RequestSpotInstances", + "ec2:TerminateInstances", + "ec2:DescribeInstanceStatus", + "ec2:CreateTags" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringEquals": { + "iam:PassedToService": "ec2.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJ5U6UMLCEYLX5OLC4", + "PolicyName": "AmazonEC2SpotFleetTaggingRole", + "UpdateDate": "2017-07-26T19:10:35+00:00", + "VersionId": "v2" + }, + "AmazonESFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonESFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-10-01T19:14:00+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "es:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJM6ZTCU24QL5PZCGC", + "PolicyName": "AmazonESFullAccess", + "UpdateDate": "2015-10-01T19:14:00+00:00", + "VersionId": "v1" + }, + "AmazonESReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonESReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-10-01T19:18:24+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "es:Describe*", + "es:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJUDMRLOQ7FPAR46FQ", + "PolicyName": "AmazonESReadOnlyAccess", + 
"UpdateDate": "2015-10-01T19:18:24+00:00", + "VersionId": "v1" + }, + "AmazonElastiCacheFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElastiCacheFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:20+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "elasticache:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIA2V44CPHAUAAECKG", + "PolicyName": "AmazonElastiCacheFullAccess", + "UpdateDate": "2015-02-06T18:40:20+00:00", + "VersionId": "v1" + }, + "AmazonElastiCacheReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElastiCacheReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:21+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "elasticache:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIPDACSNQHSENWAKM2", + "PolicyName": "AmazonElastiCacheReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:21+00:00", + "VersionId": "v1" + }, + "AmazonElasticFileSystemFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticFileSystemFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T10:18:34+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "elasticfilesystem:*", + "kms:DescribeKey", + "kms:ListAliases" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + 
"IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJKXTMNVQGIDNCKPBC", + "PolicyName": "AmazonElasticFileSystemFullAccess", + "UpdateDate": "2017-08-14T10:18:34+00:00", + "VersionId": "v3" + }, + "AmazonElasticFileSystemReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticFileSystemReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T10:09:49+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeAvailabilityZones", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "elasticfilesystem:Describe*", + "kms:ListAliases" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIPN5S4NE5JJOKVC4Y", + "PolicyName": "AmazonElasticFileSystemReadOnlyAccess", + "UpdateDate": "2017-08-14T10:09:49+00:00", + "VersionId": "v3" + }, + "AmazonElasticMapReduceFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-12-21T23:20:38+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:*", + "cloudformation:CreateStack", + "cloudformation:DescribeStackEvents", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:CancelSpotInstanceRequests", + "ec2:CreateRoute", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteRoute", + "ec2:DeleteTags", + "ec2:DeleteSecurityGroup", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstances", + "ec2:DescribeKeyPairs", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + 
"ec2:DescribeRouteTables", + "ec2:DescribeNetworkAcls", + "ec2:CreateVpcEndpoint", + "ec2:ModifyImageAttribute", + "ec2:ModifyInstanceAttribute", + "ec2:RequestSpotInstances", + "ec2:RevokeSecurityGroupEgress", + "ec2:RunInstances", + "ec2:TerminateInstances", + "elasticmapreduce:*", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:ListRoles", + "iam:PassRole", + "kms:List*", + "s3:*", + "sdb:*", + "support:CreateCase", + "support:DescribeServices", + "support:DescribeSeverityLevels" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIZP5JFP3AMSGINBB2", + "PolicyName": "AmazonElasticMapReduceFullAccess", + "UpdateDate": "2015-12-21T23:20:38+00:00", + "VersionId": "v4" + }, + "AmazonElasticMapReduceReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-05-22T23:00:19+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "elasticmapreduce:Describe*", + "elasticmapreduce:List*", + "elasticmapreduce:ViewEventsFromAllClustersInConsole", + "s3:GetObject", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "sdb:Select", + "cloudwatch:GetMetricStatistics" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIHP6NH2S6GYFCOINC", + "PolicyName": "AmazonElasticMapReduceReadOnlyAccess", + "UpdateDate": "2017-05-22T23:00:19+00:00", + "VersionId": "v2" + }, + "AmazonElasticMapReduceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", + "AttachmentCount": 0, + "CreateDate": "2017-07-17T21:29:50+00:00", + "DefaultVersionId": "v8", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CancelSpotInstanceRequests", + 
"ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:DeleteTags", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeAccountAttributes", + "ec2:DescribeDhcpOptions", + "ec2:DescribeImages", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeKeyPairs", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePrefixLists", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcEndpointServices", + "ec2:DescribeVpcs", + "ec2:DetachNetworkInterface", + "ec2:ModifyImageAttribute", + "ec2:ModifyInstanceAttribute", + "ec2:RequestSpotInstances", + "ec2:RevokeSecurityGroupEgress", + "ec2:RunInstances", + "ec2:TerminateInstances", + "ec2:DeleteVolume", + "ec2:DescribeVolumeStatus", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:ListInstanceProfiles", + "iam:ListRolePolicies", + "iam:PassRole", + "s3:CreateBucket", + "s3:Get*", + "s3:List*", + "sdb:BatchPutAttributes", + "sdb:Select", + "sqs:CreateQueue", + "sqs:Delete*", + "sqs:GetQueue*", + "sqs:PurgeQueue", + "sqs:ReceiveMessage", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms", + "application-autoscaling:RegisterScalableTarget", + "application-autoscaling:DeregisterScalableTarget", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIDI2BQT2LKXZG36TW", + "PolicyName": "AmazonElasticMapReduceRole", + "UpdateDate": "2017-07-17T21:29:50+00:00", 
+ "VersionId": "v8" + }, + "AmazonElasticMapReduceforAutoScalingRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforAutoScalingRole", + "AttachmentCount": 0, + "CreateDate": "2016-11-18T01:09:10+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:DescribeAlarms", + "elasticmapreduce:ListInstanceGroups", + "elasticmapreduce:ModifyInstanceGroups" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJSVXG6QHPE6VHDZ4Q", + "PolicyName": "AmazonElasticMapReduceforAutoScalingRole", + "UpdateDate": "2016-11-18T01:09:10+00:00", + "VersionId": "v1" + }, + "AmazonElasticMapReduceforEC2Role": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role", + "AttachmentCount": 0, + "CreateDate": "2017-08-11T23:57:30+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:*", + "dynamodb:*", + "ec2:Describe*", + "elasticmapreduce:Describe*", + "elasticmapreduce:ListBootstrapActions", + "elasticmapreduce:ListClusters", + "elasticmapreduce:ListInstanceGroups", + "elasticmapreduce:ListInstances", + "elasticmapreduce:ListSteps", + "kinesis:CreateStream", + "kinesis:DeleteStream", + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:MergeShards", + "kinesis:PutRecord", + "kinesis:SplitShard", + "rds:Describe*", + "s3:*", + "sdb:*", + "sns:*", + "sqs:*", + "glue:CreateDatabase", + "glue:UpdateDatabase", + "glue:DeleteDatabase", + "glue:GetDatabase", + "glue:GetDatabases", + "glue:CreateTable", + "glue:UpdateTable", + "glue:DeleteTable", + "glue:GetTable", + "glue:GetTables", + "glue:GetTableVersions", + "glue:CreatePartition", + "glue:BatchCreatePartition", + "glue:UpdatePartition", + "glue:DeletePartition", + "glue:BatchDeletePartition", + "glue:GetPartition", + 
"glue:GetPartitions", + "glue:BatchGetPartition", + "glue:CreateUserDefinedFunction", + "glue:UpdateUserDefinedFunction", + "glue:DeleteUserDefinedFunction", + "glue:GetUserDefinedFunction", + "glue:GetUserDefinedFunctions" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIGALS5RCDLZLB3PGS", + "PolicyName": "AmazonElasticMapReduceforEC2Role", + "UpdateDate": "2017-08-11T23:57:30+00:00", + "VersionId": "v3" + }, + "AmazonElasticTranscoderFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:24+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "elastictranscoder:*", + "cloudfront:*", + "s3:List*", + "s3:Put*", + "s3:Get*", + "s3:*MultipartUpload*", + "iam:CreateRole", + "iam:GetRolePolicy", + "iam:PassRole", + "iam:PutRolePolicy", + "iam:List*", + "sns:CreateTopic", + "sns:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ4D5OJU75P5ZJZVNY", + "PolicyName": "AmazonElasticTranscoderFullAccess", + "UpdateDate": "2015-02-06T18:40:24+00:00", + "VersionId": "v1" + }, + "AmazonElasticTranscoderJobsSubmitter": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderJobsSubmitter", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:25+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "elastictranscoder:Read*", + "elastictranscoder:List*", + "elastictranscoder:*Job", + "elastictranscoder:*Preset", + "s3:List*", + "iam:List*", + "sns:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIN5WGARIKZ3E2UQOU", + "PolicyName": 
"AmazonElasticTranscoderJobsSubmitter", + "UpdateDate": "2015-02-06T18:40:25+00:00", + "VersionId": "v1" + }, + "AmazonElasticTranscoderReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:26+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "elastictranscoder:Read*", + "elastictranscoder:List*", + "s3:List*", + "iam:List*", + "sns:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJGPP7GPMJRRJMEP3Q", + "PolicyName": "AmazonElasticTranscoderReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:26+00:00", + "VersionId": "v1" + }, + "AmazonElasticTranscoderRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticTranscoderRole", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:26+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:ListBucket", + "s3:Put*", + "s3:Get*", + "s3:*MultipartUpload*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "1" + }, + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "2" + }, + { + "Action": [ + "s3:*Policy*", + "sns:*Permission*", + "sns:*Delete*", + "s3:*Delete*", + "sns:*Remove*" + ], + "Effect": "Deny", + "Resource": [ + "*" + ], + "Sid": "3" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJNW3WMKVXFJ2KPIQ2", + "PolicyName": "AmazonElasticTranscoderRole", + "UpdateDate": "2015-02-06T18:41:26+00:00", + "VersionId": "v1" + }, + "AmazonElasticsearchServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonElasticsearchServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-07-07T00:15:31+00:00", + "DefaultVersionId": "v1", + "Document": { + 
"Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "Stmt1480452973134" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PolicyId": "ANPAJFEWZPHXKLCVHEUIC", + "PolicyName": "AmazonElasticsearchServiceRolePolicy", + "UpdateDate": "2017-07-07T00:15:31+00:00", + "VersionId": "v1" + }, + "AmazonGlacierFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonGlacierFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:28+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "glacier:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJQSTZJWB2AXXAKHVQ", + "PolicyName": "AmazonGlacierFullAccess", + "UpdateDate": "2015-02-06T18:40:28+00:00", + "VersionId": "v1" + }, + "AmazonGlacierReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonGlacierReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-05-05T18:46:10+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "glacier:DescribeJob", + "glacier:DescribeVault", + "glacier:GetDataRetrievalPolicy", + "glacier:GetJobOutput", + "glacier:GetVaultAccessPolicy", + "glacier:GetVaultLock", + "glacier:GetVaultNotifications", + "glacier:ListJobs", + "glacier:ListMultipartUploads", + "glacier:ListParts", + "glacier:ListTagsForVault", + "glacier:ListVaults" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI2D5NJKMU274MET4E", + "PolicyName": "AmazonGlacierReadOnlyAccess", + "UpdateDate": 
"2016-05-05T18:46:10+00:00", + "VersionId": "v2" + }, + "AmazonInspectorFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonInspectorFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-09-12T17:42:57+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "inspector:*", + "ec2:DescribeInstances", + "ec2:DescribeTags", + "sns:ListTopics", + "events:DescribeRule", + "events:ListRuleNamesByTarget" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI7Y6NTA27NWNA5U5E", + "PolicyName": "AmazonInspectorFullAccess", + "UpdateDate": "2017-09-12T17:42:57+00:00", + "VersionId": "v3" + }, + "AmazonInspectorReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonInspectorReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-09-12T16:53:06+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "inspector:Describe*", + "inspector:Get*", + "inspector:List*", + "inspector:LocalizeText", + "inspector:Preview*", + "ec2:DescribeInstances", + "ec2:DescribeTags", + "sns:ListTopics", + "events:DescribeRule", + "events:ListRuleNamesByTarget" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJXQNTHTEJ2JFRN2SE", + "PolicyName": "AmazonInspectorReadOnlyAccess", + "UpdateDate": "2017-09-12T16:53:06+00:00", + "VersionId": "v3" + }, + "AmazonKinesisAnalyticsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonKinesisAnalyticsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-09-21T19:01:14+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "kinesisanalytics:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "kinesis:CreateStream", + "kinesis:DeleteStream", + "kinesis:DescribeStream", + "kinesis:ListStreams", + 
"kinesis:PutRecord", + "kinesis:PutRecords" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "firehose:DescribeDeliveryStream", + "firehose:ListDeliveryStreams" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "logs:GetLogEvents", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:ListPolicyVersions", + "iam:ListRoles" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/service-role/kinesis-analytics*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJQOSKHTXP43R7P5AC", + "PolicyName": "AmazonKinesisAnalyticsFullAccess", + "UpdateDate": "2016-09-21T19:01:14+00:00", + "VersionId": "v1" + }, + "AmazonKinesisAnalyticsReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AmazonKinesisAnalyticsReadOnly", + "AttachmentCount": 0, + "CreateDate": "2016-09-21T18:16:43+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "kinesisanalytics:Describe*", + "kinesisanalytics:Get*", + "kinesisanalytics:List*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "kinesis:DescribeStream", + "kinesis:ListStreams" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "firehose:DescribeDeliveryStream", + "firehose:ListDeliveryStreams" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "logs:GetLogEvents", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:ListPolicyVersions", + "iam:ListRoles" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + 
"Path": "/", + "PolicyId": "ANPAIJIEXZAFUK43U7ARK", + "PolicyName": "AmazonKinesisAnalyticsReadOnly", + "UpdateDate": "2016-09-21T18:16:43+00:00", + "VersionId": "v1" + }, + "AmazonKinesisFirehoseFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonKinesisFirehoseFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-10-07T18:45:26+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "firehose:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJMZQMTZ7FRBFHHAHI", + "PolicyName": "AmazonKinesisFirehoseFullAccess", + "UpdateDate": "2015-10-07T18:45:26+00:00", + "VersionId": "v1" + }, + "AmazonKinesisFirehoseReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonKinesisFirehoseReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-10-07T18:43:39+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "firehose:Describe*", + "firehose:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ36NT645INW4K24W6", + "PolicyName": "AmazonKinesisFirehoseReadOnlyAccess", + "UpdateDate": "2015-10-07T18:43:39+00:00", + "VersionId": "v1" + }, + "AmazonKinesisFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonKinesisFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:29+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "kinesis:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIVF32HAMOXCUYRAYE", + "PolicyName": "AmazonKinesisFullAccess", + "UpdateDate": "2015-02-06T18:40:29+00:00", + "VersionId": "v1" + }, + "AmazonKinesisReadOnlyAccess": { + "Arn": 
"arn:aws:iam::aws:policy/AmazonKinesisReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:30+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "kinesis:Get*", + "kinesis:List*", + "kinesis:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIOCMTDT5RLKZ2CAJO", + "PolicyName": "AmazonKinesisReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:30+00:00", + "VersionId": "v1" + }, + "AmazonLexFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonLexFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-04-14T19:45:37+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "cloudwatch:DescribeAlarms", + "cloudwatch:DescribeAlarmsForMetric", + "kms:DescribeKey", + "kms:ListAliases", + "lambda:GetPolicy", + "lambda:ListFunctions", + "lex:*", + "polly:DescribeVoices", + "polly:SynthesizeSpeech" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "lambda:AddPermission", + "lambda:RemovePermission" + ], + "Condition": { + "StringLike": { + "lambda:Principal": "lex.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:lambda:*:*:function:AmazonLex*" + }, + { + "Action": [ + "iam:GetRole", + "iam:DeleteRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots", + "arn:aws:iam::*:role/aws-service-role/channels.lex.amazonaws.com/AWSServiceRoleForLexChannels" + ] + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringLike": { + "iam:AWSServiceName": "lex.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots" + ] + }, + { + "Action": [ + "iam:DetachRolePolicy" + ], + "Condition": { + 
"StringLike": { + "iam:PolicyArn": "arn:aws:iam::aws:policy/aws-service-role/AmazonLexBotPolicy" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots" + ] + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringLike": { + "iam:AWSServiceName": "channels.lex.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/channels.lex.amazonaws.com/AWSServiceRoleForLexChannels" + ] + }, + { + "Action": [ + "iam:DetachRolePolicy" + ], + "Condition": { + "StringLike": { + "iam:PolicyArn": "arn:aws:iam::aws:policy/aws-service-role/LexChannelPolicy" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/channels.lex.amazonaws.com/AWSServiceRoleForLexChannels" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJVLXDHKVC23HRTKSI", + "PolicyName": "AmazonLexFullAccess", + "UpdateDate": "2017-04-14T19:45:37+00:00", + "VersionId": "v3" + }, + "AmazonLexReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AmazonLexReadOnly", + "AttachmentCount": 0, + "CreateDate": "2017-04-11T23:13:33+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "lex:GetBot", + "lex:GetBotAlias", + "lex:GetBotAliases", + "lex:GetBots", + "lex:GetBotChannelAssociation", + "lex:GetBotChannelAssociations", + "lex:GetBotVersions", + "lex:GetBuiltinIntent", + "lex:GetBuiltinIntents", + "lex:GetBuiltinSlotTypes", + "lex:GetIntent", + "lex:GetIntents", + "lex:GetIntentVersions", + "lex:GetSlotType", + "lex:GetSlotTypes", + "lex:GetSlotTypeVersions", + "lex:GetUtterancesView" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJGBI5LSMAJNDGBNAM", + "PolicyName": "AmazonLexReadOnly", + "UpdateDate": 
"2017-04-11T23:13:33+00:00", + "VersionId": "v1" + }, + "AmazonLexRunBotsOnly": { + "Arn": "arn:aws:iam::aws:policy/AmazonLexRunBotsOnly", + "AttachmentCount": 0, + "CreateDate": "2017-04-11T23:06:24+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "lex:PostContent", + "lex:PostText" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJVZGB5CM3N6YWJHBE", + "PolicyName": "AmazonLexRunBotsOnly", + "UpdateDate": "2017-04-11T23:06:24+00:00", + "VersionId": "v1" + }, + "AmazonMachineLearningBatchPredictionsAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningBatchPredictionsAccess", + "AttachmentCount": 0, + "CreateDate": "2015-04-09T17:12:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "machinelearning:CreateBatchPrediction", + "machinelearning:DeleteBatchPrediction", + "machinelearning:DescribeBatchPredictions", + "machinelearning:GetBatchPrediction", + "machinelearning:UpdateBatchPrediction" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAILOI4HTQSFTF3GQSC", + "PolicyName": "AmazonMachineLearningBatchPredictionsAccess", + "UpdateDate": "2015-04-09T17:12:19+00:00", + "VersionId": "v1" + }, + "AmazonMachineLearningCreateOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningCreateOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-06-29T20:55:03+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "machinelearning:Add*", + "machinelearning:Create*", + "machinelearning:Delete*", + "machinelearning:Describe*", + "machinelearning:Get*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", 
+ "PolicyId": "ANPAJDRUNIC2RYAMAT3CK", + "PolicyName": "AmazonMachineLearningCreateOnlyAccess", + "UpdateDate": "2016-06-29T20:55:03+00:00", + "VersionId": "v2" + }, + "AmazonMachineLearningFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-04-09T17:25:41+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "machinelearning:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIWKW6AGSGYOQ5ERHC", + "PolicyName": "AmazonMachineLearningFullAccess", + "UpdateDate": "2015-04-09T17:25:41+00:00", + "VersionId": "v1" + }, + "AmazonMachineLearningManageRealTimeEndpointOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningManageRealTimeEndpointOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-04-09T17:32:41+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "machinelearning:CreateRealtimeEndpoint", + "machinelearning:DeleteRealtimeEndpoint" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJJL3PC3VCSVZP6OCI", + "PolicyName": "AmazonMachineLearningManageRealTimeEndpointOnlyAccess", + "UpdateDate": "2015-04-09T17:32:41+00:00", + "VersionId": "v1" + }, + "AmazonMachineLearningReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-04-09T17:40:02+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "machinelearning:Describe*", + "machinelearning:Get*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIW5VYBCGEX56JCINC", + "PolicyName": 
"AmazonMachineLearningReadOnlyAccess", + "UpdateDate": "2015-04-09T17:40:02+00:00", + "VersionId": "v1" + }, + "AmazonMachineLearningRealTimePredictionOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningRealTimePredictionOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-04-09T17:44:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "machinelearning:Predict" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIWMCNQPRWMWT36GVQ", + "PolicyName": "AmazonMachineLearningRealTimePredictionOnlyAccess", + "UpdateDate": "2015-04-09T17:44:06+00:00", + "VersionId": "v1" + }, + "AmazonMachineLearningRoleforRedshiftDataSource": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonMachineLearningRoleforRedshiftDataSource", + "AttachmentCount": 0, + "CreateDate": "2015-04-09T17:05:26+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:RevokeSecurityGroupIngress", + "redshift:AuthorizeClusterSecurityGroupIngress", + "redshift:CreateClusterSecurityGroup", + "redshift:DescribeClusters", + "redshift:DescribeClusterSecurityGroups", + "redshift:ModifyCluster", + "redshift:RevokeClusterSecurityGroupIngress", + "s3:GetBucketLocation", + "s3:GetBucketPolicy", + "s3:GetObject", + "s3:PutBucketPolicy", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIQ5UDYYMNN42BM4AK", + "PolicyName": "AmazonMachineLearningRoleforRedshiftDataSource", + "UpdateDate": "2015-04-09T17:05:26+00:00", + "VersionId": "v1" + }, + "AmazonMacieFullAccess": { + "Arn": 
"arn:aws:iam::aws:policy/AmazonMacieFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T14:54:30+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "macie:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJJF2N5FR6S5TZN5OA", + "PolicyName": "AmazonMacieFullAccess", + "UpdateDate": "2017-08-14T14:54:30+00:00", + "VersionId": "v1" + }, + "AmazonMacieServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonMacieServiceRole", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T14:53:26+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:Get*", + "s3:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJVV7PON3FPBL2PSGC", + "PolicyName": "AmazonMacieServiceRole", + "UpdateDate": "2017-08-14T14:53:26+00:00", + "VersionId": "v1" + }, + "AmazonMacieSetupRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonMacieSetupRole", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T14:53:34+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudtrail:DescribeTrails", + "cloudtrail:GetEventSelectors", + "cloudtrail:GetTrailStatus", + "cloudtrail:ListTags", + "cloudtrail:LookupEvents", + "iam:ListAccountAliases", + "s3:GetBucket*", + "s3:ListBucket", + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudtrail:CreateTrail", + "cloudtrail:StartLogging", + "cloudtrail:StopLogging", + "cloudtrail:UpdateTrail", + "cloudtrail:DeleteTrail", + "cloudtrail:PutEventSelectors" + ], + "Effect": "Allow", + "Resource": "arn:aws:cloudtrail:*:*:trail/AWSMacieTrail-DO-NOT-EDIT" + }, + { + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + 
"s3:DeleteBucketPolicy", + "s3:DeleteBucketWebsite", + "s3:DeleteObject", + "s3:DeleteObjectTagging", + "s3:DeleteObjectVersion", + "s3:DeleteObjectVersionTagging", + "s3:DeleteReplicationConfiguration", + "s3:PutBucketPolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::awsmacie-*", + "arn:aws:s3:::awsmacietrail-*", + "arn:aws:s3:::*-awsmacietrail-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJ5DC6UBVKND7ADSKA", + "PolicyName": "AmazonMacieSetupRole", + "UpdateDate": "2017-08-14T14:53:34+00:00", + "VersionId": "v1" + }, + "AmazonMechanicalTurkFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMechanicalTurkFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-12-11T19:08:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "mechanicalturk:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJDGCL5BET73H5QIQC", + "PolicyName": "AmazonMechanicalTurkFullAccess", + "UpdateDate": "2015-12-11T19:08:19+00:00", + "VersionId": "v1" + }, + "AmazonMechanicalTurkReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AmazonMechanicalTurkReadOnly", + "AttachmentCount": 0, + "CreateDate": "2017-02-27T21:45:50+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "mechanicalturk:Get*", + "mechanicalturk:Search*", + "mechanicalturk:List*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIO5IY3G3WXSX5PPRM", + "PolicyName": "AmazonMechanicalTurkReadOnly", + "UpdateDate": "2017-02-27T21:45:50+00:00", + "VersionId": "v2" + }, + "AmazonMobileAnalyticsFinancialReportAccess": { + "Arn": 
"arn:aws:iam::aws:policy/AmazonMobileAnalyticsFinancialReportAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:35+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "mobileanalytics:GetReports", + "mobileanalytics:GetFinancialReports" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJKJHO2R27TXKCWBU4", + "PolicyName": "AmazonMobileAnalyticsFinancialReportAccess", + "UpdateDate": "2015-02-06T18:40:35+00:00", + "VersionId": "v1" + }, + "AmazonMobileAnalyticsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:34+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "mobileanalytics:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIJIKLU2IJ7WJ6DZFG", + "PolicyName": "AmazonMobileAnalyticsFullAccess", + "UpdateDate": "2015-02-06T18:40:34+00:00", + "VersionId": "v1" + }, + "AmazonMobileAnalyticsNon-financialReportAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsNon-financialReportAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:36+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "mobileanalytics:GetReports", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIQLKQ4RXPUBBVVRDE", + "PolicyName": "AmazonMobileAnalyticsNon-financialReportAccess", + "UpdateDate": "2015-02-06T18:40:36+00:00", + "VersionId": "v1" + }, + "AmazonMobileAnalyticsWriteOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsWriteOnlyAccess", + "AttachmentCount": 0, + "CreateDate": 
"2015-02-06T18:40:37+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "mobileanalytics:PutEvents", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ5TAWBBQC2FAL3G6G", + "PolicyName": "AmazonMobileAnalyticsWriteOnlyAccess", + "UpdateDate": "2015-02-06T18:40:37+00:00", + "VersionId": "v1" + }, + "AmazonPollyFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonPollyFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-11-30T18:59:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "polly:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJUZOYQU6XQYPR7EWS", + "PolicyName": "AmazonPollyFullAccess", + "UpdateDate": "2016-11-30T18:59:06+00:00", + "VersionId": "v1" + }, + "AmazonPollyReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonPollyReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-11-30T18:59:24+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "polly:DescribeVoices", + "polly:GetLexicon", + "polly:ListLexicons", + "polly:SynthesizeSpeech" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ5FENL3CVPL2FPDLA", + "PolicyName": "AmazonPollyReadOnlyAccess", + "UpdateDate": "2016-11-30T18:59:24+00:00", + "VersionId": "v1" + }, + "AmazonRDSDirectoryServiceAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonRDSDirectoryServiceAccess", + "AttachmentCount": 0, + "CreateDate": "2016-02-26T02:02:05+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ds:DescribeDirectories", + "ds:AuthorizeApplication", + 
"ds:UnauthorizeApplication" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIL4KBY57XWMYUHKUU", + "PolicyName": "AmazonRDSDirectoryServiceAccess", + "UpdateDate": "2016-02-26T02:02:05+00:00", + "VersionId": "v1" + }, + "AmazonRDSEnhancedMonitoringRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole", + "AttachmentCount": 1, + "CreateDate": "2015-11-11T19:58:29+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:PutRetentionPolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:RDS*" + ], + "Sid": "EnableCreationAndManagementOfRDSCloudwatchLogGroups" + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams", + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:RDS*:log-stream:*" + ], + "Sid": "EnableCreationAndManagementOfRDSCloudwatchLogStreams" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJV7BS425S4PTSSVGK", + "PolicyName": "AmazonRDSEnhancedMonitoringRole", + "UpdateDate": "2015-11-11T19:58:29+00:00", + "VersionId": "v1" + }, + "AmazonRDSFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRDSFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-09-14T23:40:45+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "rds:*", + "cloudwatch:DescribeAlarms", + "cloudwatch:GetMetricStatistics", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "sns:ListSubscriptions", + "sns:ListTopics", + "logs:DescribeLogStreams", + 
"logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "pi:*", + "Effect": "Allow", + "Resource": "arn:aws:pi:*:*:metrics/rds/*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI3R4QMOG6Q5A4VWVG", + "PolicyName": "AmazonRDSFullAccess", + "UpdateDate": "2017-09-14T23:40:45+00:00", + "VersionId": "v4" + }, + "AmazonRDSReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-28T21:36:32+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "rds:Describe*", + "rds:ListTagsForResource", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "logs:DescribeLogStreams", + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJKTTTYV2IIHKLZ346", + "PolicyName": "AmazonRDSReadOnlyAccess", + "UpdateDate": "2017-08-28T21:36:32+00:00", + "VersionId": "v3" + }, + "AmazonRedshiftFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRedshiftFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-09-19T18:27:44+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "redshift:*", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeInternetGateways", + "sns:CreateTopic", + "sns:Get*", + "sns:List*", + "cloudwatch:Describe*", + "cloudwatch:Get*", + "cloudwatch:List*", + "cloudwatch:PutMetricAlarm", + 
"cloudwatch:EnableAlarmActions", + "cloudwatch:DisableAlarmActions" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "redshift.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/redshift.amazonaws.com/AWSServiceRoleForRedshift" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAISEKCHH4YDB46B5ZO", + "PolicyName": "AmazonRedshiftFullAccess", + "UpdateDate": "2017-09-19T18:27:44+00:00", + "VersionId": "v2" + }, + "AmazonRedshiftReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRedshiftReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:51+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "redshift:Describe*", + "redshift:ViewQueriesInConsole", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeInternetGateways", + "sns:Get*", + "sns:List*", + "cloudwatch:Describe*", + "cloudwatch:List*", + "cloudwatch:Get*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIGD46KSON64QBSEZM", + "PolicyName": "AmazonRedshiftReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:51+00:00", + "VersionId": "v1" + }, + "AmazonRedshiftServiceLinkedRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonRedshiftServiceLinkedRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-09-18T19:19:45+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeAddress", + "ec2:AssociateAddress", + 
"ec2:DisassociateAddress", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:ModifyNetworkInterfaceAttribute" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PolicyId": "ANPAJPY2VXNRUYOY3SRZS", + "PolicyName": "AmazonRedshiftServiceLinkedRolePolicy", + "UpdateDate": "2017-09-18T19:19:45+00:00", + "VersionId": "v1" + }, + "AmazonRekognitionFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRekognitionFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-11-30T14:40:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "rekognition:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIWDAOK6AIFDVX6TT6", + "PolicyName": "AmazonRekognitionFullAccess", + "UpdateDate": "2016-11-30T14:40:44+00:00", + "VersionId": "v1" + }, + "AmazonRekognitionReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRekognitionReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-11-30T14:58:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "rekognition:CompareFaces", + "rekognition:DetectFaces", + "rekognition:DetectLabels", + "rekognition:ListCollections", + "rekognition:ListFaces", + "rekognition:SearchFaces", + "rekognition:SearchFacesByImage" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAILWSUHXUY4ES43SA4", + "PolicyName": "AmazonRekognitionReadOnlyAccess", + "UpdateDate": "2016-11-30T14:58:06+00:00", + "VersionId": "v1" + }, + "AmazonRoute53DomainsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRoute53DomainsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:56+00:00", + 
"DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "route53:CreateHostedZone", + "route53domains:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIPAFBMIYUILMOKL6G", + "PolicyName": "AmazonRoute53DomainsFullAccess", + "UpdateDate": "2015-02-06T18:40:56+00:00", + "VersionId": "v1" + }, + "AmazonRoute53DomainsReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRoute53DomainsReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:57+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "route53domains:Get*", + "route53domains:List*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIDRINP6PPTRXYVQCI", + "PolicyName": "AmazonRoute53DomainsReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:57+00:00", + "VersionId": "v1" + }, + "AmazonRoute53FullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRoute53FullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-02-14T21:25:53+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "route53:*", + "route53domains:*", + "cloudfront:ListDistributions", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticbeanstalk:DescribeEnvironments", + "s3:ListBucket", + "s3:GetBucketLocation", + "s3:GetBucketWebsiteConfiguration", + "ec2:DescribeVpcs", + "ec2:DescribeRegions", + "sns:ListTopics", + "sns:ListSubscriptionsByTopic", + "cloudwatch:DescribeAlarms", + "cloudwatch:GetMetricStatistics" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJWVDLG5RPST6PHQ3A", + "PolicyName": "AmazonRoute53FullAccess", + "UpdateDate": 
"2017-02-14T21:25:53+00:00", + "VersionId": "v2" + }, + "AmazonRoute53ReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRoute53ReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-11-15T21:15:16+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "route53:Get*", + "route53:List*", + "route53:TestDNSAnswer" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAITOYK2ZAOQFXV2JNC", + "PolicyName": "AmazonRoute53ReadOnlyAccess", + "UpdateDate": "2016-11-15T21:15:16+00:00", + "VersionId": "v2" + }, + "AmazonS3FullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonS3FullAccess", + "AttachmentCount": 1, + "CreateDate": "2015-02-06T18:40:58+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "s3:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIFIR6V6BVTRAHWINE", + "PolicyName": "AmazonS3FullAccess", + "UpdateDate": "2015-02-06T18:40:58+00:00", + "VersionId": "v1" + }, + "AmazonS3ReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:59+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:Get*", + "s3:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIZTJ4DXE7G6AGAE6M", + "PolicyName": "AmazonS3ReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:59+00:00", + "VersionId": "v1" + }, + "AmazonSESFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSESFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:02+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + 
"Action": [ + "ses:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ2P4NXCHAT7NDPNR4", + "PolicyName": "AmazonSESFullAccess", + "UpdateDate": "2015-02-06T18:41:02+00:00", + "VersionId": "v1" + }, + "AmazonSESReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSESReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:03+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ses:Get*", + "ses:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAINV2XPFRMWJJNSCGI", + "PolicyName": "AmazonSESReadOnlyAccess", + "UpdateDate": "2015-02-06T18:41:03+00:00", + "VersionId": "v1" + }, + "AmazonSNSFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSNSFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:05+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sns:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJWEKLCXXUNT2SOLSG", + "PolicyName": "AmazonSNSFullAccess", + "UpdateDate": "2015-02-06T18:41:05+00:00", + "VersionId": "v1" + }, + "AmazonSNSReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSNSReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sns:GetTopicAttributes", + "sns:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIZGQCQTFOFPMHSB6W", + "PolicyName": "AmazonSNSReadOnlyAccess", + "UpdateDate": "2015-02-06T18:41:06+00:00", + 
"VersionId": "v1" + }, + "AmazonSNSRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonSNSRole", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:30+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:PutMetricFilter", + "logs:PutRetentionPolicy" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJK5GQB7CIK7KHY2GA", + "PolicyName": "AmazonSNSRole", + "UpdateDate": "2015-02-06T18:41:30+00:00", + "VersionId": "v1" + }, + "AmazonSQSFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSQSFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:07+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sqs:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI65L554VRJ33ECQS6", + "PolicyName": "AmazonSQSFullAccess", + "UpdateDate": "2015-02-06T18:41:07+00:00", + "VersionId": "v1" + }, + "AmazonSQSReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSQSReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:08+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sqs:GetQueueAttributes", + "sqs:ListQueues" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIUGSSQY362XGCM6KW", + "PolicyName": "AmazonSQSReadOnlyAccess", + "UpdateDate": "2015-02-06T18:41:08+00:00", + "VersionId": "v1" + }, + "AmazonSSMAutomationApproverAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSSMAutomationApproverAccess", + "AttachmentCount": 0, + "CreateDate": 
"2017-08-07T23:07:28+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ssm:DescribeAutomationExecutions", + "ssm:GetAutomationExecution", + "ssm:SendAutomationSignal" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIDSSXIRWBSLWWIORC", + "PolicyName": "AmazonSSMAutomationApproverAccess", + "UpdateDate": "2017-08-07T23:07:28+00:00", + "VersionId": "v1" + }, + "AmazonSSMAutomationRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonSSMAutomationRole", + "AttachmentCount": 0, + "CreateDate": "2017-07-24T23:29:12+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "lambda:InvokeFunction" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:lambda:*:*:function:Automation*" + ] + }, + { + "Action": [ + "ec2:CreateImage", + "ec2:CopyImage", + "ec2:DeregisterImage", + "ec2:DescribeImages", + "ec2:DeleteSnapshot", + "ec2:StartInstances", + "ec2:RunInstances", + "ec2:StopInstances", + "ec2:TerminateInstances", + "ec2:DescribeInstanceStatus", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:DescribeTags", + "cloudformation:CreateStack", + "cloudformation:DescribeStackEvents", + "cloudformation:DescribeStacks", + "cloudformation:UpdateStack", + "cloudformation:DeleteStack" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ssm:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sns:*:*:Automation*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJIBQCTBCXD2XRNB6W", + "PolicyName": "AmazonSSMAutomationRole", + "UpdateDate": "2017-07-24T23:29:12+00:00", + "VersionId": "v5" + }, + "AmazonSSMFullAccess": { + "Arn": 
"arn:aws:iam::aws:policy/AmazonSSMFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-03-07T21:09:12+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:PutMetricData", + "ds:CreateComputer", + "ds:DescribeDirectories", + "ec2:DescribeInstanceStatus", + "logs:*", + "ssm:*", + "ec2messages:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJA7V6HI4ISQFMDYAG", + "PolicyName": "AmazonSSMFullAccess", + "UpdateDate": "2016-03-07T21:09:12+00:00", + "VersionId": "v2" + }, + "AmazonSSMMaintenanceWindowRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonSSMMaintenanceWindowRole", + "AttachmentCount": 0, + "CreateDate": "2017-08-09T20:49:14+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ssm:GetAutomationExecution", + "ssm:GetParameters", + "ssm:ListCommands", + "ssm:SendCommand", + "ssm:StartAutomationExecution" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "Stmt1477803259000" + }, + { + "Action": [ + "lambda:InvokeFunction" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:lambda:*:*:function:SSM*", + "arn:aws:lambda:*:*:function:*:SSM*" + ], + "Sid": "Stmt1477803259001" + }, + { + "Action": [ + "states:DescribeExecution", + "states:StartExecution" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:states:*:*:stateMachine:SSM*", + "arn:aws:states:*:*:execution:SSM*" + ], + "Sid": "Stmt1477803259002" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJV3JNYSTZ47VOXYME", + "PolicyName": "AmazonSSMMaintenanceWindowRole", + "UpdateDate": "2017-08-09T20:49:14+00:00", + "VersionId": "v2" + }, + "AmazonSSMReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": 
"2015-05-29T17:44:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ssm:Describe*", + "ssm:Get*", + "ssm:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJODSKQGGJTHRYZ5FC", + "PolicyName": "AmazonSSMReadOnlyAccess", + "UpdateDate": "2015-05-29T17:44:19+00:00", + "VersionId": "v1" + }, + "AmazonVPCCrossAccountNetworkInterfaceOperations": { + "Arn": "arn:aws:iam::aws:policy/AmazonVPCCrossAccountNetworkInterfaceOperations", + "AttachmentCount": 0, + "CreateDate": "2017-07-18T20:47:16+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeRouteTables", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:ReplaceRoute" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:DescribeNetworkInterfaces", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:AssignPrivateIpAddresses", + "ec2:UnassignPrivateIpAddresses" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ53Y4ZY5OHP4CNRJC", + "PolicyName": "AmazonVPCCrossAccountNetworkInterfaceOperations", + "UpdateDate": "2017-07-18T20:47:16+00:00", + "VersionId": "v1" + }, + "AmazonVPCFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonVPCFullAccess", + "AttachmentCount": 1, + "CreateDate": "2015-12-17T17:25:44+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AcceptVpcPeeringConnection", + "ec2:AllocateAddress", + "ec2:AssignPrivateIpAddresses", + 
"ec2:AssociateAddress", + "ec2:AssociateDhcpOptions", + "ec2:AssociateRouteTable", + "ec2:AttachClassicLinkVpc", + "ec2:AttachInternetGateway", + "ec2:AttachNetworkInterface", + "ec2:AttachVpnGateway", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateCustomerGateway", + "ec2:CreateDhcpOptions", + "ec2:CreateFlowLogs", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateNetworkAcl", + "ec2:CreateNetworkAcl", + "ec2:CreateNetworkAclEntry", + "ec2:CreateNetworkInterface", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVpc", + "ec2:CreateVpcEndpoint", + "ec2:CreateVpcPeeringConnection", + "ec2:CreateVpnConnection", + "ec2:CreateVpnConnectionRoute", + "ec2:CreateVpnGateway", + "ec2:DeleteCustomerGateway", + "ec2:DeleteDhcpOptions", + "ec2:DeleteFlowLogs", + "ec2:DeleteInternetGateway", + "ec2:DeleteNatGateway", + "ec2:DeleteNetworkAcl", + "ec2:DeleteNetworkAclEntry", + "ec2:DeleteNetworkInterface", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVpc", + "ec2:DeleteVpcEndpoints", + "ec2:DeleteVpcPeeringConnection", + "ec2:DeleteVpnConnection", + "ec2:DeleteVpnConnectionRoute", + "ec2:DeleteVpnGateway", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeCustomerGateways", + "ec2:DescribeDhcpOptions", + "ec2:DescribeFlowLogs", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeKeyPairs", + "ec2:DescribeMovingAddresses", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePrefixLists", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + 
"ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcEndpointServices", + "ec2:DescribeVpcPeeringConnections", + "ec2:DescribeVpcs", + "ec2:DescribeVpnConnections", + "ec2:DescribeVpnGateways", + "ec2:DetachClassicLinkVpc", + "ec2:DetachInternetGateway", + "ec2:DetachNetworkInterface", + "ec2:DetachVpnGateway", + "ec2:DisableVgwRoutePropagation", + "ec2:DisableVpcClassicLink", + "ec2:DisassociateAddress", + "ec2:DisassociateRouteTable", + "ec2:EnableVgwRoutePropagation", + "ec2:EnableVpcClassicLink", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVpcAttribute", + "ec2:ModifyVpcEndpoint", + "ec2:MoveAddressToVpc", + "ec2:RejectVpcPeeringConnection", + "ec2:ReleaseAddress", + "ec2:ReplaceNetworkAclAssociation", + "ec2:ReplaceNetworkAclEntry", + "ec2:ReplaceRoute", + "ec2:ReplaceRouteTableAssociation", + "ec2:ResetNetworkInterfaceAttribute", + "ec2:RestoreAddressToClassic", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:UnassignPrivateIpAddresses" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJBWPGNOVKZD3JI2P2", + "PolicyName": "AmazonVPCFullAccess", + "UpdateDate": "2015-12-17T17:25:44+00:00", + "VersionId": "v5" + }, + "AmazonVPCReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonVPCReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-12-17T17:25:56+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeAddresses", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeCustomerGateways", + "ec2:DescribeDhcpOptions", + "ec2:DescribeFlowLogs", + "ec2:DescribeInternetGateways", + "ec2:DescribeMovingAddresses", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePrefixLists", + "ec2:DescribeRouteTables", + 
"ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcEndpointServices", + "ec2:DescribeVpcPeeringConnections", + "ec2:DescribeVpcs", + "ec2:DescribeVpnConnections", + "ec2:DescribeVpnGateways" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIICZJNOJN36GTG6CM", + "PolicyName": "AmazonVPCReadOnlyAccess", + "UpdateDate": "2015-12-17T17:25:56+00:00", + "VersionId": "v4" + }, + "AmazonWorkMailFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonWorkMailFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-04-20T08:35:49+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ds:AuthorizeApplication", + "ds:CheckAlias", + "ds:CreateAlias", + "ds:CreateDirectory", + "ds:CreateIdentityPoolDirectory", + "ds:CreateDomain", + "ds:DeleteAlias", + "ds:DeleteDirectory", + "ds:DescribeDirectories", + "ds:ExtendDirectory", + "ds:GetDirectoryLimits", + "ds:ListAuthorizedApplications", + "ds:UnauthorizeApplication", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVpc", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSubnet", + "ec2:DeleteVpc", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeDomains", + "ec2:DescribeRouteTables", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "kms:DescribeKey", + "kms:ListAliases", + "ses:*", + "workmail:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJQVKNMT7SVATQ4AUY", + "PolicyName": "AmazonWorkMailFullAccess", + 
"UpdateDate": "2017-04-20T08:35:49+00:00", + "VersionId": "v3" + }, + "AmazonWorkMailReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonWorkMailReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:42+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ses:Describe*", + "ses:Get*", + "workmail:Describe*", + "workmail:Get*", + "workmail:List*", + "workmail:Search*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJHF7J65E2QFKCWAJM", + "PolicyName": "AmazonWorkMailReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:42+00:00", + "VersionId": "v1" + }, + "AmazonWorkSpacesAdmin": { + "Arn": "arn:aws:iam::aws:policy/AmazonWorkSpacesAdmin", + "AttachmentCount": 0, + "CreateDate": "2016-08-18T23:08:42+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "workspaces:CreateWorkspaces", + "workspaces:DescribeWorkspaces", + "workspaces:RebootWorkspaces", + "workspaces:RebuildWorkspaces", + "workspaces:TerminateWorkspaces", + "workspaces:DescribeWorkspaceDirectories", + "workspaces:DescribeWorkspaceBundles", + "workspaces:ModifyWorkspaceProperties", + "workspaces:StopWorkspaces", + "workspaces:StartWorkspaces", + "workspaces:DescribeWorkspacesConnectionStatus", + "workspaces:CreateTags", + "workspaces:DeleteTags", + "workspaces:DescribeTags", + "kms:ListKeys", + "kms:ListAliases", + "kms:DescribeKey" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ26AU6ATUQCT5KVJU", + "PolicyName": "AmazonWorkSpacesAdmin", + "UpdateDate": "2016-08-18T23:08:42+00:00", + "VersionId": "v2" + }, + "AmazonWorkSpacesApplicationManagerAdminAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonWorkSpacesApplicationManagerAdminAccess", + "AttachmentCount": 0, + 
"CreateDate": "2015-04-09T14:03:18+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "wam:AuthenticatePackager", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJPRL4KYETIH7XGTSS", + "PolicyName": "AmazonWorkSpacesApplicationManagerAdminAccess", + "UpdateDate": "2015-04-09T14:03:18+00:00", + "VersionId": "v1" + }, + "AmazonZocaloFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonZocaloFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:13+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "zocalo:*", + "ds:*", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVpc", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJLCDXYRINDMUXEVL6", + "PolicyName": "AmazonZocaloFullAccess", + "UpdateDate": "2015-02-06T18:41:13+00:00", + "VersionId": "v1" + }, + "AmazonZocaloReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonZocaloReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:14+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "zocalo:Describe*", + "ds:DescribeDirectories", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": 
"ANPAISRCSSJNS3QPKZJPM", + "PolicyName": "AmazonZocaloReadOnlyAccess", + "UpdateDate": "2015-02-06T18:41:14+00:00", + "VersionId": "v1" + }, + "ApplicationAutoScalingForAmazonAppStreamAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/ApplicationAutoScalingForAmazonAppStreamAccess", + "AttachmentCount": 0, + "CreateDate": "2017-02-06T21:39:56+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "appstream:UpdateFleet", + "appstream:DescribeFleets" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "cloudwatch:DescribeAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIEL3HJCCWFVHA6KPG", + "PolicyName": "ApplicationAutoScalingForAmazonAppStreamAccess", + "UpdateDate": "2017-02-06T21:39:56+00:00", + "VersionId": "v1" + }, + "AutoScalingConsoleFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AutoScalingConsoleFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-01-12T19:43:16+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateKeyPair", + "ec2:CreateSecurityGroup", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeImages", + "ec2:DescribeKeyPairs", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeVpcClassicLink", + "ec2:ImportKeyPair" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "elasticloadbalancing:Describe*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:ListMetrics", + "cloudwatch:GetMetricStatistics", + "cloudwatch:PutMetricAlarm", + "cloudwatch:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "autoscaling:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "sns:ListSubscriptions", + "sns:ListTopics" + ], + "Effect": 
"Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIYEN6FJGYYWJFFCZW", + "PolicyName": "AutoScalingConsoleFullAccess", + "UpdateDate": "2017-01-12T19:43:16+00:00", + "VersionId": "v1" + }, + "AutoScalingConsoleReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AutoScalingConsoleReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-01-12T19:48:53+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeVpcs", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSubnets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "elasticloadbalancing:Describe*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:ListMetrics", + "cloudwatch:GetMetricStatistics", + "cloudwatch:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "autoscaling:Describe*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "sns:ListSubscriptions", + "sns:ListTopics" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI3A7GDXOYQV3VUQMK", + "PolicyName": "AutoScalingConsoleReadOnlyAccess", + "UpdateDate": "2017-01-12T19:48:53+00:00", + "VersionId": "v1" + }, + "AutoScalingFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AutoScalingFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-01-12T19:31:58+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "autoscaling:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "cloudwatch:PutMetricAlarm", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIAWRCSJDDXDXGPCFU", + "PolicyName": "AutoScalingFullAccess", + 
"UpdateDate": "2017-01-12T19:31:58+00:00", + "VersionId": "v1" + }, + "AutoScalingNotificationAccessRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AutoScalingNotificationAccessRole", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:22+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sqs:SendMessage", + "sqs:GetQueueUrl", + "sns:Publish" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIO2VMUPGDC5PZVXVA", + "PolicyName": "AutoScalingNotificationAccessRole", + "UpdateDate": "2015-02-06T18:41:22+00:00", + "VersionId": "v1" + }, + "AutoScalingReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AutoScalingReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-01-12T19:39:35+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "autoscaling:Describe*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIAFWUVLC2LPLSFTFG", + "PolicyName": "AutoScalingReadOnlyAccess", + "UpdateDate": "2017-01-12T19:39:35+00:00", + "VersionId": "v1" + }, + "Billing": { + "Arn": "arn:aws:iam::aws:policy/job-function/Billing", + "AttachmentCount": 0, + "CreateDate": "2016-11-10T17:33:18+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "aws-portal:*Billing", + "aws-portal:*Usage", + "aws-portal:*PaymentMethods", + "budgets:ViewBudget", + "budgets:ModifyBudget" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/job-function/", + "PolicyId": "ANPAIFTHXT6FFMIRT7ZEA", + "PolicyName": "Billing", + "UpdateDate": "2016-11-10T17:33:18+00:00", + "VersionId": "v1" + }, + "CloudFrontFullAccess": { + "Arn": 
"arn:aws:iam::aws:policy/CloudFrontFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-01-21T17:03:57+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::*" + }, + { + "Action": [ + "acm:ListCertificates", + "cloudfront:*", + "iam:ListServerCertificates", + "waf:ListWebACLs", + "waf:GetWebACL" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIPRV52SH6HDCCFY6U", + "PolicyName": "CloudFrontFullAccess", + "UpdateDate": "2016-01-21T17:03:57+00:00", + "VersionId": "v3" + }, + "CloudFrontReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudFrontReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-01-21T17:03:28+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "acm:ListCertificates", + "cloudfront:Get*", + "cloudfront:List*", + "iam:ListServerCertificates", + "route53:List*", + "waf:ListWebACLs", + "waf:GetWebACL" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJJZMNYOTZCNQP36LG", + "PolicyName": "CloudFrontReadOnlyAccess", + "UpdateDate": "2016-01-21T17:03:28+00:00", + "VersionId": "v3" + }, + "CloudSearchFullAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudSearchFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:39:56+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudsearch:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIM6OOWKQ7L7VBOZOC", + "PolicyName": "CloudSearchFullAccess", + "UpdateDate": "2015-02-06T18:39:56+00:00", + "VersionId": "v1" + }, + 
"CloudSearchReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudSearchReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:39:57+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudsearch:Describe*", + "cloudsearch:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJWPLX7N7BCC3RZLHW", + "PolicyName": "CloudSearchReadOnlyAccess", + "UpdateDate": "2015-02-06T18:39:57+00:00", + "VersionId": "v1" + }, + "CloudWatchActionsEC2Access": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchActionsEC2Access", + "AttachmentCount": 0, + "CreateDate": "2015-07-07T00:00:33+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:Describe*", + "ec2:Describe*", + "ec2:RebootInstances", + "ec2:StopInstances", + "ec2:TerminateInstances" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIOWD4E3FVSORSZTGU", + "PolicyName": "CloudWatchActionsEC2Access", + "UpdateDate": "2015-07-07T00:00:33+00:00", + "VersionId": "v1" + }, + "CloudWatchEventsBuiltInTargetExecutionAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/CloudWatchEventsBuiltInTargetExecutionAccess", + "AttachmentCount": 0, + "CreateDate": "2016-01-14T18:35:49+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:Describe*", + "ec2:RebootInstances", + "ec2:StopInstances", + "ec2:TerminateInstances", + "ec2:CreateSnapshot" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "CloudWatchEventsBuiltInTargetExecutionAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIC5AQ5DATYSNF4AUM", + "PolicyName": 
"CloudWatchEventsBuiltInTargetExecutionAccess", + "UpdateDate": "2016-01-14T18:35:49+00:00", + "VersionId": "v1" + }, + "CloudWatchEventsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchEventsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-01-14T18:37:08+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "events:*", + "Effect": "Allow", + "Resource": "*", + "Sid": "CloudWatchEventsFullAccess" + }, + { + "Action": "iam:PassRole", + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/AWS_Events_Invoke_Targets", + "Sid": "IAMPassRoleForCloudWatchEvents" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJZLOYLNHESMYOJAFU", + "PolicyName": "CloudWatchEventsFullAccess", + "UpdateDate": "2016-01-14T18:37:08+00:00", + "VersionId": "v1" + }, + "CloudWatchEventsInvocationAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/CloudWatchEventsInvocationAccess", + "AttachmentCount": 0, + "CreateDate": "2016-01-14T18:36:33+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "kinesis:PutRecord" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "CloudWatchEventsInvocationAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJJXD6JKJLK2WDLZNO", + "PolicyName": "CloudWatchEventsInvocationAccess", + "UpdateDate": "2016-01-14T18:36:33+00:00", + "VersionId": "v1" + }, + "CloudWatchEventsReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchEventsReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-10T17:25:34+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "events:DescribeRule", + "events:ListRuleNamesByTarget", + "events:ListRules", + "events:ListTargetsByRule", + "events:TestEventPattern", + "events:DescribeEventBus" + ], + "Effect": "Allow", + "Resource": 
"*", + "Sid": "CloudWatchEventsReadOnlyAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIILJPXXA6F7GYLYBS", + "PolicyName": "CloudWatchEventsReadOnlyAccess", + "UpdateDate": "2017-08-10T17:25:34+00:00", + "VersionId": "v2" + }, + "CloudWatchFullAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:00+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:Describe*", + "cloudwatch:*", + "logs:*", + "sns:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIKEABORKUXN6DEAZU", + "PolicyName": "CloudWatchFullAccess", + "UpdateDate": "2015-02-06T18:40:00+00:00", + "VersionId": "v1" + }, + "CloudWatchLogsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:02+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ3ZGNWK2R5HW5BQFO", + "PolicyName": "CloudWatchLogsFullAccess", + "UpdateDate": "2015-02-06T18:40:02+00:00", + "VersionId": "v1" + }, + "CloudWatchLogsReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-14T22:22:16+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "logs:Describe*", + "logs:Get*", + "logs:List*", + "logs:TestMetricFilter", + "logs:FilterLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": 
"ANPAJ2YIYDYSNNEHK3VKW", + "PolicyName": "CloudWatchLogsReadOnlyAccess", + "UpdateDate": "2017-08-14T22:22:16+00:00", + "VersionId": "v3" + }, + "CloudWatchReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:01+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:Describe*", + "cloudwatch:Describe*", + "cloudwatch:Get*", + "cloudwatch:List*", + "logs:Get*", + "logs:Describe*", + "logs:TestMetricFilter", + "sns:Get*", + "sns:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJN23PDQP7SZQAE3QE", + "PolicyName": "CloudWatchReadOnlyAccess", + "UpdateDate": "2015-02-06T18:40:01+00:00", + "VersionId": "v1" + }, + "DataScientist": { + "Arn": "arn:aws:iam::aws:policy/job-function/DataScientist", + "AttachmentCount": 0, + "CreateDate": "2016-11-10T17:28:48+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:*", + "cloudwatch:*", + "cloudformation:CreateStack", + "cloudformation:DescribeStackEvents", + "datapipeline:Describe*", + "datapipeline:ListPipelines", + "datapipeline:GetPipelineDefinition", + "datapipeline:QueryObjects", + "dynamodb:*", + "ec2:CancelSpotInstanceRequests", + "ec2:CancelSpotFleetRequests", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:Describe*", + "ec2:ModifyImageAttribute", + "ec2:ModifyInstanceAttribute", + "ec2:ModifySpotFleetRequest", + "ec2:RequestSpotInstances", + "ec2:RequestSpotFleet", + "elasticfilesystem:*", + "elasticmapreduce:*", + "es:*", + "firehose:*", + "iam:GetInstanceProfile", + "iam:GetRole", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:ListRoles", + "kinesis:*", + "kms:List*", + "lambda:Create*", + "lambda:Delete*", + "lambda:Get*", + "lambda:InvokeFunction", + "lambda:PublishVersion", + "lambda:Update*", + 
"lambda:List*", + "machinelearning:*", + "sdb:*", + "rds:*", + "sns:ListSubscriptions", + "sns:ListTopics", + "logs:DescribeLogStreams", + "logs:GetLogEvents", + "redshift:*", + "s3:CreateBucket", + "sns:CreateTopic", + "sns:Get*", + "sns:List*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:Abort*", + "s3:DeleteObject", + "s3:Get*", + "s3:List*", + "s3:PutAccelerateConfiguration", + "s3:PutBucketLogging", + "s3:PutBucketNotification", + "s3:PutBucketTagging", + "s3:PutObject", + "s3:Replicate*", + "s3:RestoreObject" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:RunInstances", + "ec2:TerminateInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:GetRole", + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/DataPipelineDefaultRole", + "arn:aws:iam::*:role/DataPipelineDefaultResourceRole", + "arn:aws:iam::*:role/EMR_EC2_DefaultRole", + "arn:aws:iam::*:role/EMR_DefaultRole", + "arn:aws:iam::*:role/kinesis-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/job-function/", + "PolicyId": "ANPAJ5YHI2BQW7EQFYDXS", + "PolicyName": "DataScientist", + "UpdateDate": "2016-11-10T17:28:48+00:00", + "VersionId": "v1" + }, + "DatabaseAdministrator": { + "Arn": "arn:aws:iam::aws:policy/job-function/DatabaseAdministrator", + "AttachmentCount": 0, + "CreateDate": "2016-11-10T17:25:43+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:DeleteAlarms", + "cloudwatch:Describe*", + "cloudwatch:DisableAlarmActions", + "cloudwatch:EnableAlarmActions", + "cloudwatch:Get*", + "cloudwatch:List*", + "cloudwatch:PutMetricAlarm", + "datapipeline:ActivatePipeline", + "datapipeline:CreatePipeline", + "datapipeline:DeletePipeline", + "datapipeline:DescribeObjects", + "datapipeline:DescribePipelines", + "datapipeline:GetPipelineDefinition", + 
"datapipeline:ListPipelines", + "datapipeline:PutPipelineDefinition", + "datapipeline:QueryObjects", + "dynamodb:*", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "elasticache:*", + "iam:ListRoles", + "iam:GetRole", + "kms:ListKeys", + "lambda:CreateEventSourceMapping", + "lambda:CreateFunction", + "lambda:DeleteEventSourceMapping", + "lambda:DeleteFunction", + "lambda:GetFunctionConfiguration", + "lambda:ListEventSourceMappings", + "lambda:ListFunctions", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:FilterLogEvents", + "logs:GetLogEvents", + "logs:Create*", + "logs:PutLogEvents", + "logs:PutMetricFilter", + "rds:*", + "redshift:*", + "s3:CreateBucket", + "sns:CreateTopic", + "sns:DeleteTopic", + "sns:Get*", + "sns:List*", + "sns:SetTopicAttributes", + "sns:Subscribe", + "sns:Unsubscribe" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:AbortMultipartUpload", + "s3:DeleteObject*", + "s3:Get*", + "s3:List*", + "s3:PutAccelerateConfiguration", + "s3:PutBucketTagging", + "s3:PutBucketVersioning", + "s3:PutBucketWebsite", + "s3:PutLifecycleConfiguration", + "s3:PutReplicationConfiguration", + "s3:PutObject*", + "s3:Replicate*", + "s3:RestoreObject" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:GetRole", + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/rds-monitoring-role", + "arn:aws:iam::*:role/rdbms-lambda-access", + "arn:aws:iam::*:role/lambda_exec_role", + "arn:aws:iam::*:role/lambda-dynamodb-*", + "arn:aws:iam::*:role/lambda-vpc-execution-role", + "arn:aws:iam::*:role/DataPipelineDefaultRole", + "arn:aws:iam::*:role/DataPipelineDefaultResourceRole" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/job-function/", + "PolicyId": 
"ANPAIGBMAW4VUQKOQNVT6", + "PolicyName": "DatabaseAdministrator", + "UpdateDate": "2016-11-10T17:25:43+00:00", + "VersionId": "v1" + }, + "IAMFullAccess": { + "Arn": "arn:aws:iam::aws:policy/IAMFullAccess", + "AttachmentCount": 2, + "CreateDate": "2015-02-06T18:40:38+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "iam:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI7XKCFMBPM3QQRRVQ", + "PolicyName": "IAMFullAccess", + "UpdateDate": "2015-02-06T18:40:38+00:00", + "VersionId": "v1" + }, + "IAMReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/IAMReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2016-09-06T17:06:37+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "iam:GenerateCredentialReport", + "iam:GenerateServiceLastAccessedDetails", + "iam:Get*", + "iam:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJKSO7NDY4T57MWDSQ", + "PolicyName": "IAMReadOnlyAccess", + "UpdateDate": "2016-09-06T17:06:37+00:00", + "VersionId": "v3" + }, + "IAMSelfManageServiceSpecificCredentials": { + "Arn": "arn:aws:iam::aws:policy/IAMSelfManageServiceSpecificCredentials", + "AttachmentCount": 0, + "CreateDate": "2016-12-22T17:25:18+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:ListServiceSpecificCredentials", + "iam:UpdateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ResetServiceSpecificCredential" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:user/${aws:username}" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAI4VT74EMXK2PMQJM2", + "PolicyName": 
"IAMSelfManageServiceSpecificCredentials", + "UpdateDate": "2016-12-22T17:25:18+00:00", + "VersionId": "v1" + }, + "IAMUserChangePassword": { + "Arn": "arn:aws:iam::aws:policy/IAMUserChangePassword", + "AttachmentCount": 1, + "CreateDate": "2016-11-15T23:18:55+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "iam:ChangePassword" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:user/${aws:username}" + ] + }, + { + "Action": [ + "iam:GetAccountPasswordPolicy" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ4L4MM2A7QIEB56MS", + "PolicyName": "IAMUserChangePassword", + "UpdateDate": "2016-11-15T23:18:55+00:00", + "VersionId": "v2" + }, + "IAMUserSSHKeys": { + "Arn": "arn:aws:iam::aws:policy/IAMUserSSHKeys", + "AttachmentCount": 1, + "CreateDate": "2015-07-09T17:08:54+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:user/${aws:username}" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJTSHUA4UXGXU7ANUA", + "PolicyName": "IAMUserSSHKeys", + "UpdateDate": "2015-07-09T17:08:54+00:00", + "VersionId": "v1" + }, + "NetworkAdministrator": { + "Arn": "arn:aws:iam::aws:policy/job-function/NetworkAdministrator", + "AttachmentCount": 0, + "CreateDate": "2017-03-20T18:44:58+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:Describe*", + "ec2:AllocateAddress", + "ec2:AssignPrivateIpAddresses", + "ec2:AssociateAddress", + "ec2:AssociateDhcpOptions", + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", + "ec2:AttachNetworkInterface", + 
"ec2:AttachVpnGateway", + "ec2:CreateCustomerGateway", + "ec2:CreateDhcpOptions", + "ec2:CreateFlowLogs", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateNetworkAcl", + "ec2:CreateNetworkAcl", + "ec2:CreateNetworkAclEntry", + "ec2:CreateNetworkInterface", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVpc", + "ec2:CreateVpcEndpoint", + "ec2:CreateVpnConnection", + "ec2:CreateVpnConnectionRoute", + "ec2:CreateVpnGateway", + "ec2:CreatePlacementGroup", + "ec2:DeletePlacementGroup", + "ec2:DescribePlacementGroups", + "ec2:DeleteFlowLogs", + "ec2:DeleteNatGateway", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVpc", + "ec2:DeleteVpcEndpoints", + "ec2:DeleteVpnConnection", + "ec2:DeleteVpnConnectionRoute", + "ec2:DeleteVpnGateway", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeCustomerGateways", + "ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeDhcpOptions", + "ec2:DescribeFlowLogs", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeKeyPairs", + "ec2:DescribeMovingAddresses", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePrefixLists", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcEndpointServices", + "ec2:DescribeVpcPeeringConnections", + "ec2:DescribeVpcs", + "ec2:DescribeVpnConnections", + "ec2:DescribeVpnGateways", + "ec2:DetachInternetGateway", + "ec2:DetachNetworkInterface", + "ec2:DetachVpnGateway", + "ec2:DisableVgwRoutePropagation", + "ec2:DisassociateAddress", + "ec2:DisassociateRouteTable", + "ec2:EnableVgwRoutePropagation", + 
"ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVpcAttribute", + "ec2:ModifyVpcEndpoint", + "ec2:MoveAddressToVpc", + "ec2:ReleaseAddress", + "ec2:ReplaceNetworkAclAssociation", + "ec2:ReplaceNetworkAclEntry", + "ec2:ReplaceRoute", + "ec2:ReplaceRouteTableAssociation", + "ec2:ResetNetworkInterfaceAttribute", + "ec2:RestoreAddressToClassic", + "ec2:UnassignPrivateIpAddresses", + "directconnect:*", + "route53:*", + "route53domains:*", + "cloudfront:ListDistributions", + "elasticloadbalancing:*", + "elasticbeanstalk:Describe*", + "elasticbeanstalk:List*", + "elasticbeanstalk:RetrieveEnvironmentInfo", + "elasticbeanstalk:RequestEnvironmentInfo", + "sns:ListTopics", + "sns:ListSubscriptionsByTopic", + "sns:CreateTopic", + "cloudwatch:DescribeAlarms", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DeleteAlarms", + "cloudwatch:GetMetricStatistics", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:AcceptVpcPeeringConnection", + "ec2:AttachClassicLinkVpc", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateVpcPeeringConnection", + "ec2:DeleteCustomerGateway", + "ec2:DeleteDhcpOptions", + "ec2:DeleteInternetGateway", + "ec2:DeleteNetworkAcl", + "ec2:DeleteNetworkAclEntry", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DeleteVpcPeeringConnection", + "ec2:DetachClassicLinkVpc", + "ec2:DisableVpcClassicLink", + "ec2:EnableVpcClassicLink", + "ec2:GetConsoleScreenshot", + "ec2:RejectVpcPeeringConnection", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:ListBucket", + "s3:GetBucketLocation", + "s3:GetBucketWebsiteConfiguration" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:GetRole", + "iam:ListRoles", + 
"iam:PassRole" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/flow-logs-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/job-function/", + "PolicyId": "ANPAJPNMADZFJCVPJVZA2", + "PolicyName": "NetworkAdministrator", + "UpdateDate": "2017-03-20T18:44:58+00:00", + "VersionId": "v2" + }, + "PowerUserAccess": { + "Arn": "arn:aws:iam::aws:policy/PowerUserAccess", + "AttachmentCount": 0, + "CreateDate": "2016-12-06T18:11:16+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Effect": "Allow", + "NotAction": [ + "iam:*", + "organizations:*" + ], + "Resource": "*" + }, + { + "Action": "organizations:DescribeOrganization", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJYRXTHIB4FOVS3ZXS", + "PolicyName": "PowerUserAccess", + "UpdateDate": "2016-12-06T18:11:16+00:00", + "VersionId": "v2" + }, + "QuickSightAccessForS3StorageManagementAnalyticsReadOnly": { + "Arn": "arn:aws:iam::aws:policy/service-role/QuickSightAccessForS3StorageManagementAnalyticsReadOnly", + "AttachmentCount": 0, + "CreateDate": "2017-07-21T00:02:14+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "s3:GetObject", + "s3:GetObjectMetadata" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::s3-analytics-export-shared-*" + ] + }, + { + "Action": [ + "s3:GetAnalyticsConfiguration", + "s3:ListAllMyBuckets", + "s3:GetBucketLocation" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIFWG3L3WDMR4I7ZJW", + "PolicyName": "QuickSightAccessForS3StorageManagementAnalyticsReadOnly", + "UpdateDate": "2017-07-21T00:02:14+00:00", + "VersionId": "v3" + }, + "RDSCloudHsmAuthorizationRole": { + "Arn": 
"arn:aws:iam::aws:policy/service-role/RDSCloudHsmAuthorizationRole", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:29+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudhsm:CreateLunaClient", + "cloudhsm:GetClientConfiguration", + "cloudhsm:DeleteLunaClient", + "cloudhsm:DescribeLunaClient", + "cloudhsm:ModifyLunaClient", + "cloudhsm:DescribeHapg", + "cloudhsm:ModifyHapg", + "cloudhsm:GetConfig" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAIWKFXRLQG2ROKKXLE", + "PolicyName": "RDSCloudHsmAuthorizationRole", + "UpdateDate": "2015-02-06T18:41:29+00:00", + "VersionId": "v1" + }, + "ReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/ReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-07-20T17:43:06+00:00", + "DefaultVersionId": "v29", + "Document": { + "Statement": [ + { + "Action": [ + "acm:Describe*", + "acm:Get*", + "acm:List*", + "apigateway:GET", + "application-autoscaling:Describe*", + "appstream:Describe*", + "appstream:Get*", + "appstream:List*", + "athena:List*", + "athena:Batch*", + "athena:Get*", + "autoscaling:Describe*", + "batch:List*", + "batch:Describe*", + "clouddirectory:List*", + "clouddirectory:BatchRead", + "clouddirectory:Get*", + "clouddirectory:LookupPolicy", + "cloudformation:Describe*", + "cloudformation:Get*", + "cloudformation:List*", + "cloudformation:Estimate*", + "cloudformation:Preview*", + "cloudfront:Get*", + "cloudfront:List*", + "cloudhsm:List*", + "cloudhsm:Describe*", + "cloudhsm:Get*", + "cloudsearch:Describe*", + "cloudsearch:List*", + "cloudtrail:Describe*", + "cloudtrail:Get*", + "cloudtrail:List*", + "cloudtrail:LookupEvents", + "cloudwatch:Describe*", + "cloudwatch:Get*", + "cloudwatch:List*", + "codebuild:BatchGet*", + "codebuild:List*", + "codecommit:BatchGet*", + "codecommit:Get*", + "codecommit:GitPull", + 
"codecommit:List*", + "codedeploy:BatchGet*", + "codedeploy:Get*", + "codedeploy:List*", + "codepipeline:List*", + "codepipeline:Get*", + "codestar:List*", + "codestar:Describe*", + "codestar:Get*", + "codestar:Verify*", + "cognito-identity:List*", + "cognito-identity:Describe*", + "cognito-identity:Lookup*", + "cognito-sync:List*", + "cognito-sync:Describe*", + "cognito-sync:Get*", + "cognito-sync:QueryRecords", + "cognito-idp:AdminList*", + "cognito-idp:List*", + "cognito-idp:Describe*", + "cognito-idp:Get*", + "config:Deliver*", + "config:Describe*", + "config:Get*", + "config:List*", + "connect:List*", + "connect:Describe*", + "connect:Get*", + "datapipeline:Describe*", + "datapipeline:EvaluateExpression", + "datapipeline:Get*", + "datapipeline:List*", + "datapipeline:QueryObjects", + "datapipeline:Validate*", + "directconnect:Describe*", + "directconnect:Confirm*", + "devicefarm:List*", + "devicefarm:Get*", + "discovery:Describe*", + "discovery:List*", + "discovery:Get*", + "dms:Describe*", + "dms:List*", + "dms:Test*", + "ds:Check*", + "ds:Describe*", + "ds:Get*", + "ds:List*", + "ds:Verify*", + "dynamodb:BatchGet*", + "dynamodb:Describe*", + "dynamodb:Get*", + "dynamodb:List*", + "dynamodb:Query", + "dynamodb:Scan", + "ec2:Describe*", + "ec2:Get*", + "ec2messages:Get*", + "ecr:BatchCheck*", + "ecr:BatchGet*", + "ecr:Describe*", + "ecr:Get*", + "ecr:List*", + "ecs:Describe*", + "ecs:List*", + "elasticache:Describe*", + "elasticache:List*", + "elasticbeanstalk:Check*", + "elasticbeanstalk:Describe*", + "elasticbeanstalk:List*", + "elasticbeanstalk:Request*", + "elasticbeanstalk:Retrieve*", + "elasticbeanstalk:Validate*", + "elasticfilesystem:Describe*", + "elasticloadbalancing:Describe*", + "elasticmapreduce:Describe*", + "elasticmapreduce:List*", + "elasticmapreduce:View*", + "elastictranscoder:List*", + "elastictranscoder:Read*", + "es:Describe*", + "es:List*", + "es:ESHttpGet", + "es:ESHttpHead", + "events:Describe*", + "events:List*", + "events:Test*", + 
"firehose:Describe*", + "firehose:List*", + "gamelift:List*", + "gamelift:Get*", + "gamelift:Describe*", + "gamelift:RequestUploadCredentials", + "gamelift:ResolveAlias", + "gamelift:Search*", + "glacier:List*", + "glacier:Describe*", + "glacier:Get*", + "health:Describe*", + "health:Get*", + "health:List*", + "iam:Generate*", + "iam:Get*", + "iam:List*", + "iam:Simulate*", + "importexport:Get*", + "importexport:List*", + "inspector:Describe*", + "inspector:Get*", + "inspector:List*", + "inspector:Preview*", + "inspector:LocalizeText", + "iot:Describe*", + "iot:Get*", + "iot:List*", + "kinesisanalytics:Describe*", + "kinesisanalytics:Discover*", + "kinesisanalytics:Get*", + "kinesisanalytics:List*", + "kinesis:Describe*", + "kinesis:Get*", + "kinesis:List*", + "kms:Describe*", + "kms:Get*", + "kms:List*", + "lambda:List*", + "lambda:Get*", + "lex:Get*", + "lightsail:Get*", + "lightsail:Is*", + "lightsail:Download*", + "logs:Describe*", + "logs:Get*", + "logs:FilterLogEvents", + "logs:ListTagsLogGroup", + "logs:TestMetricFilter", + "machinelearning:Describe*", + "machinelearning:Get*", + "mobileanalytics:Get*", + "mobilehub:Get*", + "mobilehub:List*", + "mobilehub:Validate*", + "mobilehub:Verify*", + "mobiletargeting:Get*", + "opsworks:Describe*", + "opsworks:Get*", + "opsworks-cm:Describe*", + "organizations:Describe*", + "organizations:List*", + "polly:Describe*", + "polly:Get*", + "polly:List*", + "polly:SynthesizeSpeech", + "rekognition:CompareFaces", + "rekognition:Detect*", + "rekognition:List*", + "rekognition:Search*", + "rds:Describe*", + "rds:List*", + "rds:Download*", + "redshift:Describe*", + "redshift:View*", + "redshift:Get*", + "route53:Get*", + "route53:List*", + "route53:Test*", + "route53domains:Check*", + "route53domains:Get*", + "route53domains:List*", + "route53domains:View*", + "s3:Get*", + "s3:List*", + "s3:Head*", + "sdb:Get*", + "sdb:List*", + "sdb:Select*", + "servicecatalog:List*", + "servicecatalog:Scan*", + "servicecatalog:Search*", + 
"servicecatalog:Describe*", + "ses:Get*", + "ses:List*", + "ses:Describe*", + "ses:Verify*", + "shield:Describe*", + "shield:List*", + "sns:Get*", + "sns:List*", + "sns:Check*", + "sqs:Get*", + "sqs:List*", + "sqs:Receive*", + "ssm:Describe*", + "ssm:Get*", + "ssm:List*", + "states:List*", + "states:Describe*", + "states:GetExecutionHistory", + "storagegateway:Describe*", + "storagegateway:List*", + "sts:Get*", + "swf:Count*", + "swf:Describe*", + "swf:Get*", + "swf:List*", + "tag:Get*", + "trustedadvisor:Describe*", + "waf:Get*", + "waf:List*", + "waf-regional:List*", + "waf-regional:Get*", + "workdocs:Describe*", + "workdocs:Get*", + "workdocs:CheckAlias", + "workmail:Describe*", + "workmail:Get*", + "workmail:List*", + "workmail:Search*", + "workspaces:Describe*", + "xray:BatchGet*", + "xray:Get*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAILL3HVNFSB6DCOWYQ", + "PolicyName": "ReadOnlyAccess", + "UpdateDate": "2017-07-20T17:43:06+00:00", + "VersionId": "v29" + }, + "ResourceGroupsandTagEditorFullAccess": { + "Arn": "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:39:53+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "tag:getResources", + "tag:getTagKeys", + "tag:getTagValues", + "tag:addResourceTags", + "tag:removeResourceTags" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJNOS54ZFXN4T2Y34A", + "PolicyName": "ResourceGroupsandTagEditorFullAccess", + "UpdateDate": "2015-02-06T18:39:53+00:00", + "VersionId": "v1" + }, + "ResourceGroupsandTagEditorReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:39:54+00:00", + 
"DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "tag:getResources", + "tag:getTagKeys", + "tag:getTagValues" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJHXQTPI5I5JKAIU74", + "PolicyName": "ResourceGroupsandTagEditorReadOnlyAccess", + "UpdateDate": "2015-02-06T18:39:54+00:00", + "VersionId": "v1" + }, + "SecurityAudit": { + "Arn": "arn:aws:iam::aws:policy/SecurityAudit", + "AttachmentCount": 0, + "CreateDate": "2017-07-12T20:16:44+00:00", + "DefaultVersionId": "v12", + "Document": { + "Statement": [ + { + "Action": [ + "acm:ListCertificates", + "acm:DescribeCertificate", + "cloudformation:getStackPolicy", + "logs:describeLogGroups", + "logs:describeMetricFilters", + "autoscaling:Describe*", + "cloudformation:DescribeStack*", + "cloudformation:GetTemplate", + "cloudformation:ListStack*", + "cloudfront:Get*", + "cloudfront:List*", + "cloudtrail:DescribeTrails", + "cloudtrail:GetTrailStatus", + "cloudtrail:ListTags", + "cloudwatch:Describe*", + "codecommit:BatchGetRepositories", + "codecommit:GetBranch", + "codecommit:GetObjectIdentifier", + "codecommit:GetRepository", + "codecommit:List*", + "codedeploy:Batch*", + "codedeploy:Get*", + "codedeploy:List*", + "config:Deliver*", + "config:Describe*", + "config:Get*", + "datapipeline:DescribeObjects", + "datapipeline:DescribePipelines", + "datapipeline:EvaluateExpression", + "datapipeline:GetPipelineDefinition", + "datapipeline:ListPipelines", + "datapipeline:QueryObjects", + "datapipeline:ValidatePipelineDefinition", + "directconnect:Describe*", + "dynamodb:ListTables", + "ec2:Describe*", + "ecs:Describe*", + "ecs:List*", + "elasticache:Describe*", + "elasticbeanstalk:Describe*", + "elasticloadbalancing:Describe*", + "elasticmapreduce:DescribeJobFlows", + "elasticmapreduce:ListClusters", + "elasticmapreduce:ListInstances", + "es:ListDomainNames", + "es:Describe*", + 
"firehose:Describe*", + "firehose:List*", + "glacier:DescribeVault", + "glacier:GetVaultAccessPolicy", + "glacier:ListVaults", + "iam:GenerateCredentialReport", + "iam:Get*", + "iam:List*", + "kms:Describe*", + "kms:Get*", + "kms:List*", + "lambda:GetPolicy", + "lambda:ListFunctions", + "rds:Describe*", + "rds:DownloadDBLogFilePortion", + "rds:ListTagsForResource", + "redshift:Describe*", + "route53:GetChange", + "route53:GetCheckerIpRanges", + "route53:GetGeoLocation", + "route53:GetHealthCheck", + "route53:GetHealthCheckCount", + "route53:GetHealthCheckLastFailureReason", + "route53:GetHostedZone", + "route53:GetHostedZoneCount", + "route53:GetReusableDelegationSet", + "route53:ListGeoLocations", + "route53:ListHealthChecks", + "route53:ListHostedZones", + "route53:ListHostedZonesByName", + "route53:ListResourceRecordSets", + "route53:ListReusableDelegationSets", + "route53:ListTagsForResource", + "route53:ListTagsForResources", + "route53domains:GetDomainDetail", + "route53domains:GetOperationDetail", + "route53domains:ListDomains", + "route53domains:ListOperations", + "route53domains:ListTagsForDomain", + "s3:GetBucket*", + "s3:GetAccelerateConfiguration", + "s3:GetAnalyticsConfiguration", + "s3:GetInventoryConfiguration", + "s3:GetMetricsConfiguration", + "s3:GetReplicationConfiguration", + "s3:GetLifecycleConfiguration", + "s3:GetObjectAcl", + "s3:GetObjectVersionAcl", + "s3:ListAllMyBuckets", + "sdb:DomainMetadata", + "sdb:ListDomains", + "ses:GetIdentityDkimAttributes", + "ses:GetIdentityVerificationAttributes", + "ses:ListIdentities", + "sns:GetTopicAttributes", + "sns:ListSubscriptionsByTopic", + "sns:ListTopics", + "sqs:GetQueueAttributes", + "sqs:ListQueues", + "tag:GetResources", + "tag:GetTagKeys" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIX2T3QCXHR2OGGCTO", + "PolicyName": "SecurityAudit", + "UpdateDate": 
"2017-07-12T20:16:44+00:00", + "VersionId": "v12" + }, + "ServerMigrationConnector": { + "Arn": "arn:aws:iam::aws:policy/ServerMigrationConnector", + "AttachmentCount": 0, + "CreateDate": "2016-10-24T21:45:56+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "iam:GetUser", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "sms:SendMessage", + "sms:GetMessages" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:DeleteObject", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:PutObject", + "s3:PutObjectAcl", + "s3:PutLifecycleConfiguration", + "s3:AbortMultipartUpload", + "s3:ListBucketMultipartUploads", + "s3:ListMultipartUploadParts" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::sms-b-*", + "arn:aws:s3:::import-to-ec2-*", + "arn:aws:s3:::server-migration-service-upgrade", + "arn:aws:s3:::server-migration-service-upgrade/*", + "arn:aws:s3:::connector-platform-upgrade-info/*", + "arn:aws:s3:::connector-platform-upgrade-info", + "arn:aws:s3:::connector-platform-upgrade-bundles/*", + "arn:aws:s3:::connector-platform-upgrade-bundles", + "arn:aws:s3:::connector-platform-release-notes/*", + "arn:aws:s3:::connector-platform-release-notes" + ] + }, + { + "Action": "awsconnector:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "SNS:Publish" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:metrics-sns-topic-for-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJKZRWXIPK5HSG3QDQ", + "PolicyName": "ServerMigrationConnector", + "UpdateDate": "2016-10-24T21:45:56+00:00", + "VersionId": "v1" + }, + "ServerMigrationServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/ServerMigrationServiceRole", + "AttachmentCount": 0, + "CreateDate": "2017-06-16T18:02:04+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { 
+ "Action": [ + "ec2:ModifySnapshotAttribute", + "ec2:CopySnapshot", + "ec2:CopyImage", + "ec2:Describe*", + "ec2:DeleteSnapshot", + "ec2:DeregisterImage", + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJMBH3M6BO63XFW2D4", + "PolicyName": "ServerMigrationServiceRole", + "UpdateDate": "2017-06-16T18:02:04+00:00", + "VersionId": "v2" + }, + "ServiceCatalogAdminFullAccess": { + "Arn": "arn:aws:iam::aws:policy/ServiceCatalogAdminFullAccess", + "AttachmentCount": 0, + "CreateDate": "2016-11-11T18:40:24+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "catalog-admin:*", + "catalog-user:*", + "cloudformation:CreateStack", + "cloudformation:CreateUploadBucket", + "cloudformation:DeleteStack", + "cloudformation:DescribeStackEvents", + "cloudformation:DescribeStacks", + "cloudformation:GetTemplateSummary", + "cloudformation:SetStackPolicy", + "cloudformation:ValidateTemplate", + "cloudformation:UpdateStack", + "iam:GetGroup", + "iam:GetRole", + "iam:GetUser", + "iam:ListGroups", + "iam:ListRoles", + "iam:ListUsers", + "iam:PassRole", + "s3:CreateBucket", + "s3:GetObject", + "s3:PutObject", + "servicecatalog:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIKTX42IAS75B7B7BY", + "PolicyName": "ServiceCatalogAdminFullAccess", + "UpdateDate": "2016-11-11T18:40:24+00:00", + "VersionId": "v2" + }, + "ServiceCatalogAdminReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/ServiceCatalogAdminReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-08T18:57:36+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "catalog-admin:DescribeConstraints", + "catalog-admin:DescribeListingForProduct", + 
"catalog-admin:DescribeListings", + "catalog-admin:DescribePortfolios", + "catalog-admin:DescribeProductVersions", + "catalog-admin:GetPortfolioCount", + "catalog-admin:GetPortfolios", + "catalog-admin:GetProductCounts", + "catalog-admin:ListAllPortfolioConstraints", + "catalog-admin:ListPortfolioConstraints", + "catalog-admin:ListPortfolios", + "catalog-admin:ListPrincipalConstraints", + "catalog-admin:ListProductConstraints", + "catalog-admin:ListResourceUsers", + "catalog-admin:ListTagsForResource", + "catalog-admin:SearchListings", + "catalog-user:*", + "cloudformation:DescribeStackEvents", + "cloudformation:DescribeStacks", + "cloudformation:GetTemplateSummary", + "iam:GetGroup", + "iam:GetRole", + "iam:GetUser", + "iam:ListGroups", + "iam:ListRoles", + "iam:ListUsers", + "s3:GetObject", + "servicecatalog:DescribeTagOption", + "servicecatalog:GetTagOptionMigrationStatus", + "servicecatalog:ListResourcesForTagOption", + "servicecatalog:ListTagOptions", + "servicecatalog:AccountLevelDescribeRecord", + "servicecatalog:AccountLevelListRecordHistory", + "servicecatalog:AccountLevelScanProvisionedProducts", + "servicecatalog:DescribeProduct", + "servicecatalog:DescribeProductView", + "servicecatalog:DescribeProvisioningParameters", + "servicecatalog:DescribeProvisionedProduct", + "servicecatalog:DescribeRecord", + "servicecatalog:ListLaunchPaths", + "servicecatalog:ListRecordHistory", + "servicecatalog:ScanProvisionedProducts", + "servicecatalog:SearchProducts", + "servicecatalog:DescribeConstraint", + "servicecatalog:DescribeProductAsAdmin", + "servicecatalog:DescribePortfolio", + "servicecatalog:DescribeProvisioningArtifact", + "servicecatalog:ListAcceptedPortfolioShares", + "servicecatalog:ListConstraintsForPortfolio", + "servicecatalog:ListPortfolioAccess", + "servicecatalog:ListPortfolios", + "servicecatalog:ListPortfoliosForProduct", + "servicecatalog:ListPrincipalsForPortfolio", + "servicecatalog:ListProvisioningArtifacts", + 
"servicecatalog:SearchProductsAsAdmin" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ7XOUSS75M4LIPKO4", + "PolicyName": "ServiceCatalogAdminReadOnlyAccess", + "UpdateDate": "2017-08-08T18:57:36+00:00", + "VersionId": "v5" + }, + "ServiceCatalogEndUserAccess": { + "Arn": "arn:aws:iam::aws:policy/ServiceCatalogEndUserAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-08T18:58:57+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "catalog-user:*", + "s3:GetObject", + "servicecatalog:DescribeProduct", + "servicecatalog:DescribeProductView", + "servicecatalog:DescribeProvisioningParameters", + "servicecatalog:ListLaunchPaths", + "servicecatalog:SearchProducts" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "servicecatalog:ListRecordHistory", + "servicecatalog:DescribeProvisionedProduct", + "servicecatalog:DescribeRecord", + "servicecatalog:ScanProvisionedProducts" + ], + "Condition": { + "StringEquals": { + "servicecatalog:userLevel": "self" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJ56OMCO72RI4J5FSA", + "PolicyName": "ServiceCatalogEndUserAccess", + "UpdateDate": "2017-08-08T18:58:57+00:00", + "VersionId": "v4" + }, + "ServiceCatalogEndUserFullAccess": { + "Arn": "arn:aws:iam::aws:policy/ServiceCatalogEndUserFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-08-08T18:58:54+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "catalog-user:*", + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStackEvents", + "cloudformation:DescribeStacks", + "cloudformation:GetTemplateSummary", + "cloudformation:SetStackPolicy", + "cloudformation:ValidateTemplate", + 
"cloudformation:UpdateStack", + "servicecatalog:DescribeProduct", + "servicecatalog:DescribeProductView", + "servicecatalog:DescribeProvisioningParameters", + "servicecatalog:ListLaunchPaths", + "servicecatalog:ProvisionProduct", + "servicecatalog:SearchProducts", + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "servicecatalog:DescribeProvisionedProduct", + "servicecatalog:DescribeRecord", + "servicecatalog:ListRecordHistory", + "servicecatalog:ScanProvisionedProducts", + "servicecatalog:TerminateProvisionedProduct", + "servicecatalog:UpdateProvisionedProduct" + ], + "Condition": { + "StringEquals": { + "servicecatalog:userLevel": "self" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAJIW7AFFOONVKW75KU", + "PolicyName": "ServiceCatalogEndUserFullAccess", + "UpdateDate": "2017-08-08T18:58:54+00:00", + "VersionId": "v4" + }, + "SimpleWorkflowFullAccess": { + "Arn": "arn:aws:iam::aws:policy/SimpleWorkflowFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:04+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "swf:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PolicyId": "ANPAIFE3AV6VE7EANYBVM", + "PolicyName": "SimpleWorkflowFullAccess", + "UpdateDate": "2015-02-06T18:41:04+00:00", + "VersionId": "v1" + }, + "SupportUser": { + "Arn": "arn:aws:iam::aws:policy/job-function/SupportUser", + "AttachmentCount": 0, + "CreateDate": "2017-05-17T23:11:51+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "support:*", + "acm:DescribeCertificate", + "acm:GetCertificate", + "acm:List*", + "apigateway:GET", + "appstream:Get*", + "autoscaling:Describe*", + "aws-marketplace:ViewSubscriptions", + 
"cloudformation:Describe*", + "cloudformation:Get*", + "cloudformation:List*", + "cloudformation:EstimateTemplateCost", + "cloudfront:Get*", + "cloudfront:List*", + "cloudsearch:Describe*", + "cloudsearch:List*", + "cloudtrail:DescribeTrails", + "cloudtrail:GetTrailStatus", + "cloudtrail:LookupEvents", + "cloudtrail:ListTags", + "cloudtrail:ListPublicKeys", + "cloudwatch:Describe*", + "cloudwatch:Get*", + "cloudwatch:List*", + "codecommit:BatchGetRepositories", + "codecommit:Get*", + "codecommit:List*", + "codedeploy:Batch*", + "codedeploy:Get*", + "codedeploy:List*", + "codepipeline:AcknowledgeJob", + "codepipeline:AcknowledgeThirdPartyJob", + "codepipeline:ListActionTypes", + "codepipeline:ListPipelines", + "codepipeline:PollForJobs", + "codepipeline:PollForThirdPartyJobs", + "codepipeline:GetPipelineState", + "codepipeline:GetPipeline", + "cognito-identity:List*", + "cognito-identity:LookupDeveloperIdentity", + "cognito-identity:Describe*", + "cognito-idp:Describe*", + "cognito-sync:Describe*", + "cognito-sync:GetBulkPublishDetails", + "cognito-sync:GetCognitoEvents", + "cognito-sync:GetIdentityPoolConfiguration", + "cognito-sync:List*", + "config:DescribeConfigurationRecorders", + "config:DescribeConfigurationRecorderStatus", + "config:DescribeConfigRuleEvaluationStatus", + "config:DescribeConfigRules", + "config:DescribeDeliveryChannels", + "config:DescribeDeliveryChannelStatus", + "config:GetResourceConfigHistory", + "config:ListDiscoveredResources", + "datapipeline:DescribeObjects", + "datapipeline:DescribePipelines", + "datapipeline:GetPipelineDefinition", + "datapipeline:ListPipelines", + "datapipeline:QueryObjects", + "datapipeline:ReportTaskProgress", + "datapipeline:ReportTaskRunnerHeartbeat", + "devicefarm:List*", + "devicefarm:Get*", + "directconnect:Describe*", + "discovery:Describe*", + "discovery:ListConfigurations", + "dms:Describe*", + "dms:List*", + "ds:DescribeDirectories", + "ds:DescribeSnapshots", + "ds:GetDirectoryLimits", + 
"ds:GetSnapshotLimits", + "ds:ListAuthorizedApplications", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTable", + "dynamodb:ListTables", + "ec2:Describe*", + "ec2:DescribeHosts", + "ec2:describeIdentityIdFormat", + "ec2:DescribeIdFormat", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeNatGateways", + "ec2:DescribeReservedInstancesModifications", + "ec2:DescribeTags", + "ec2:GetFlowLogsCount", + "ecr:GetRepositoryPolicy", + "ecr:BatchCheckLayerAvailability", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecs:Describe*", + "ecs:List*", + "elasticache:Describe*", + "elasticache:List*", + "elasticbeanstalk:Check*", + "elasticbeanstalk:Describe*", + "elasticbeanstalk:List*", + "elasticbeanstalk:RequestEnvironmentInfo", + "elasticbeanstalk:RetrieveEnvironmentInfo", + "elasticbeanstalk:ValidateConfigurationSettings", + "elasticfilesystem:Describe*", + "elasticloadbalancing:Describe*", + "elasticmapreduce:Describe*", + "elasticmapreduce:List*", + "elastictranscoder:List*", + "elastictranscoder:ReadJob", + "elasticfilesystem:DescribeFileSystems", + "es:Describe*", + "es:List*", + "es:ESHttpGet", + "es:ESHttpHead", + "events:DescribeRule", + "events:List*", + "events:TestEventPattern", + "firehose:Describe*", + "firehose:List*", + "gamelift:List*", + "gamelift:Describe*", + "glacier:ListVaults", + "glacier:DescribeVault", + "glacier:DescribeJob", + "glacier:Get*", + "glacier:List*", + "iam:GenerateCredentialReport", + "iam:GenerateServiceLastAccessedDetails", + "iam:Get*", + "iam:List*", + "importexport:GetStatus", + "importexport:ListJobs", + "importexport:GetJobDetail", + "inspector:Describe*", + "inspector:List*", + "inspector:GetAssessmentTelemetry", + "inspector:LocalizeText", + "iot:Describe*", + "iot:Get*", + "iot:List*", + "kinesisanalytics:DescribeApplication", + "kinesisanalytics:DiscoverInputSchema", + "kinesisanalytics:GetApplicationState", + "kinesisanalytics:ListApplications", + "kinesis:Describe*", + "kinesis:Get*", + "kinesis:List*", + 
"kms:Describe*", + "kms:Get*", + "kms:List*", + "lambda:List*", + "lambda:Get*", + "logs:Describe*", + "logs:TestMetricFilter", + "machinelearning:Describe*", + "machinelearning:Get*", + "mobilehub:GetProject", + "mobilehub:List*", + "mobilehub:ValidateProject", + "mobilehub:VerifyServiceRole", + "opsworks:Describe*", + "rds:Describe*", + "rds:ListTagsForResource", + "redshift:Describe*", + "route53:Get*", + "route53:List*", + "route53domains:CheckDomainAvailability", + "route53domains:GetDomainDetail", + "route53domains:GetOperationDetail", + "route53domains:List*", + "s3:List*", + "sdb:GetAttributes", + "sdb:List*", + "sdb:Select*", + "servicecatalog:SearchProducts", + "servicecatalog:DescribeProduct", + "servicecatalog:DescribeProductView", + "servicecatalog:ListLaunchPaths", + "servicecatalog:DescribeProvisioningParameters", + "servicecatalog:ListRecordHistory", + "servicecatalog:DescribeRecord", + "servicecatalog:ScanProvisionedProducts", + "ses:Get*", + "ses:List*", + "sns:Get*", + "sns:List*", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "sqs:ListQueues", + "sqs:ReceiveMessage", + "ssm:List*", + "ssm:Describe*", + "storagegateway:Describe*", + "storagegateway:List*", + "swf:Count*", + "swf:Describe*", + "swf:Get*", + "swf:List*", + "waf:Get*", + "waf:List*", + "workspaces:Describe*", + "workdocs:Describe*", + "workmail:Describe*", + "workmail:Get*", + "workspaces:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/job-function/", + "PolicyId": "ANPAI3V4GSSN5SJY3P2RO", + "PolicyName": "SupportUser", + "UpdateDate": "2017-05-17T23:11:51+00:00", + "VersionId": "v2" + }, + "SystemAdministrator": { + "Arn": "arn:aws:iam::aws:policy/job-function/SystemAdministrator", + "AttachmentCount": 0, + "CreateDate": "2017-03-24T17:45:43+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "acm:Describe*", + "acm:Get*", + 
"acm:List*", + "acm:Request*", + "acm:Resend*", + "autoscaling:*", + "cloudtrail:DescribeTrails", + "cloudtrail:GetTrailStatus", + "cloudtrail:ListPublicKeys", + "cloudtrail:ListTags", + "cloudtrail:LookupEvents", + "cloudtrail:StartLogging", + "cloudtrail:StopLogging", + "cloudwatch:*", + "codecommit:BatchGetRepositories", + "codecommit:CreateBranch", + "codecommit:CreateRepository", + "codecommit:Get*", + "codecommit:GitPull", + "codecommit:GitPush", + "codecommit:List*", + "codecommit:Put*", + "codecommit:Test*", + "codecommit:Update*", + "codedeploy:*", + "codepipeline:*", + "config:*", + "ds:*", + "ec2:Allocate*", + "ec2:AssignPrivateIpAddresses*", + "ec2:Associate*", + "ec2:Allocate*", + "ec2:AttachInternetGateway", + "ec2:AttachNetworkInterface", + "ec2:AttachVpnGateway", + "ec2:Bundle*", + "ec2:Cancel*", + "ec2:Copy*", + "ec2:CreateCustomerGateway", + "ec2:CreateDhcpOptions", + "ec2:CreateFlowLogs", + "ec2:CreateImage", + "ec2:CreateInstanceExportTask", + "ec2:CreateInternetGateway", + "ec2:CreateKeyPair", + "ec2:CreateNatGateway", + "ec2:CreateNetworkInterface", + "ec2:CreatePlacementGroup", + "ec2:CreateReservedInstancesListing", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateSpotDatafeedSubscription", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateVpc", + "ec2:CreateVpcEndpoint", + "ec2:CreateVpnConnection", + "ec2:CreateVpnConnectionRoute", + "ec2:CreateVpnGateway", + "ec2:DeleteFlowLogs", + "ec2:DeleteKeyPair", + "ec2:DeleteNatGateway", + "ec2:DeleteNetworkInterface", + "ec2:DeletePlacementGroup", + "ec2:DeleteSnapshot", + "ec2:DeleteSpotDatafeedSubscription", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVpc", + "ec2:DeleteVpcEndpoints", + "ec2:DeleteVpnConnection", + "ec2:DeleteVpnConnectionRoute", + "ec2:DeleteVpnGateway", + "ec2:DeregisterImage", + "ec2:Describe*", + "ec2:DetachInternetGateway", + "ec2:DetachNetworkInterface", + 
"ec2:DetachVpnGateway", + "ec2:DisableVgwRoutePropagation", + "ec2:DisableVpcClassicLinkDnsSupport", + "ec2:DisassociateAddress", + "ec2:DisassociateRouteTable", + "ec2:EnableVgwRoutePropagation", + "ec2:EnableVolumeIO", + "ec2:EnableVpcClassicLinkDnsSupport", + "ec2:GetConsoleOutput", + "ec2:GetHostReservationPurchasePreview", + "ec2:GetPasswordData", + "ec2:Import*", + "ec2:Modify*", + "ec2:MonitorInstances", + "ec2:MoveAddressToVpc", + "ec2:Purchase*", + "ec2:RegisterImage", + "ec2:Release*", + "ec2:Replace*", + "ec2:ReportInstanceStatus", + "ec2:Request*", + "ec2:Reset*", + "ec2:RestoreAddressToClassic", + "ec2:RunScheduledInstances", + "ec2:UnassignPrivateIpAddresses", + "ec2:UnmonitorInstances", + "elasticloadbalancing:*", + "events:*", + "iam:GetAccount*", + "iam:GetContextKeys*", + "iam:GetCredentialReport", + "iam:ListAccountAliases", + "iam:ListGroups", + "iam:ListOpenIDConnectProviders", + "iam:ListPolicies", + "iam:ListPoliciesGrantingServiceAccess", + "iam:ListRoles", + "iam:ListSAMLProviders", + "iam:ListServerCertificates", + "iam:Simulate*", + "iam:UpdateServerCertificate", + "iam:UpdateSigningCertificate", + "kinesis:ListStreams", + "kinesis:PutRecord", + "kms:CreateAlias", + "kms:CreateKey", + "kms:DeleteAlias", + "kms:Describe*", + "kms:GenerateRandom", + "kms:Get*", + "kms:List*", + "kms:Encrypt", + "kms:ReEncrypt*", + "lambda:Create*", + "lambda:Delete*", + "lambda:Get*", + "lambda:InvokeFunction", + "lambda:List*", + "lambda:PublishVersion", + "lambda:Update*", + "logs:*", + "rds:Describe*", + "rds:ListTagsForResource", + "route53:*", + "route53domains:*", + "ses:*", + "sns:*", + "sqs:*", + "trustedadvisor:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:AcceptVpcPeeringConnection", + "ec2:AttachClassicLinkVpc", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateVpcPeeringConnection", + "ec2:DeleteCustomerGateway", + "ec2:DeleteDhcpOptions", + 
"ec2:DeleteInternetGateway", + "ec2:DeleteNetworkAcl*", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DeleteVpcPeeringConnection", + "ec2:DetachClassicLinkVpc", + "ec2:DetachVolume", + "ec2:DisableVpcClassicLink", + "ec2:EnableVpcClassicLink", + "ec2:GetConsoleScreenshot", + "ec2:RebootInstances", + "ec2:RejectVpcPeeringConnection", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:TerminateInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "s3:*", + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:GetAccessKeyLastUsed", + "iam:GetGroup*", + "iam:GetInstanceProfile", + "iam:GetLoginProfile", + "iam:GetOpenIDConnectProvider", + "iam:GetPolicy*", + "iam:GetRole*", + "iam:GetSAMLProvider", + "iam:GetSSHPublicKey", + "iam:GetServerCertificate", + "iam:GetServiceLastAccessed*", + "iam:GetUser*", + "iam:ListAccessKeys", + "iam:ListAttached*", + "iam:ListEntitiesForPolicy", + "iam:ListGroupPolicies", + "iam:ListGroupsForUser", + "iam:ListInstanceProfiles*", + "iam:ListMFADevices", + "iam:ListPolicyVersions", + "iam:ListRolePolicies", + "iam:ListSSHPublicKeys", + "iam:ListSigningCertificates", + "iam:ListUserPolicies", + "iam:Upload*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:GetRole", + "iam:ListRoles", + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/rds-monitoring-role", + "arn:aws:iam::*:role/ec2-sysadmin-*", + "arn:aws:iam::*:role/ecr-sysadmin-*", + "arn:aws:iam::*:role/lamdba-sysadmin-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/job-function/", + "PolicyId": "ANPAITJPEZXCYCBXANDSW", + "PolicyName": "SystemAdministrator", + "UpdateDate": "2017-03-24T17:45:43+00:00", + "VersionId": "v2" + }, + 
"VMImportExportRoleForAWSConnector": { + "Arn": "arn:aws:iam::aws:policy/service-role/VMImportExportRoleForAWSConnector", + "AttachmentCount": 0, + "CreateDate": "2015-09-03T20:48:59+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:ListBucket", + "s3:GetBucketLocation", + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::import-to-ec2-*" + ] + }, + { + "Action": [ + "ec2:ModifySnapshotAttribute", + "ec2:CopySnapshot", + "ec2:RegisterImage", + "ec2:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PolicyId": "ANPAJFLQOOJ6F5XNX4LAW", + "PolicyName": "VMImportExportRoleForAWSConnector", + "UpdateDate": "2015-09-03T20:48:59+00:00", + "VersionId": "v1" + }, + "ViewOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-06-26T22:35:31+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "acm:ListCertificates", + "athena:List*", + "aws-marketplace:ViewSubscriptions", + "autoscaling:Describe*", + "batch:ListJobs", + "clouddirectory:ListAppliedSchemaArns", + "clouddirectory:ListDevelopmentSchemaArns", + "clouddirectory:ListDirectories", + "clouddirectory:ListPublishedSchemaArns", + "cloudformation:List*", + "cloudformation:DescribeStacks", + "cloudfront:List*", + "cloudhsm:ListAvailableZones", + "cloudhsm:ListLunaClients", + "cloudhsm:ListHapgs", + "cloudhsm:ListHsms", + "cloudsearch:List*", + "cloudsearch:DescribeDomains", + "cloudtrail:DescribeTrails", + "cloudtrail:LookupEvents", + "cloudwatch:List*", + "cloudwatch:GetMetricData", + "codebuild:ListBuilds*", + "codebuild:ListProjects", + "codecommit:List*", + "codedeploy:List*", + "codedeploy:Get*", + "codepipeline:ListPipelines", + "codestar:List*", + "codestar:Verify*", + "cognito-idp:List*", + 
"cognito-identity:ListIdentities", + "cognito-identity:ListIdentityPools", + "cognito-sync:ListDatasets", + "connect:List*", + "config:List*", + "config:Describe*", + "datapipeline:ListPipelines", + "datapipeline:DescribePipelines", + "datapipeline:GetAccountLimits", + "devicefarm:List*", + "directconnect:Describe*", + "discovery:List*", + "dms:List*", + "ds:DescribeDirectories", + "dynamodb:ListTables", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeBundleTasks", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeConversionTasks", + "ec2:DescribeCustomerGateways", + "ec2:DescribeDhcpOptions", + "ec2:DescribeExportTasks", + "ec2:DescribeFlowLogs", + "ec2:DescribeHost*", + "ec2:DescribeIdentityIdFormat", + "ec2:DescribeIdFormat", + "ec2:DescribeImage*", + "ec2:DescribeImport*", + "ec2:DescribeInstance*", + "ec2:DescribeInternetGateways", + "ec2:DescribeKeyPairs", + "ec2:DescribeMovingAddresses", + "ec2:DescribeNatGateways", + "ec2:DescribeNetwork*", + "ec2:DescribePlacementGroups", + "ec2:DescribePrefixLists", + "ec2:DescribeRegions", + "ec2:DescribeReserved*", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSnapshot*", + "ec2:DescribeSpot*", + "ec2:DescribeSubnets", + "ec2:DescribeVolume*", + "ec2:DescribeVpc*", + "ec2:DescribeVpnGateways", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecs:List*", + "elasticache:Describe*", + "elasticbeanstalk:DescribeApplicationVersions", + "elasticbeanstalk:DescribeApplications", + "elasticbeanstalk:DescribeEnvironments", + "elasticbeanstalk:ListAvailableSolutionStacks", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticfilesystem:DescribeFileSystems", + "elasticmapreduce:List*", + "elastictranscoder:List*", + "es:DescribeElasticsearchDomain", + "es:DescribeElasticsearchDomains", + 
"es:ListDomainNames", + "events:ListRuleNamesByTarget", + "events:ListRules", + "events:ListTargetsByRule", + "firehose:List*", + "firehose:DescribeDeliveryStream", + "gamelift:List*", + "glacier:List*", + "iam:List*", + "iam:GetAccountSummary", + "iam:GetLoginProfile", + "importexport:ListJobs", + "inspector:List*", + "iot:List*", + "kinesis:ListStreams", + "kinesisanalytics:ListApplications", + "kms:ListKeys", + "lambda:List*", + "lex:GetBotAliases", + "lex:GetBotChannelAssociations", + "lex:GetBots", + "lex:GetBotVersions", + "lex:GetIntents", + "lex:GetIntentVersions", + "lex:GetSlotTypes", + "lex:GetSlotTypeVersions", + "lex:GetUtterancesView", + "lightsail:GetBlueprints", + "lightsail:GetBundles", + "lightsail:GetInstances", + "lightsail:GetInstanceSnapshots", + "lightsail:GetKeyPair", + "lightsail:GetRegions", + "lightsail:GetStaticIps", + "lightsail:IsVpcPeered", + "logs:Describe*", + "machinelearning:Describe*", + "mobilehub:ListAvailableFeatures", + "mobilehub:ListAvailableRegions", + "mobilehub:ListProjects", + "opsworks:Describe*", + "opsworks-cm:Describe*", + "organizations:List*", + "mobiletargeting:GetApplicationSettings", + "mobiletargeting:GetCampaigns", + "mobiletargeting:GetImportJobs", + "mobiletargeting:GetSegments", + "polly:Describe*", + "polly:List*", + "rds:Describe*", + "redshift:DescribeClusters", + "redshift:DescribeEvents", + "redshift:ViewQueriesInConsole", + "route53:List*", + "route53:Get*", + "route53domains:List*", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "sdb:List*", + "servicecatalog:List*", + "ses:List*", + "shield:List*", + "states:ListActivities", + "states:ListStateMachines", + "sns:List*", + "sqs:ListQueues", + "ssm:ListAssociations", + "ssm:ListDocuments", + "storagegateway:ListGateways", + "storagegateway:ListLocalDisks", + "storagegateway:ListVolumeRecoveryPoints", + "storagegateway:ListVolumes", + "swf:List*", + "trustedadvisor:Describe*", + "waf:List*", + "waf-regional:List*", + 
"workdocs:DescribeAvailableDirectories", + "workdocs:DescribeInstances", + "workmail:Describe*", + "workspaces:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/job-function/", + "PolicyId": "ANPAID22R6XPJATWOFDK6", + "PolicyName": "ViewOnlyAccess", + "UpdateDate": "2017-06-26T22:35:31+00:00", + "VersionId": "v3" + } +}""" \ No newline at end of file diff --git a/moto/iam/models.py b/moto/iam/models.py index 0005ec0a7..86f8b5aba 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -1,11 +1,13 @@ from __future__ import unicode_literals import base64 from datetime import datetime +import json import pytz from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_without_milliseconds +from .aws_managed_policies import aws_managed_policies_data from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException from .utils import random_access_key, random_alphanumeric, random_resource_id, random_policy_id @@ -92,6 +94,18 @@ class ManagedPolicy(Policy): class AWSManagedPolicy(ManagedPolicy): """AWS-managed policy.""" + @classmethod + def from_data(cls, name, data): + return cls(name, + default_version_id=data.get('DefaultVersionId'), + path=data.get('Path'), + document=data.get('Document')) + +# AWS defines some of its own managed policies and we periodically +# import them via `make aws_managed_policies` +aws_managed_policies = [ + AWSManagedPolicy.from_data(name, d) for name, d + in json.loads(aws_managed_policies_data).items()] class InlinePolicy(Policy): """TODO: is this needed?""" @@ -388,115 +402,6 @@ class User(BaseModel): ) -# predefine AWS managed policies -aws_managed_policies = [ - AWSManagedPolicy( - 'AmazonElasticMapReduceRole', - default_version_id='v6', - path='/service-role/', - document={ - "Version": "2012-10-17", - "Statement": [{ - "Effect": "Allow", - "Resource": "*", - 
"Action": [ - "ec2:AuthorizeSecurityGroupEgress", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CancelSpotInstanceRequests", - "ec2:CreateNetworkInterface", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteNetworkInterface", - "ec2:DeleteSecurityGroup", - "ec2:DeleteTags", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeAccountAttributes", - "ec2:DescribeDhcpOptions", - "ec2:DescribeInstanceStatus", - "ec2:DescribeInstances", - "ec2:DescribeKeyPairs", - "ec2:DescribeNetworkAcls", - "ec2:DescribeNetworkInterfaces", - "ec2:DescribePrefixLists", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSpotInstanceRequests", - "ec2:DescribeSpotPriceHistory", - "ec2:DescribeSubnets", - "ec2:DescribeVpcAttribute", - "ec2:DescribeVpcEndpoints", - "ec2:DescribeVpcEndpointServices", - "ec2:DescribeVpcs", - "ec2:DetachNetworkInterface", - "ec2:ModifyImageAttribute", - "ec2:ModifyInstanceAttribute", - "ec2:RequestSpotInstances", - "ec2:RevokeSecurityGroupEgress", - "ec2:RunInstances", - "ec2:TerminateInstances", - "ec2:DeleteVolume", - "ec2:DescribeVolumeStatus", - "ec2:DescribeVolumes", - "ec2:DetachVolume", - "iam:GetRole", - "iam:GetRolePolicy", - "iam:ListInstanceProfiles", - "iam:ListRolePolicies", - "iam:PassRole", - "s3:CreateBucket", - "s3:Get*", - "s3:List*", - "sdb:BatchPutAttributes", - "sdb:Select", - "sqs:CreateQueue", - "sqs:Delete*", - "sqs:GetQueue*", - "sqs:PurgeQueue", - "sqs:ReceiveMessage" - ] - }] - } - ), - AWSManagedPolicy( - 'AmazonElasticMapReduceforEC2Role', - default_version_id='v2', - path='/service-role/', - document={ - "Version": "2012-10-17", - "Statement": [{ - "Effect": "Allow", - "Resource": "*", - "Action": [ - "cloudwatch:*", - "dynamodb:*", - "ec2:Describe*", - "elasticmapreduce:Describe*", - "elasticmapreduce:ListBootstrapActions", - "elasticmapreduce:ListClusters", - "elasticmapreduce:ListInstanceGroups", - "elasticmapreduce:ListInstances", - "elasticmapreduce:ListSteps", - "kinesis:CreateStream", - 
"kinesis:DeleteStream", - "kinesis:DescribeStream", - "kinesis:GetRecords", - "kinesis:GetShardIterator", - "kinesis:MergeShards", - "kinesis:PutRecord", - "kinesis:SplitShard", - "rds:Describe*", - "s3:*", - "sdb:*", - "sns:*", - "sqs:*" - ] - }] - } - ) -] -# TODO: add more predefined AWS managed policies - - class IAMBackend(BaseBackend): def __init__(self): diff --git a/scripts/update_managed_policies.py b/scripts/update_managed_policies.py new file mode 100755 index 000000000..65aa5b25d --- /dev/null +++ b/scripts/update_managed_policies.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# This updates our local copies of AWS' managed policies +# Invoked via `make update_managed_policies` +# +# Credit goes to +# https://gist.github.com/gene1wood/55b358748be3c314f956 + +from botocore.exceptions import NoCredentialsError +from datetime import datetime +import boto3 +import json +import sys + +output_file = "./moto/iam/aws_managed_policies.py" + + +def json_serial(obj): + """JSON serializer for objects not serializable by default json code""" + + if isinstance(obj, datetime): + serial = obj.isoformat() + return serial + raise TypeError("Type not serializable") + + +client = boto3.client('iam') + +policies = {} + +paginator = client.get_paginator('list_policies') +try: + response_iterator = paginator.paginate(Scope='AWS') + for response in response_iterator: + for policy in response['Policies']: + policies[policy['PolicyName']] = policy +except NoCredentialsError: + print("USAGE:") + print("Put your AWS credentials into ~/.aws/credentials and run:") + print(__file__) + print("") + print("Or specify them on the command line:") + print("AWS_ACCESS_KEY_ID=your_personal_access_key AWS_SECRET_ACCESS_KEY=your_personal_secret {}".format(__file__)) + print("") + sys.exit(1) + +for policy_name in policies: + response = client.get_policy_version( + PolicyArn=policies[policy_name]['Arn'], + VersionId=policies[policy_name]['DefaultVersionId']) + for key in response['PolicyVersion']: 
+ policies[policy_name][key] = response['PolicyVersion'][key] + +with open(output_file, 'w') as f: + triple_quote = '\"\"\"' + + f.write("# Imported via `make aws_managed_policies`\n") + f.write('aws_managed_policies_data = {}\n'.format(triple_quote)) + f.write(json.dumps(policies, + sort_keys=True, + indent=4, + separators=(',', ': '), + default=json_serial)) + f.write(triple_quote) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 3c567136c..a80768101 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -525,8 +525,14 @@ def test_managed_policy(): path='/mypolicy/', description='my user managed policy') - aws_policies = conn.list_policies(scope='AWS')['list_policies_response'][ - 'list_policies_result']['policies'] + marker = 0 + aws_policies = [] + while marker is not None: + response = conn.list_policies(scope='AWS', marker=marker)[ + 'list_policies_response']['list_policies_result'] + for policy in response['policies']: + aws_policies.append(policy) + marker = response.get('marker') set(p.name for p in aws_managed_policies).should.equal( set(p['policy_name'] for p in aws_policies)) @@ -535,8 +541,14 @@ def test_managed_policy(): set(['UserManagedPolicy']).should.equal( set(p['policy_name'] for p in user_policies)) - all_policies = conn.list_policies()['list_policies_response'][ - 'list_policies_result']['policies'] + marker = 0 + all_policies = [] + while marker is not None: + response = conn.list_policies(marker=marker)[ + 'list_policies_response']['list_policies_result'] + for policy in response['policies']: + all_policies.append(policy) + marker = response.get('marker') set(p['policy_name'] for p in aws_policies + user_policies).should.equal(set(p['policy_name'] for p in all_policies)) From b840cb7455dc0aced2689514836c9fc0307dc69d Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 19 Sep 2017 14:19:29 -0700 Subject: [PATCH 226/412] fixing whitespace on AWS managed policies --- moto/iam/models.py | 6 ++++-- 
1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 86f8b5aba..a7e584284 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -101,11 +101,13 @@ class AWSManagedPolicy(ManagedPolicy): path=data.get('Path'), document=data.get('Document')) + # AWS defines some of its own managed policies and we periodically # import them via `make aws_managed_policies` aws_managed_policies = [ - AWSManagedPolicy.from_data(name, d) for name, d - in json.loads(aws_managed_policies_data).items()] + AWSManagedPolicy.from_data(name, d) for name, d + in json.loads(aws_managed_policies_data).items()] + class InlinePolicy(Policy): """TODO: is this needed?""" From 57d94d56c3641591ff73000b6e9a701c34011993 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 19 Sep 2017 22:48:46 +0100 Subject: [PATCH 227/412] Implemented SNS.SetSMSAttributes & SNS.GetSMSAttributes + Filtering TODO: AddPermission / RemovePermission CheckIfPhoneNumberIsOptedOut ConfirmSubscription ListPhoneNumbersOptedOut OptInPhoneNumber --- moto/sns/models.py | 4 ++ moto/sns/responses.py | 66 ++++++++++++++++++++++++++++++++ tests/test_sns/test_sms_boto3.py | 32 ++++++++++++++++ 3 files changed, 102 insertions(+) create mode 100644 tests/test_sns/test_sms_boto3.py diff --git a/moto/sns/models.py b/moto/sns/models.py index 009398407..bc80f9e41 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -172,12 +172,16 @@ class SNSBackend(BaseBackend): self.applications = {} self.platform_endpoints = {} self.region_name = region_name + self.sms_attributes = {} def reset(self): region_name = self.region_name self.__dict__ = {} self.__init__(region_name) + def update_sms_attributes(self, attrs): + self.sms_attributes.update(attrs) + def create_topic(self, name): topic = Topic(name, self) self.topics[topic.arn] = topic diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 9c079b006..9ffe298af 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ 
-1,5 +1,7 @@ from __future__ import unicode_literals import json +import re +from collections import defaultdict from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores @@ -459,6 +461,47 @@ class SNSResponse(BaseResponse): template = self.response_template(SET_SUBSCRIPTION_ATTRIBUTES_TEMPLATE) return template.render() + def set_sms_attributes(self): + attr_regex = re.compile(r'^attributes\.entry\.(?P\d+)\.(?Pkey|value)$') + + # attributes.entry.1.key + # attributes.entry.1.value + # to + # 1: {key:X, value:Y} + temp_dict = defaultdict(dict) + for key, value in self.querystring.items(): + match = attr_regex.match(key) + if match is not None: + temp_dict[match.group('index')][match.group('type')] = value[0] + + # 1: {key:X, value:Y} + # to + # X: Y + # All of this, just to take into account when people provide invalid stuff. + result = {} + for item in temp_dict.values(): + if 'key' in item and 'value' in item: + result[item['key']] = item['value'] + + self.backend.update_sms_attributes(result) + + template = self.response_template(SET_SMS_ATTRIBUTES_TEMPLATE) + return template.render() + + def get_sms_attributes(self): + filter_list = set() + for key, value in self.querystring.items(): + if key.startswith('attributes.member.1'): + filter_list.add(value[0]) + + if len(filter_list) > 0: + result = {k: v for k, v in self.backend.sms_attributes.items() if k in filter_list} + else: + result = self.backend.sms_attributes + + template = self.response_template(GET_SMS_ATTRIBUTES_TEMPLATE) + return template.render(attributes=result) + CREATE_TOPIC_TEMPLATE = """ @@ -758,3 +801,26 @@ SET_SUBSCRIPTION_ATTRIBUTES_TEMPLATE = """a8763b99-33a7-11df-a9b7-05d48da6f042 """ + +SET_SMS_ATTRIBUTES_TEMPLATE = """ + + + 26332069-c04a-5428-b829-72524b56a364 + +""" + +GET_SMS_ATTRIBUTES_TEMPLATE = """ + + + {% for name, value in attributes.items() %} + + {{ name }} + {{ value }} + + {% endfor %} + + + + 287f9554-8db3-5e66-8abc-c76f0186db7e + +""" 
diff --git a/tests/test_sns/test_sms_boto3.py b/tests/test_sns/test_sms_boto3.py new file mode 100644 index 000000000..220a7530a --- /dev/null +++ b/tests/test_sns/test_sms_boto3.py @@ -0,0 +1,32 @@ +from __future__ import unicode_literals +import boto3 +import sure # noqa + +from moto import mock_sns + + +@mock_sns +def test_set_sms_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + + conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) + + response = conn.get_sms_attributes() + response.should.contain('attributes') + response['attributes'].should.contain('DefaultSMSType') + response['attributes'].should.contain('test') + response['attributes']['DefaultSMSType'].should.equal('Transactional') + response['attributes']['test'].should.equal('test') + + +@mock_sns +def test_get_sms_attributes_filtered(): + conn = boto3.client('sns', region_name='us-east-1') + + conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) + + response = conn.get_sms_attributes(attributes=['DefaultSMSType']) + response.should.contain('attributes') + response['attributes'].should.contain('DefaultSMSType') + response['attributes'].should_not.contain('test') + response['attributes']['DefaultSMSType'].should.equal('Transactional') From ba8a2ccfc5c2babbd3876be05fcd5796b7ee607c Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 19 Sep 2017 23:54:13 +0100 Subject: [PATCH 228/412] Implemented CheckIfPhoneNumberIsOptedOut + Tests + Error code --- moto/sns/responses.py | 41 +++++++++++++++++++++++++++++--- tests/test_sns/test_sms_boto3.py | 29 ++++++++++++++++++++++ 2 files changed, 67 insertions(+), 3 deletions(-) diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 9ffe298af..f06f10816 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -9,11 +9,17 @@ from .models import sns_backends class SNSResponse(BaseResponse): + SMS_ATTR_REGEX = 
re.compile(r'^attributes\.entry\.(?P\d+)\.(?Pkey|value)$') + OPT_OUT_PHONE_NUMBER_REGEX = re.compile(r'^\+?\d+$') @property def backend(self): return sns_backends[self.region] + def _error(self, code, message, sender='Sender'): + template = self.response_template(ERROR_RESPONSE) + return template.render(code=code, message=message, sender=sender) + def _get_attributes(self): attributes = self._get_list_prefix('Attributes.entry') return dict( @@ -462,15 +468,13 @@ class SNSResponse(BaseResponse): return template.render() def set_sms_attributes(self): - attr_regex = re.compile(r'^attributes\.entry\.(?P\d+)\.(?Pkey|value)$') - # attributes.entry.1.key # attributes.entry.1.value # to # 1: {key:X, value:Y} temp_dict = defaultdict(dict) for key, value in self.querystring.items(): - match = attr_regex.match(key) + match = self.SMS_ATTR_REGEX.match(key) if match is not None: temp_dict[match.group('index')][match.group('type')] = value[0] @@ -502,6 +506,19 @@ class SNSResponse(BaseResponse): template = self.response_template(GET_SMS_ATTRIBUTES_TEMPLATE) return template.render(attributes=result) + def check_if_phone_number_is_opted_out(self): + number = self._get_param('phoneNumber') + if self.OPT_OUT_PHONE_NUMBER_REGEX.match(number) is None: + error_response = self._error( + code='InvalidParameter', + message='Invalid parameter: PhoneNumber Reason: input incorrectly formatted' + ) + return error_response, dict(status=400) + + # There should be a nicer way to set if a nubmer has opted out + template = self.response_template(CHECK_IF_OPTED_OUT_TEMPLATE) + return template.render(opt_out=str(number.endswith('99')).lower()) + CREATE_TOPIC_TEMPLATE = """ @@ -824,3 +841,21 @@ GET_SMS_ATTRIBUTES_TEMPLATE = """ + + {{ opt_out }} + + + 287f9554-8db3-5e66-8abc-c76f0186db7e + +""" + +ERROR_RESPONSE = """ + + {{ sender }} + {{ code }} + {{ message }} + + 9dd01905-5012-5f99-8663-4b3ecd0dfaef +""" \ No newline at end of file diff --git a/tests/test_sns/test_sms_boto3.py 
b/tests/test_sns/test_sms_boto3.py index 220a7530a..185b3e43c 100644 --- a/tests/test_sns/test_sms_boto3.py +++ b/tests/test_sns/test_sms_boto3.py @@ -3,6 +3,8 @@ import boto3 import sure # noqa from moto import mock_sns +from botocore.exceptions import ClientError +from nose.tools import assert_raises @mock_sns @@ -30,3 +32,30 @@ def test_get_sms_attributes_filtered(): response['attributes'].should.contain('DefaultSMSType') response['attributes'].should_not.contain('test') response['attributes']['DefaultSMSType'].should.equal('Transactional') + + +@mock_sns +def test_check_not_opted_out(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545375') + + response.should.contain('isOptedOut') + response['isOptedOut'].should.be(False) + + +@mock_sns +def test_check_opted_out(): # Ends in 99 so is opted out + conn = boto3.client('sns', region_name='us-east-1') + response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545399') + + response.should.contain('isOptedOut') + response['isOptedOut'].should.be(True) + + +@mock_sns +def test_check_opted_out_invalid(): + conn = boto3.client('sns', region_name='us-east-1') + + # Invalid phone number + with assert_raises(ClientError): + conn.check_if_phone_number_is_opted_out(phoneNumber='+44742LALALA') From 1281ac86d51c0f5b33e012dd7fe999114c389126 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 20 Sep 2017 00:03:58 +0100 Subject: [PATCH 229/412] Implemented ListPhoneNumbersOptedOut + Tests --- moto/sns/responses.py | 19 ++++++++++++++++++- tests/test_sns/test_sms_boto3.py | 10 ++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/moto/sns/responses.py b/moto/sns/responses.py index f06f10816..93786e72e 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -519,6 +519,10 @@ class SNSResponse(BaseResponse): template = self.response_template(CHECK_IF_OPTED_OUT_TEMPLATE) return 
template.render(opt_out=str(number.endswith('99')).lower()) + def list_phone_numbers_opted_out(self): + template = self.response_template(LIST_OPTOUT_TEMPLATE) + return template.render(opt_outs=['+447420500600', '+447420505401']) + CREATE_TOPIC_TEMPLATE = """ @@ -858,4 +862,17 @@ ERROR_RESPONSE = """""" + +LIST_OPTOUT_TEMPLATE = """ + + + {% for item in opt_outs %} + {{ item }} + {% endfor %} + + + + 985e196d-a237-51b6-b33a-4b5601276b38 + +""" diff --git a/tests/test_sns/test_sms_boto3.py b/tests/test_sns/test_sms_boto3.py index 185b3e43c..a10f9d6dc 100644 --- a/tests/test_sns/test_sms_boto3.py +++ b/tests/test_sns/test_sms_boto3.py @@ -59,3 +59,13 @@ def test_check_opted_out_invalid(): # Invalid phone number with assert_raises(ClientError): conn.check_if_phone_number_is_opted_out(phoneNumber='+44742LALALA') + + +@mock_sns +def test_list_opted_out(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.list_phone_numbers_opted_out() + + response.should.contain('phoneNumbers') + response['phoneNumbers'].should.contain('+447420500600') + response['phoneNumbers'].should.contain('+447420505401') From 51c3fec6dd2adc9c53ca57bd1687d5253105b9d7 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Wed, 20 Sep 2017 13:51:22 +1200 Subject: [PATCH 230/412] fix scan return CapacityUnits --- moto/dynamodb2/responses.py | 5 +++- tests/test_dynamodb2/test_dynamodb.py | 36 ++++++++++++++++++++++++++- 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 12b166ea0..cf715bfbc 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -427,7 +427,10 @@ class DynamoHandler(BaseResponse): result = { "Count": len(items), "Items": [item.attrs for item in items], - "ConsumedCapacityUnits": 1, + 'ConsumedCapacity': { + 'TableName': name, + 'CapacityUnits': 1, + }, "ScannedCount": scanned_count } if last_evaluated_key is not None: diff --git a/tests/test_dynamodb2/test_dynamodb.py 
b/tests/test_dynamodb2/test_dynamodb.py index 3784cf71c..8f320cbab 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -193,4 +193,38 @@ def test_query_invalid_table(): try: conn.query(TableName='invalid_table', KeyConditionExpression='index1 = :partitionkeyval', ExpressionAttributeValues={':partitionkeyval': {'S':'test'}}) except ClientError as exception: - assert exception.response['Error']['Code'] == "ResourceNotFoundException" \ No newline at end of file + assert exception.response['Error']['Code'] == "ResourceNotFoundException" + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_scan_returns_consumed_capacity(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' 
}, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + response = conn.scan( + TableName=name, + ) + + assert 'ConsumedCapacity' in response + assert 'CapacityUnits' in response['ConsumedCapacity'] + assert response['ConsumedCapacity']['TableName'] == name From f7f80293c7a21a6df859dfa0cc35ae1641dcba5e Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 20 Sep 2017 20:56:37 +0100 Subject: [PATCH 231/412] Implemented OptInPhoneNumber + Tests --- moto/sns/models.py | 1 + moto/sns/responses.py | 22 +++++++++++++++++++++- tests/test_sns/test_sms_boto3.py | 17 +++++++++++++++-- 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index bc80f9e41..6f5ee54ac 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -173,6 +173,7 @@ class SNSBackend(BaseBackend): self.platform_endpoints = {} self.region_name = region_name self.sms_attributes = {} + self.opt_out_numbers = ['+447420500600', '+447420505401', '+447632960543', '+447632960028', '+447700900149', '+447700900550', '+447700900545', '+447700900907'] def reset(self): region_name = self.region_name diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 93786e72e..5b1c34610 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -521,7 +521,19 @@ class SNSResponse(BaseResponse): def list_phone_numbers_opted_out(self): template = self.response_template(LIST_OPTOUT_TEMPLATE) - return template.render(opt_outs=['+447420500600', '+447420505401']) + return template.render(opt_outs=self.backend.opt_out_numbers) + + def opt_in_phone_number(self): + number = self._get_param('phoneNumber') + + try: + self.backend.opt_out_numbers.remove(number) + except ValueError: + pass + + template = self.response_template(OPT_IN_NUMBER_TEMPLATE) + return template.render() + CREATE_TOPIC_TEMPLATE = """ @@ -876,3 +888,11 @@ LIST_OPTOUT_TEMPLATE = """ + + + 
4c61842c-0796-50ef-95ac-d610c0bc8cf8 + + +""" diff --git a/tests/test_sns/test_sms_boto3.py b/tests/test_sns/test_sms_boto3.py index a10f9d6dc..9fbc90ac8 100644 --- a/tests/test_sns/test_sms_boto3.py +++ b/tests/test_sns/test_sms_boto3.py @@ -67,5 +67,18 @@ def test_list_opted_out(): response = conn.list_phone_numbers_opted_out() response.should.contain('phoneNumbers') - response['phoneNumbers'].should.contain('+447420500600') - response['phoneNumbers'].should.contain('+447420505401') + len(response['phoneNumbers']).should.be.greater_than(0) + + +@mock_sns +def test_opt_in(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.list_phone_numbers_opted_out() + current_len = len(response['phoneNumbers']) + assert current_len > 0 + + conn.opt_in_phone_number(phoneNumber=response['phoneNumbers'][0]) + + response = conn.list_phone_numbers_opted_out() + len(response['phoneNumbers']).should.be.greater_than(0) + len(response['phoneNumbers']).should.be.lower_than(current_len) From ef8a97f6c35b63bfb6c7c9a0b10c6b9e94faffc2 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 20 Sep 2017 21:13:26 +0100 Subject: [PATCH 232/412] Implemented Add/RemovePermission + Tests --- moto/sns/models.py | 1 + moto/sns/responses.py | 39 ++++++++++++++++++++++++++++++-- tests/test_sns/test_sms_boto3.py | 16 +++++++++++++ 3 files changed, 54 insertions(+), 2 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 6f5ee54ac..4a7cf7e7d 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -174,6 +174,7 @@ class SNSBackend(BaseBackend): self.region_name = region_name self.sms_attributes = {} self.opt_out_numbers = ['+447420500600', '+447420505401', '+447632960543', '+447632960028', '+447700900149', '+447700900550', '+447700900545', '+447700900907'] + self.permissions = {} def reset(self): region_name = self.region_name diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 5b1c34610..97939d6b9 100644 --- a/moto/sns/responses.py +++ 
b/moto/sns/responses.py @@ -534,6 +534,30 @@ class SNSResponse(BaseResponse): template = self.response_template(OPT_IN_NUMBER_TEMPLATE) return template.render() + def add_permission(self): + arn = self._get_param('TopicArn') + label = self._get_param('Label') + accounts = self._get_multi_param('AWSAccountId.member.') + action = self._get_multi_param('ActionName.member.') + + key = (arn, label) + self.backend.permissions[key] = {'accounts': accounts, 'action': action} + + template = self.response_template(ADD_PERMISSION) + return template.render() + + def remove_permission(self): + arn = self._get_param('TopicArn') + label = self._get_param('Label') + + try: + key = (arn, label) + del self.backend.permissions[key] + except KeyError: + pass + + template = self.response_template(DEL_PERMISSION) + return template.render() CREATE_TOPIC_TEMPLATE = """ @@ -894,5 +918,16 @@ OPT_IN_NUMBER_TEMPLATE = """""" + +ADD_PERMISSION = """ + + c046e713-c5ff-5888-a7bc-b52f0e4f1299 + +""" + +DEL_PERMISSION = """ + + e767cc9f-314b-5e1b-b283-9ea3fd4e38a3 + +""" diff --git a/tests/test_sns/test_sms_boto3.py b/tests/test_sns/test_sms_boto3.py index 9fbc90ac8..beaa92d55 100644 --- a/tests/test_sns/test_sms_boto3.py +++ b/tests/test_sns/test_sms_boto3.py @@ -82,3 +82,19 @@ def test_opt_in(): response = conn.list_phone_numbers_opted_out() len(response['phoneNumbers']).should.be.greater_than(0) len(response['phoneNumbers']).should.be.lower_than(current_len) + + +@mock_sns +def test_add_remove_permissions(): + conn = boto3.client('sns', region_name='us-east-1') + + conn.add_permission( + TopicArn='arn:aws:sns:us-east-1:000000000000:terry_test', + Label='Test1234', + AWSAccountId=['999999999999'], + ActionName=['AddPermission'] + ) + conn.remove_permission( + TopicArn='arn:aws:sns:us-east-1:000000000000:terry_test', + Label='Test1234' + ) From 19074c535cee1f23f495042141395e9256d1e053 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 20 Sep 2017 21:47:02 +0100 Subject: [PATCH 233/412] Added 
ConfirmSubscription + Tests + checks For now subscriptions do nothing, but if we go the route of handing out subscribe tokens, I have layed the groundwork for validating that --- moto/sns/models.py | 1 + moto/sns/responses.py | 45 +++++++++++++++++++++++++++++--- tests/test_sns/test_sms_boto3.py | 17 ++++++++++-- 3 files changed, 57 insertions(+), 6 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 4a7cf7e7d..9feed0198 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -77,6 +77,7 @@ class Subscription(BaseModel): self.protocol = protocol self.arn = make_arn_for_subscription(self.topic.arn) self.attributes = {} + self.confirmed = False def publish(self, message, message_id): if self.protocol == 'sqs': diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 97939d6b9..87d35ec17 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -540,25 +540,53 @@ class SNSResponse(BaseResponse): accounts = self._get_multi_param('AWSAccountId.member.') action = self._get_multi_param('ActionName.member.') + if arn not in self.backend.topics: + error_response = self._error('NotFound', 'Topic does not exist') + return error_response, dict(status=404) + key = (arn, label) self.backend.permissions[key] = {'accounts': accounts, 'action': action} - template = self.response_template(ADD_PERMISSION) + template = self.response_template(ADD_PERMISSION_TEMPLATE) return template.render() def remove_permission(self): arn = self._get_param('TopicArn') label = self._get_param('Label') + if arn not in self.backend.topics: + error_response = self._error('NotFound', 'Topic does not exist') + return error_response, dict(status=404) + try: key = (arn, label) del self.backend.permissions[key] except KeyError: pass - template = self.response_template(DEL_PERMISSION) + template = self.response_template(DEL_PERMISSION_TEMPLATE) return template.render() + def confirm_subscription(self): + arn = self._get_param('TopicArn') + + if arn not in 
self.backend.topics: + error_response = self._error('NotFound', 'Topic does not exist') + return error_response, dict(status=404) + + # Added other parts here for when they are needed + # token = self._get_param('Token') + # auth = self._get_param('AuthenticateOnUnsubscribe') + # if already_subscribed: + # error_response = self._error( + # code='AuthorizationError', + # message='Subscription already confirmed' + # ) + # return error_response, dict(status=400) + + template = self.response_template(CONFIRM_SUBSCRIPTION_TEMPLATE) + return template.render(sub_arn='{0}:68762e72-e9b1-410a-8b3b-903da69ee1d5'.format(arn)) + CREATE_TOPIC_TEMPLATE = """ @@ -920,14 +948,23 @@ OPT_IN_NUMBER_TEMPLATE = """ +ADD_PERMISSION_TEMPLATE = """ c046e713-c5ff-5888-a7bc-b52f0e4f1299 """ -DEL_PERMISSION = """ +DEL_PERMISSION_TEMPLATE = """ e767cc9f-314b-5e1b-b283-9ea3fd4e38a3 """ + +CONFIRM_SUBSCRIPTION_TEMPLATE = """ + + {{ sub_arn }} + + + 16eb4dde-7b3c-5b3e-a22a-1fe2a92d3293 + +""" diff --git a/tests/test_sns/test_sms_boto3.py b/tests/test_sns/test_sms_boto3.py index beaa92d55..ca6bfd22a 100644 --- a/tests/test_sns/test_sms_boto3.py +++ b/tests/test_sns/test_sms_boto3.py @@ -87,14 +87,27 @@ def test_opt_in(): @mock_sns def test_add_remove_permissions(): conn = boto3.client('sns', region_name='us-east-1') + response = conn.create_topic(Name='testpermissions') conn.add_permission( - TopicArn='arn:aws:sns:us-east-1:000000000000:terry_test', + TopicArn=response['TopicArn'], Label='Test1234', AWSAccountId=['999999999999'], ActionName=['AddPermission'] ) conn.remove_permission( - TopicArn='arn:aws:sns:us-east-1:000000000000:terry_test', + TopicArn=response['TopicArn'], Label='Test1234' ) + + +@mock_sns +def test_confirm_subscription(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.create_topic(Name='testconfirm') + + conn.confirm_subscription( + TopicArn=response['TopicArn'], + 
Token='2336412f37fb687f5d51e6e241d59b68c4e583a5cee0be6f95bbf97ab8d2441cf47b99e848408adaadf4c197e65f03473d53c4ba398f6abbf38ce2e8ebf7b4ceceb2cd817959bcde1357e58a2861b05288c535822eb88cac3db04f592285249971efc6484194fc4a4586147f16916692', + AuthenticateOnUnsubscribe='true' + ) From fec81fc6ea6093a2a39aa387d8db7c3b9a321237 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 20 Sep 2017 21:49:09 +0100 Subject: [PATCH 234/412] Updated README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4d5d2d7e6..cca50a16e 100644 --- a/README.md +++ b/README.md @@ -110,7 +110,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | SES | @mock_ses | core endpoints done | |------------------------------------------------------------------------------| -| SNS | @mock_sns | core endpoints done | +| SNS | @mock_sns | all endpoints done | |------------------------------------------------------------------------------| | SQS | @mock_sqs | core endpoints done | |------------------------------------------------------------------------------| From 727f757bc4237f49860c479789f4e8d5b31265b5 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 20 Sep 2017 18:12:30 -0700 Subject: [PATCH 235/412] regenerating policies --- moto/iam/aws_managed_policies.py | 138 +++++++++++++++++++++++++++-- scripts/update_managed_policies.py | 2 +- 2 files changed, 134 insertions(+), 6 deletions(-) diff --git a/moto/iam/aws_managed_policies.py b/moto/iam/aws_managed_policies.py index 277783123..df348c0d9 100644 --- a/moto/iam/aws_managed_policies.py +++ b/moto/iam/aws_managed_policies.py @@ -2641,6 +2641,124 @@ aws_managed_policies_data = """ "UpdateDate": "2016-12-21T02:01:55+00:00", "VersionId": "v4" }, + "AWSElasticLoadBalancingClassicServiceRolePolicy": { + "Arn": 
"arn:aws:iam::aws:policy/aws-service-role/AWSElasticLoadBalancingClassicServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-09-19T22:36:18+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeAddresses", + "ec2:DescribeInstances", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeVpcs", + "ec2:DescribeInternetGateways", + "ec2:DescribeAccountAttributes", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeVpcClassicLink", + "ec2:CreateSecurityGroup", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:ModifyNetworkInterface", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:AssociateAddress", + "ec2:DisassociateAddress", + "ec2:AttachNetworkInterface", + "ec2:DetachNetworkInterface", + "ec2:AssignPrivateIpAddresses", + "ec2:AssignIpv6Addresses", + "ec2:UnassignIpv6Addresses" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PolicyId": "ANPAIUMWW3QP7DPZPNVU4", + "PolicyName": "AWSElasticLoadBalancingClassicServiceRolePolicy", + "UpdateDate": "2017-09-19T22:36:18+00:00", + "VersionId": "v1" + }, + "AWSElasticLoadBalancingServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSElasticLoadBalancingServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-09-19T22:19:04+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeAddresses", + "ec2:DescribeInstances", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeVpcs", + "ec2:DescribeInternetGateways", + "ec2:DescribeAccountAttributes", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeVpcClassicLink", + "ec2:CreateSecurityGroup", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:ModifyNetworkInterface", + 
"ec2:ModifyNetworkInterfaceAttribute", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:AssociateAddress", + "ec2:DisassociateAddress", + "ec2:AttachNetworkInterface", + "ec2:DetachNetworkInterface", + "ec2:AssignPrivateIpAddresses", + "ec2:AssignIpv6Addresses", + "ec2:UnassignIpv6Addresses" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PolicyId": "ANPAIMHWGGSRHLOQUICJQ", + "PolicyName": "AWSElasticLoadBalancingServiceRolePolicy", + "UpdateDate": "2017-09-19T22:19:04+00:00", + "VersionId": "v1" + }, + "AWSEnhancedClassicNetworkingMangementPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSEnhancedClassicNetworkingMangementPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-09-20T17:29:09+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeSecurityGroups" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PolicyId": "ANPAI7T4V2HZTS72QVO52", + "PolicyName": "AWSEnhancedClassicNetworkingMangementPolicy", + "UpdateDate": "2017-09-20T17:29:09+00:00", + "VersionId": "v1" + }, "AWSGlueConsoleFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSGlueConsoleFullAccess", "AttachmentCount": 0, @@ -7078,8 +7196,8 @@ aws_managed_policies_data = """ "AmazonElasticMapReduceFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceFullAccess", "AttachmentCount": 0, - "CreateDate": "2015-12-21T23:20:38+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2017-09-20T19:27:37+00:00", + "DefaultVersionId": "v5", "Document": { "Statement": [ { @@ -7130,6 +7248,16 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": 
"elasticmapreduce.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/elasticmapreduce.amazonaws.com/AWSServiceRoleForEMRCleanup" } ], "Version": "2012-10-17" @@ -7139,8 +7267,8 @@ aws_managed_policies_data = """ "Path": "/", "PolicyId": "ANPAIZP5JFP3AMSGINBB2", "PolicyName": "AmazonElasticMapReduceFullAccess", - "UpdateDate": "2015-12-21T23:20:38+00:00", - "VersionId": "v4" + "UpdateDate": "2017-09-20T19:27:37+00:00", + "VersionId": "v5" }, "AmazonElasticMapReduceReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceReadOnlyAccess", @@ -12813,4 +12941,4 @@ aws_managed_policies_data = """ "UpdateDate": "2017-06-26T22:35:31+00:00", "VersionId": "v3" } -}""" \ No newline at end of file +}""" diff --git a/scripts/update_managed_policies.py b/scripts/update_managed_policies.py index 65aa5b25d..5b60660f6 100755 --- a/scripts/update_managed_policies.py +++ b/scripts/update_managed_policies.py @@ -60,4 +60,4 @@ with open(output_file, 'w') as f: indent=4, separators=(',', ': '), default=json_serial)) - f.write(triple_quote) + f.write('{}\n'.format(triple_quote)) From ca7661bc1e47aa1a2e2370e5977ee9198640d8bc Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 20 Sep 2017 19:04:23 -0700 Subject: [PATCH 236/412] S3 keys inherit bucket ACL --- moto/s3/responses.py | 4 +++- tests/test_s3/test_s3.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index fbd142a34..86d5dbdef 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -636,6 +636,8 @@ class ResponseObject(_TemplateEnvironmentMixin): storage_class = request.headers.get('x-amz-storage-class', 'STANDARD') acl = self._acl_from_headers(request.headers) + if acl is None: + acl = self.backend.get_bucket(bucket_name).acl tagging = self._tagging_from_headers(request.headers) if 'acl' in query: @@ -740,7 +742,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if grants: return 
FakeAcl(grants) else: - return get_canned_acl('private') + return None def _tagging_from_headers(self, headers): if headers.get('x-amz-tagging'): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 4fc698787..8ce56bd01 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -870,7 +870,7 @@ def test_s3_object_in_public_bucket(): s3 = boto3.resource('s3') bucket = s3.Bucket('test-bucket') bucket.create(ACL='public-read') - bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt') + bucket.put_object(Body=b'ABCD', Key='file.txt') s3_anonymous = boto3.resource('s3') s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) From 719e7866ab80d8ac54810a6cf1a1f5e1094e6b01 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Wed, 20 Sep 2017 04:36:11 +0900 Subject: [PATCH 237/412] auto generate teamplte --- requirements-dev.txt | 1 + scaffold.py | 148 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 148 insertions(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 602e6fbbe..7dda4026b 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -11,3 +11,4 @@ botocore>=1.5.77 six>=1.9 prompt-toolkit==1.0.14 click==6.7 +inflection==0.3.1 diff --git a/scaffold.py b/scaffold.py index 77cc997c5..79133b9d3 100755 --- a/scaffold.py +++ b/scaffold.py @@ -1,5 +1,7 @@ #!/usr/bin/env python import os +import re +from lxml import etree import click import jinja2 @@ -16,8 +18,13 @@ import boto3 from implementation_coverage import ( get_moto_implementation ) +from inflection import singularize + TEMPLATE_DIR = './template' +INPUT_IGNORED_IN_BACKEND = ['Marker', 'PageSize'] +OUTPUT_IGNORED_IN_BACKEND = ['NextMarker'] + def print_progress(title, body, color): click.secho('\t{}\t'.format(title), fg=color, nl=False) @@ -127,11 +134,150 @@ def initialize_service(service, operation): tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename ) +def to_upper_camel_case(s): + 
return ''.join([_.title() for _ in s.split('_')]) + +def to_snake_case(s): + s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + + +def get_function_in_responses(service, operation): + """refers to definition of API in botocore, and autogenerates function + You can see example of elbv2 from link below. + https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json + """ + client = boto3.client(service) + + aws_operation_name = to_upper_camel_case(operation) + op_model = client._service_model.operation_model(aws_operation_name) + outputs = op_model.output_shape.members + inputs = op_model.input_shape.members + input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] + output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] + body = 'def {}(self):\n'.format(operation) + + for input_name, input_type in inputs.items(): + type_name = input_type.type_name + if type_name == 'integer': + arg_line_tmpl = ' {} = _get_int_param("{}")\n' + elif type_name == 'list': + arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n' + else: + arg_line_tmpl = ' {} = self._get_param("{}")\n' + body += arg_line_tmpl.format(to_snake_case(input_name), input_name) + if output_names: + body += ' {} = self.{}_backend.{}(\n'.format(','.join(output_names), service, operation) + else: + body += ' self.{}_backend.{}(\n'.format(service, operation) + for input_name in input_names: + body += ' {}={},\n'.format(input_name, input_name) + + body += ' )\n' + body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper()) + body += ' return template.render({})\n'.format( + ','.join(['{}={}'.format(_, _) for _ in output_names]) + ) + return body + + +def get_function_in_models(service, operation): + """refers to definition of API in botocore, and autogenerates function + You can see example of elbv2 from link below. 
+ https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json + """ + client = boto3.client(service) + aws_operation_name = to_upper_camel_case(operation) + op_model = client._service_model.operation_model(aws_operation_name) + inputs = op_model.input_shape.members + outputs = op_model.output_shape.members + input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] + output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] + if input_names: + body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names)) + else: + body = 'def {}(self)\n' + body += ' # implement here\n' + body += ' return {}\n'.format(', '.join(output_names)) + + return body + + +def _get_subtree(name, shape, replace_list, name_prefix=[]): + class_name = shape.__class__.__name__ + if class_name in ('StringShape', 'Shape'): + t = etree.Element(name) + if name_prefix: + t.text = '{{ %s.%s }}' % (name_prefix[-1], to_snake_case(name)) + else: + t.text = '{{ %s }}' % to_snake_case(name) + return t + elif class_name in ('ListShape', ): + replace_list.append((name, name_prefix)) + t = etree.Element(name) + t_member = etree.Element('member') + t.append(t_member) + for nested_name, nested_shape in shape.member.members.items(): + t_member.append(_get_subtree(nested_name, nested_shape, replace_list, name_prefix + [singularize(name.lower())])) + return t + raise ValueError('Not supported Shape') + + +def get_response_template(service, operation): + """refers to definition of API in botocore, and autogenerates template + You can see example of elbv2 from link below. 
+ https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json + """ + client = boto3.client(service) + aws_operation_name = to_upper_camel_case(operation) + op_model = client._service_model.operation_model(aws_operation_name) + result_wrapper = op_model.output_shape.serialization['resultWrapper'] + response_wrapper = result_wrapper.replace('Result', 'Response') + metadata = op_model.metadata + xml_namespace = metadata['xmlNamespace'] + + # build xml tree + t_root = etree.Element(response_wrapper, xmlns=xml_namespace) + + # build metadata + t_metadata = etree.Element('ResponseMetadata') + t_request_id = etree.Element('RequestId') + t_request_id.text = '1549581b-12b7-11e3-895e-1334aEXAMPLE' + t_metadata.append(t_request_id) + t_root.append(t_metadata) + + # build result + t_result = etree.Element(result_wrapper) + outputs = op_model.output_shape.members + replace_list = [] + for output_name, output_shape in outputs.items(): + t_result.append(_get_subtree(output_name, output_shape, replace_list)) + t_root.append(t_result) + body = etree.tostring(t_root, pretty_print=True).decode('utf-8') + for replace in replace_list: + name = replace[0] + prefix = replace[1] + singular_name = singularize(name) + + start_tag = '<%s>' % name + iter_name = '{}.{}'.format(prefix[-1], name.lower())if prefix else name.lower() + start_tag_to_replace = '<%s>\n{%% for %s in %s %%}' % (name, singular_name.lower(), iter_name) + # TODO: format indents + end_tag = '' % name + end_tag_to_replace = '{{ endfor }}\n' % name + + body = body.replace(start_tag, start_tag_to_replace) + body = body.replace(end_tag, end_tag_to_replace) + print(body) @click.command() def main(): service, operation = select_service_and_operation() initialize_service(service, operation) + if __name__ == '__main__': - main() +# print(get_function_in_responses('elbv2', 'describe_listeners')) +# print(get_function_in_models('elbv2', 'describe_listeners')) + get_response_template('elbv2', 
'describe_listeners') +# main() From e330d7876ee89a08fc297d6fd5d83db6e891815e Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 21 Sep 2017 21:23:13 +0900 Subject: [PATCH 238/412] fix indent --- scaffold.py | 41 ++++++++++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/scaffold.py b/scaffold.py index 79133b9d3..2c168da69 100755 --- a/scaffold.py +++ b/scaffold.py @@ -93,7 +93,7 @@ def render_teamplte(tmpl_dir, tmpl_filename, context, service, alt_filename=None f.write(rendered) -def initialize_service(service, operation): +def initialize_service(service, operation, api_protocol): """create lib and test dirs if not exist """ lib_dir = os.path.join('moto', service) @@ -142,7 +142,7 @@ def to_snake_case(s): return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() -def get_function_in_responses(service, operation): +def get_function_in_query_responses(service, operation): """refers to definition of API in botocore, and autogenerates function You can see example of elbv2 from link below. https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json @@ -223,8 +223,10 @@ def _get_subtree(name, shape, replace_list, name_prefix=[]): raise ValueError('Not supported Shape') -def get_response_template(service, operation): +def get_response_query_template(service, operation): """refers to definition of API in botocore, and autogenerates template + Assume that response format is xml when protocol is query + You can see example of elbv2 from link below. 
https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json """ @@ -254,6 +256,7 @@ def get_response_template(service, operation): t_result.append(_get_subtree(output_name, output_shape, replace_list)) t_root.append(t_result) body = etree.tostring(t_root, pretty_print=True).decode('utf-8') + body_lines = body.splitlines() for replace in replace_list: name = replace[0] prefix = replace[1] @@ -261,23 +264,39 @@ def get_response_template(service, operation): start_tag = '<%s>' % name iter_name = '{}.{}'.format(prefix[-1], name.lower())if prefix else name.lower() - start_tag_to_replace = '<%s>\n{%% for %s in %s %%}' % (name, singular_name.lower(), iter_name) - # TODO: format indents + loop_start = '{%% for %s in %s %%}' % (singular_name.lower(), iter_name) end_tag = '' % name - end_tag_to_replace = '{{ endfor }}\n' % name + loop_end = '{{ endfor }}' - body = body.replace(start_tag, start_tag_to_replace) - body = body.replace(end_tag, end_tag_to_replace) - print(body) + start_tag_indexes = [i for i, l in enumerate(body_lines) if start_tag in l] + if len(start_tag_indexes) != 1: + raise Exception('tag %s not found in response body' % start_tag) + start_tag_index = start_tag_indexes[0] + body_lines.insert(start_tag_index + 1, loop_start) + + end_tag_indexes = [i for i, l in enumerate(body_lines) if end_tag in l] + if len(end_tag_indexes) != 1: + raise Exception('tag %s not found in response body' % end_tag) + end_tag_index = end_tag_indexes[0] + body_lines.insert(end_tag_index, loop_end) + body = '\n'.join(body_lines) + return body @click.command() def main(): service, operation = select_service_and_operation() - initialize_service(service, operation) + api_protocol = boto3.client(service_name)._service_model.metadata['protocol'] + initialize_service(service, operation, api_protocol) + if api_protocol == 'query': + func_in_responses = get_function_in_responses(service, operation) + func_in_models = get_function_in_models(service, operation) 
+ teamplte = get_response_xml_template(service, operation) + if __name__ == '__main__': # print(get_function_in_responses('elbv2', 'describe_listeners')) # print(get_function_in_models('elbv2', 'describe_listeners')) - get_response_template('elbv2', 'describe_listeners') + b = get_response_query_template('elbv2', 'describe_listeners') + print(b) # main() From 6c33888b0fd3795d27151ee13e6a74927197a874 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Thu, 21 Sep 2017 21:54:14 +0900 Subject: [PATCH 239/412] insert functions and templates for query and json protocol --- requirements-dev.txt | 1 + scaffold.py | 110 ++++++++++++++++++++++++++++++++++--------- 2 files changed, 88 insertions(+), 23 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 7dda4026b..6d84d7a86 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -12,3 +12,4 @@ six>=1.9 prompt-toolkit==1.0.14 click==6.7 inflection==0.3.1 +lxml==4.0.0 diff --git a/scaffold.py b/scaffold.py index 2c168da69..d2f06b127 100755 --- a/scaffold.py +++ b/scaffold.py @@ -1,6 +1,8 @@ #!/usr/bin/env python import os import re +import inspect +import importlib from lxml import etree import click @@ -15,6 +17,8 @@ from botocore import xform_name from botocore.session import Session import boto3 +from moto.core.responses import BaseResponse +from moto.core import BaseBackend from implementation_coverage import ( get_moto_implementation ) @@ -142,7 +146,7 @@ def to_snake_case(s): return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() -def get_function_in_query_responses(service, operation): +def get_function_in_responses(service, operation, protocol): """refers to definition of API in botocore, and autogenerates function You can see example of elbv2 from link below. 
https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json @@ -174,10 +178,14 @@ def get_function_in_query_responses(service, operation): body += ' {}={},\n'.format(input_name, input_name) body += ' )\n' - body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper()) - body += ' return template.render({})\n'.format( - ','.join(['{}={}'.format(_, _) for _ in output_names]) - ) + if protocol == 'query': + body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper()) + body += ' return template.render({})\n'.format( + ','.join(['{}={}'.format(_, _) for _ in output_names]) + ) + elif protocol == 'json': + body += ' # TODO: adjust reponse\n' + body += ' return json.dumps({})\n'.format(','.join(['{}={}'.format(_, _) for _ in output_names])) return body @@ -255,8 +263,8 @@ def get_response_query_template(service, operation): for output_name, output_shape in outputs.items(): t_result.append(_get_subtree(output_name, output_shape, replace_list)) t_root.append(t_result) - body = etree.tostring(t_root, pretty_print=True).decode('utf-8') - body_lines = body.splitlines() + xml_body = etree.tostring(t_root, pretty_print=True).decode('utf-8') + xml_body_lines = xml_body.splitlines() for replace in replace_list: name = replace[0] prefix = replace[1] @@ -268,35 +276,91 @@ def get_response_query_template(service, operation): end_tag = '' % name loop_end = '{{ endfor }}' - start_tag_indexes = [i for i, l in enumerate(body_lines) if start_tag in l] + start_tag_indexes = [i for i, l in enumerate(xml_body_lines) if start_tag in l] if len(start_tag_indexes) != 1: raise Exception('tag %s not found in response body' % start_tag) start_tag_index = start_tag_indexes[0] - body_lines.insert(start_tag_index + 1, loop_start) + xml_body_lines.insert(start_tag_index + 1, loop_start) - end_tag_indexes = [i for i, l in enumerate(body_lines) if end_tag in l] + end_tag_indexes = [i for i, l in enumerate(xml_body_lines) if 
end_tag in l] if len(end_tag_indexes) != 1: raise Exception('tag %s not found in response body' % end_tag) end_tag_index = end_tag_indexes[0] - body_lines.insert(end_tag_index, loop_end) - body = '\n'.join(body_lines) + xml_body_lines.insert(end_tag_index, loop_end) + xml_body = '\n'.join(xml_body_lines) + body = '\n{}_TEMPLATE = """{}"""'.format(operation.upper(), xml_body) return body + +def insert_code_to_class(path, base_class, new_code): + with open(path) as f: + lines = [_.replace('\n', '') for _ in f.readlines()] + mod_path = os.path.splitext(path)[0].replace('/', '.') + mod = importlib.import_module(mod_path) + clsmembers = inspect.getmembers(mod, inspect.isclass) + _response_cls = [_[1] for _ in clsmembers if issubclass(_[1], base_class) and _[1] != base_class] + if len(_response_cls) != 1: + raise Exception('unknown error, number of clsmembers is not 1') + response_cls = _response_cls[0] + code_lines, line_no = inspect.getsourcelines(response_cls) + end_line_no = line_no + len(code_lines) + + func_lines = [' ' * 4 + _ for _ in new_code.splitlines()] + + lines = lines[:end_line_no] + func_lines + lines[end_line_no:] + + with open(path, 'w') as f: + f.write('\n'.join(lines)) + + +def insert_query_codes(service, operation): + func_in_responses = get_function_in_responses(service, operation, 'query') + func_in_models = get_function_in_models(service, operation) + template = get_response_query_template(service, operation) + + # edit responses.py + responses_path = 'moto/{}/responses.py'.format(service) + print_progress('inserting code', responses_path, 'green') + insert_code_to_class(responses_path, BaseResponse, func_in_responses) + + # insert template + with open(responses_path) as f: + lines = [_[:-1] for _ in f.readlines()] + lines += template.splitlines() + with open(responses_path, 'w') as f: + f.write('\n'.join(lines)) + + # edit models.py + models_path = 'moto/{}/models.py'.format(service) + print_progress('inserting code', models_path, 'green') + 
insert_code_to_class(models_path, BaseBackend, func_in_models) + +def insert_json_codes(service, operation): + func_in_responses = get_function_in_responses(service, operation, 'json') + func_in_models = get_function_in_models(service, operation) + + # edit responses.py + responses_path = 'moto/{}/responses.py'.format(service) + print_progress('inserting code', responses_path, 'green') + insert_code_to_class(responses_path, BaseResponse, func_in_responses) + + # edit models.py + models_path = 'moto/{}/models.py'.format(service) + print_progress('inserting code', models_path, 'green') + insert_code_to_class(models_path, BaseBackend, func_in_models) + @click.command() def main(): service, operation = select_service_and_operation() - api_protocol = boto3.client(service_name)._service_model.metadata['protocol'] + api_protocol = boto3.client(service)._service_model.metadata['protocol'] initialize_service(service, operation, api_protocol) if api_protocol == 'query': - func_in_responses = get_function_in_responses(service, operation) - func_in_models = get_function_in_models(service, operation) - teamplte = get_response_xml_template(service, operation) - - + insert_query_codes(service, operation) + elif api_protocol == 'json': + insert_json_codes(service, operation) + pass + else: + print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow') if __name__ == '__main__': -# print(get_function_in_responses('elbv2', 'describe_listeners')) -# print(get_function_in_models('elbv2', 'describe_listeners')) - b = get_response_query_template('elbv2', 'describe_listeners') - print(b) -# main() + main() From d0154b8e71266096c9140007f6ba767861b0fd4a Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 22 Sep 2017 00:01:01 +0900 Subject: [PATCH 240/412] insert functions and templates for query and rest-json protocol --- scaffold.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/scaffold.py b/scaffold.py 
index d2f06b127..172c21bbb 100755 --- a/scaffold.py +++ b/scaffold.py @@ -349,6 +349,15 @@ def insert_json_codes(service, operation): print_progress('inserting code', models_path, 'green') insert_code_to_class(models_path, BaseBackend, func_in_models) +def insert_restjson_codes(service, operation): + func_in_models = get_function_in_models(service, operation) + + print_progress('skipping inserting code to responses.py', "dont't know how to implement", 'yellow') + # edit models.py + models_path = 'moto/{}/models.py'.format(service) + print_progress('inserting code', models_path, 'green') + insert_code_to_class(models_path, BaseBackend, func_in_models) + @click.command() def main(): service, operation = select_service_and_operation() @@ -358,7 +367,8 @@ def main(): insert_query_codes(service, operation) elif api_protocol == 'json': insert_json_codes(service, operation) - pass + elif api_protocol == 'rest-json': + insert_restjson_codes(service, operation) else: print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow') From 16f0868d420b31e3e00656854b1f19b4d9d15e47 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 22 Sep 2017 00:03:52 +0900 Subject: [PATCH 241/412] rename script name --- scaffold.py => setup_new_function.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename scaffold.py => setup_new_function.py (100%) diff --git a/scaffold.py b/setup_new_function.py similarity index 100% rename from scaffold.py rename to setup_new_function.py From a1292d8c6e49a5bc3227bcd41e18bde93e284a13 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 21 Sep 2017 10:55:19 -0700 Subject: [PATCH 242/412] bumping to version 1.1.12 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 2af39396d..166846ce6 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.11', + version='1.1.12', description='A library that allows 
your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 18cb0bce542823bf328cb53839246f94d3b94466 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Thu, 21 Sep 2017 21:16:00 +0100 Subject: [PATCH 243/412] General tidy up --- moto/sns/responses.py | 7 +- tests/test_sns/test_application_boto3.py | 27 +++++ tests/test_sns/test_sms_boto3.py | 113 --------------------- tests/test_sns/test_subscriptions.py | 2 + tests/test_sns/test_subscriptions_boto3.py | 66 ++++++++++++ tests/test_sns/test_topics_boto3.py | 17 ++++ 6 files changed, 118 insertions(+), 114 deletions(-) delete mode 100644 tests/test_sns/test_sms_boto3.py diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 87d35ec17..92092dc42 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -574,7 +574,12 @@ class SNSResponse(BaseResponse): error_response = self._error('NotFound', 'Topic does not exist') return error_response, dict(status=404) - # Added other parts here for when they are needed + # Once Tokens are stored by the `subscribe` endpoint and distributed + # to the client somehow, then we can check validity of tokens + # presented to this method. The following code works, all thats + # needed is to perform a token check and assign that value to the + # `already_subscribed` variable. 
+ # # token = self._get_param('Token') # auth = self._get_param('AuthenticateOnUnsubscribe') # if already_subscribed: diff --git a/tests/test_sns/test_application_boto3.py b/tests/test_sns/test_application_boto3.py index 99c378fe4..1c9695fea 100644 --- a/tests/test_sns/test_application_boto3.py +++ b/tests/test_sns/test_application_boto3.py @@ -321,3 +321,30 @@ def test_publish_to_disabled_platform_endpoint(): MessageStructure="json", TargetArn=endpoint_arn, ).should.throw(ClientError) + + +@mock_sns +def test_set_sms_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + + conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) + + response = conn.get_sms_attributes() + response.should.contain('attributes') + response['attributes'].should.contain('DefaultSMSType') + response['attributes'].should.contain('test') + response['attributes']['DefaultSMSType'].should.equal('Transactional') + response['attributes']['test'].should.equal('test') + + +@mock_sns +def test_get_sms_attributes_filtered(): + conn = boto3.client('sns', region_name='us-east-1') + + conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) + + response = conn.get_sms_attributes(attributes=['DefaultSMSType']) + response.should.contain('attributes') + response['attributes'].should.contain('DefaultSMSType') + response['attributes'].should_not.contain('test') + response['attributes']['DefaultSMSType'].should.equal('Transactional') diff --git a/tests/test_sns/test_sms_boto3.py b/tests/test_sns/test_sms_boto3.py deleted file mode 100644 index ca6bfd22a..000000000 --- a/tests/test_sns/test_sms_boto3.py +++ /dev/null @@ -1,113 +0,0 @@ -from __future__ import unicode_literals -import boto3 -import sure # noqa - -from moto import mock_sns -from botocore.exceptions import ClientError -from nose.tools import assert_raises - - -@mock_sns -def test_set_sms_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - - 
conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) - - response = conn.get_sms_attributes() - response.should.contain('attributes') - response['attributes'].should.contain('DefaultSMSType') - response['attributes'].should.contain('test') - response['attributes']['DefaultSMSType'].should.equal('Transactional') - response['attributes']['test'].should.equal('test') - - -@mock_sns -def test_get_sms_attributes_filtered(): - conn = boto3.client('sns', region_name='us-east-1') - - conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) - - response = conn.get_sms_attributes(attributes=['DefaultSMSType']) - response.should.contain('attributes') - response['attributes'].should.contain('DefaultSMSType') - response['attributes'].should_not.contain('test') - response['attributes']['DefaultSMSType'].should.equal('Transactional') - - -@mock_sns -def test_check_not_opted_out(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545375') - - response.should.contain('isOptedOut') - response['isOptedOut'].should.be(False) - - -@mock_sns -def test_check_opted_out(): # Ends in 99 so is opted out - conn = boto3.client('sns', region_name='us-east-1') - response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545399') - - response.should.contain('isOptedOut') - response['isOptedOut'].should.be(True) - - -@mock_sns -def test_check_opted_out_invalid(): - conn = boto3.client('sns', region_name='us-east-1') - - # Invalid phone number - with assert_raises(ClientError): - conn.check_if_phone_number_is_opted_out(phoneNumber='+44742LALALA') - - -@mock_sns -def test_list_opted_out(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.list_phone_numbers_opted_out() - - response.should.contain('phoneNumbers') - len(response['phoneNumbers']).should.be.greater_than(0) - - -@mock_sns -def test_opt_in(): - conn = 
boto3.client('sns', region_name='us-east-1') - response = conn.list_phone_numbers_opted_out() - current_len = len(response['phoneNumbers']) - assert current_len > 0 - - conn.opt_in_phone_number(phoneNumber=response['phoneNumbers'][0]) - - response = conn.list_phone_numbers_opted_out() - len(response['phoneNumbers']).should.be.greater_than(0) - len(response['phoneNumbers']).should.be.lower_than(current_len) - - -@mock_sns -def test_add_remove_permissions(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.create_topic(Name='testpermissions') - - conn.add_permission( - TopicArn=response['TopicArn'], - Label='Test1234', - AWSAccountId=['999999999999'], - ActionName=['AddPermission'] - ) - conn.remove_permission( - TopicArn=response['TopicArn'], - Label='Test1234' - ) - - -@mock_sns -def test_confirm_subscription(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.create_topic(Name='testconfirm') - - conn.confirm_subscription( - TopicArn=response['TopicArn'], - Token='2336412f37fb687f5d51e6e241d59b68c4e583a5cee0be6f95bbf97ab8d2441cf47b99e848408adaadf4c197e65f03473d53c4ba398f6abbf38ce2e8ebf7b4ceceb2cd817959bcde1357e58a2861b05288c535822eb88cac3db04f592285249971efc6484194fc4a4586147f16916692', - AuthenticateOnUnsubscribe='true' - ) diff --git a/tests/test_sns/test_subscriptions.py b/tests/test_sns/test_subscriptions.py index 292fd83c0..ba241ba44 100644 --- a/tests/test_sns/test_subscriptions.py +++ b/tests/test_sns/test_subscriptions.py @@ -34,6 +34,7 @@ def test_creating_subscription(): "ListSubscriptionsResult"]["Subscriptions"] subscriptions.should.have.length_of(0) + @mock_sns_deprecated def test_deleting_subscriptions_by_deleting_topic(): conn = boto.connect_sns() @@ -66,6 +67,7 @@ def test_deleting_subscriptions_by_deleting_topic(): "ListSubscriptionsResult"]["Subscriptions"] subscriptions.should.have.length_of(0) + @mock_sns_deprecated def test_getting_subscriptions_by_topic(): conn = boto.connect_sns() diff --git 
a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index 8cb5c1886..e600d6422 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -37,6 +37,7 @@ def test_creating_subscription(): subscriptions = conn.list_subscriptions()["Subscriptions"] subscriptions.should.have.length_of(0) + @mock_sns def test_deleting_subscriptions_by_deleting_topic(): conn = boto3.client('sns', region_name='us-east-1') @@ -68,6 +69,7 @@ def test_deleting_subscriptions_by_deleting_topic(): subscriptions = conn.list_subscriptions()["Subscriptions"] subscriptions.should.have.length_of(0) + @mock_sns def test_getting_subscriptions_by_topic(): conn = boto3.client('sns', region_name='us-east-1') @@ -197,3 +199,67 @@ def test_set_subscription_attributes(): AttributeName='InvalidName', AttributeValue='true' ) + + +@mock_sns +def test_check_not_opted_out(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545375') + + response.should.contain('isOptedOut') + response['isOptedOut'].should.be(False) + + +@mock_sns +def test_check_opted_out(): + # Phone number ends in 99 so is hardcoded in the endpoint to return opted + # out status + conn = boto3.client('sns', region_name='us-east-1') + response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545399') + + response.should.contain('isOptedOut') + response['isOptedOut'].should.be(True) + + +@mock_sns +def test_check_opted_out_invalid(): + conn = boto3.client('sns', region_name='us-east-1') + + # Invalid phone number + with assert_raises(ClientError): + conn.check_if_phone_number_is_opted_out(phoneNumber='+44742LALALA') + + +@mock_sns +def test_list_opted_out(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.list_phone_numbers_opted_out() + + response.should.contain('phoneNumbers') + len(response['phoneNumbers']).should.be.greater_than(0) + + 
+@mock_sns +def test_opt_in(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.list_phone_numbers_opted_out() + current_len = len(response['phoneNumbers']) + assert current_len > 0 + + conn.opt_in_phone_number(phoneNumber=response['phoneNumbers'][0]) + + response = conn.list_phone_numbers_opted_out() + len(response['phoneNumbers']).should.be.greater_than(0) + len(response['phoneNumbers']).should.be.lower_than(current_len) + + +@mock_sns +def test_confirm_subscription(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.create_topic(Name='testconfirm') + + conn.confirm_subscription( + TopicArn=response['TopicArn'], + Token='2336412f37fb687f5d51e6e241d59b68c4e583a5cee0be6f95bbf97ab8d2441cf47b99e848408adaadf4c197e65f03473d53c4ba398f6abbf38ce2e8ebf7b4ceceb2cd817959bcde1357e58a2861b05288c535822eb88cac3db04f592285249971efc6484194fc4a4586147f16916692', + AuthenticateOnUnsubscribe='true' + ) diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 4702744c3..a9c2a2904 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -129,3 +129,20 @@ def test_topic_paging(): response.shouldnt.have("NextToken") topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) + + +@mock_sns +def test_add_remove_permissions(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.create_topic(Name='testpermissions') + + conn.add_permission( + TopicArn=response['TopicArn'], + Label='Test1234', + AWSAccountId=['999999999999'], + ActionName=['AddPermission'] + ) + conn.remove_permission( + TopicArn=response['TopicArn'], + Label='Test1234' + ) From 9e19243310203ffce9c58281b23102f0e8996dbd Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Thu, 21 Sep 2017 21:44:34 +0100 Subject: [PATCH 244/412] Started ACM framework --- moto/acm/__init__.py | 6 ++++++ moto/acm/models.py | 19 ++++++++++++++++ moto/acm/responses.py | 50 
+++++++++++++++++++++++++++++++++++++++++++ moto/acm/urls.py | 10 +++++++++ 4 files changed, 85 insertions(+) create mode 100644 moto/acm/__init__.py create mode 100644 moto/acm/models.py create mode 100644 moto/acm/responses.py create mode 100644 moto/acm/urls.py diff --git a/moto/acm/__init__.py b/moto/acm/__init__.py new file mode 100644 index 000000000..6cd8a4aa5 --- /dev/null +++ b/moto/acm/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import acm_backends +from ..core.models import base_decorator + +acm_backend = acm_backends['us-east-1'] +mock_acm = base_decorator(acm_backends) diff --git a/moto/acm/models.py b/moto/acm/models.py new file mode 100644 index 000000000..d5d7a3475 --- /dev/null +++ b/moto/acm/models.py @@ -0,0 +1,19 @@ +from __future__ import unicode_literals + +from moto.core import BaseBackend, BaseModel +from moto.ec2 import ec2_backends + + +class Certificate(BaseModel): + pass + + +class AWSCertificateManagerBackend(BaseBackend): + + def __init__(self): + self._certificates = {} + + +acm_backends = {} +for region, ec2_backend in ec2_backends.items(): + acm_backends[region] = AWSCertificateManagerBackend() diff --git a/moto/acm/responses.py b/moto/acm/responses.py new file mode 100644 index 000000000..47649e97b --- /dev/null +++ b/moto/acm/responses.py @@ -0,0 +1,50 @@ +from __future__ import unicode_literals +import json + +from moto.core.responses import BaseResponse +from .models import acm_backends + + +class AWSCertificateManagerResponse(BaseResponse): + + @property + def acm_backend(self): + return acm_backends[self.region] + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + def _get_param(self, param, default=None): + return self.request_params.get(param, default) + + def add_tags_to_certificate(self): + raise NotImplementedError() + + def delete_certificate(self): + raise NotImplementedError() + + def describe_certificate(self): + 
raise NotImplementedError() + + def import_certificate(self): + raise NotImplementedError() + + def list_certificates(self): + raise NotImplementedError() + + def list_tags_for_certificate(self): + raise NotImplementedError() + + def remove_tags_from_certificate(self): + raise NotImplementedError() + + def request_certificate(self): + raise NotImplementedError() + + def resend_validation_email(self): + raise NotImplementedError() + diff --git a/moto/acm/urls.py b/moto/acm/urls.py new file mode 100644 index 000000000..20acbb3f4 --- /dev/null +++ b/moto/acm/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import AWSCertificateManagerResponse + +url_bases = [ + "https?://acm.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': AWSCertificateManagerResponse.dispatch, +} From 283b67cb9bbb8e9d4f1a4fb87c469d701010862c Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 00:20:26 +0100 Subject: [PATCH 245/412] Added nearly all ACM method implementations Added cryptography>=2.0.0 to install deps Created base backend and certificate objects Implemented most of the methods Added pregenerated certs --- moto/__init__.py | 1 + moto/acm/models.py | 233 +++++++++++++++++- moto/acm/responses.py | 129 +++++++++- moto/acm/utils.py | 7 + moto/backends.py | 2 + setup.py | 1 + tests/test_acm/resources/README.md | 40 +++ tests/test_acm/resources/ca.key | 51 ++++ tests/test_acm/resources/ca.pem | 58 +++++ tests/test_acm/resources/ca.srl | 1 + .../test_acm/resources/star_moto_com-bad.pem | 26 ++ tests/test_acm/resources/star_moto_com.csr | 17 ++ tests/test_acm/resources/star_moto_com.key | 27 ++ tests/test_acm/resources/star_moto_com.pem | 26 ++ tests/test_acm/test_acm.py | 32 +++ 15 files changed, 639 insertions(+), 12 deletions(-) create mode 100644 moto/acm/utils.py create mode 100644 tests/test_acm/resources/README.md create mode 100644 tests/test_acm/resources/ca.key create mode 100644 tests/test_acm/resources/ca.pem create mode 100644 
tests/test_acm/resources/ca.srl create mode 100644 tests/test_acm/resources/star_moto_com-bad.pem create mode 100644 tests/test_acm/resources/star_moto_com.csr create mode 100644 tests/test_acm/resources/star_moto_com.key create mode 100644 tests/test_acm/resources/star_moto_com.pem create mode 100644 tests/test_acm/test_acm.py diff --git a/moto/__init__.py b/moto/__init__.py index 728d8db71..5ca34fdec 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -5,6 +5,7 @@ import logging __title__ = 'moto' __version__ = '1.0.1' +from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8: noqa from .awslambda import mock_lambda, mock_lambda_deprecated # flake8: noqa diff --git a/moto/acm/models.py b/moto/acm/models.py index d5d7a3475..46a7d97b2 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -1,19 +1,242 @@ from __future__ import unicode_literals +import re +import json +import datetime from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends +from .utils import make_arn_for_certificate -class Certificate(BaseModel): - pass +import cryptography.x509 +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.backends import default_backend + + +DEFAULT_ACCOUNT_ID = 123456789012 +GOOGLE_ROOT_CA = b"""-----BEGIN CERTIFICATE----- +MIIEKDCCAxCgAwIBAgIQAQAhJYiw+lmnd+8Fe2Yn3zANBgkqhkiG9w0BAQsFADBC +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMS +R2VvVHJ1c3QgR2xvYmFsIENBMB4XDTE3MDUyMjExMzIzN1oXDTE4MTIzMTIzNTk1 +OVowSTELMAkGA1UEBhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMT +HEdvb2dsZSBJbnRlcm5ldCBBdXRob3JpdHkgRzIwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCcKgR3XNhQkToGo4Lg2FBIvIk/8RlwGohGfuCPxfGJziHu +Wv5hDbcyRImgdAtTT1WkzoJile7rWV/G4QWAEsRelD+8W0g49FP3JOb7kekVxM/0 +Uw30SvyfVN59vqBrb4fA0FAfKDADQNoIc1Fsf/86PKc3Bo69SxEE630k3ub5/DFx 
++5TVYPMuSq9C0svqxGoassxT3RVLix/IGWEfzZ2oPmMrhDVpZYTIGcVGIvhTlb7j +gEoQxirsupcgEcc5mRAEoPBhepUljE5SdeK27QjKFPzOImqzTs9GA5eXA37Asd57 +r0Uzz7o+cbfe9CUlwg01iZ2d+w4ReYkeN8WvjnJpAgMBAAGjggERMIIBDTAfBgNV +HSMEGDAWgBTAephojYn7qwVkDBF9qn1luMrMTjAdBgNVHQ4EFgQUSt0GFhu89mi1 +dvWBtrtiGrpagS8wDgYDVR0PAQH/BAQDAgEGMC4GCCsGAQUFBwEBBCIwIDAeBggr +BgEFBQcwAYYSaHR0cDovL2cuc3ltY2QuY29tMBIGA1UdEwEB/wQIMAYBAf8CAQAw +NQYDVR0fBC4wLDAqoCigJoYkaHR0cDovL2cuc3ltY2IuY29tL2NybHMvZ3RnbG9i +YWwuY3JsMCEGA1UdIAQaMBgwDAYKKwYBBAHWeQIFATAIBgZngQwBAgIwHQYDVR0l +BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4IBAQDKSeWs +12Rkd1u+cfrP9B4jx5ppY1Rf60zWGSgjZGaOHMeHgGRfBIsmr5jfCnC8vBk97nsz +qX+99AXUcLsFJnnqmseYuQcZZTTMPOk/xQH6bwx+23pwXEz+LQDwyr4tjrSogPsB +E4jLnD/lu3fKOmc2887VJwJyQ6C9bgLxRwVxPgFZ6RGeGvOED4Cmong1L7bHon8X +fOGLVq7uZ4hRJzBgpWJSwzfVO+qFKgE4h6LPcK2kesnE58rF2rwjMvL+GMJ74N87 +L9TQEOaWTPtEtyFkDbkAlDASJodYmDkFOA/MgkgMCkdm7r+0X8T/cKjhf4t5K7hl +MqO5tzHpCvX2HzLc +-----END CERTIFICATE-----""" +# Added google root CA as AWS returns chain you gave it + root CA (provided or not) +# so for now a cheap response is just give any old root CA + + +class AWSError(Exception): + TYPE = None + STATUS = 400 + + def __init__(self, message): + self.message = message + + def response(self): + resp = {'__type': self.TYPE, 'message': self.message} + return json.dumps(resp), dict(status=self.STATUS) + + +class AWSValidationException(AWSError): + TYPE = 'ValidationException' + + +class AWSResourceNotFoundException(AWSError): + TYPE = 'ResourceNotFoundException' + + +class CertBundle(BaseModel): + def __init__(self, certificate, private_key, chain=None, region='us-east-1', arn=None): + self.cert = certificate + self._cert = None + self.common_name = None + self.key = private_key + self._key = None + self.chain = chain + self.tags = {} + self._chain = None + + # AWS always returns your chain + root CA + if self.chain is None: + self.chain = GOOGLE_ROOT_CA + else: + self.chain += b'\n' + GOOGLE_ROOT_CA + + # Takes care of 
PEM checking + self.validate_pk() + self.validate_certificate() + if chain is not None: + self.validate_chain() + + # TODO check cert is valid, or if self-signed then a chain is provided, otherwise + # raise AWSValidationException('Provided certificate is not a valid self signed. Please provide either a valid self-signed certificate or certificate chain.') + + # Used for when one wants to overwrite an arn + if arn is None: + self.arn = make_arn_for_certificate(DEFAULT_ACCOUNT_ID, region) + else: + self.arn = arn + + def validate_pk(self): + try: + self._key = serialization.load_pem_private_key(self.key, password=None, backend=default_backend()) + + if self._key.key_size > 2048: + AWSValidationException('The private key length is not supported. Only 1024-bit and 2048-bit are allowed.') + + except Exception as err: + if isinstance(err, AWSValidationException): + raise + raise AWSValidationException('The private key is not PEM-encoded or is not valid.') + + def validate_certificate(self): + try: + self._cert = cryptography.x509.load_pem_x509_certificate(self.cert, default_backend()) + + now = datetime.datetime.now() + if self._cert.not_valid_after < now: + raise AWSValidationException('The certificate has expired, is not valid.') + + if self._cert.not_valid_before > now: + raise AWSValidationException('The certificate is not in effect yet, is not valid.') + + # Extracting some common fields for ease of use + # Have to search through cert.subject for OIDs + self.common_name = self._cert.subject.get_attributes_for_oid(cryptography.x509.OID_COMMON_NAME)[0].value + + except Exception as err: + if isinstance(err, AWSValidationException): + raise + raise AWSValidationException('The certificate is not PEM-encoded or is not valid.') + + def validate_chain(self): + try: + self._chain = [] + + for cert_armored in self.chain.split(b'-\n-'): + # Fix missing -'s on split + cert_armored = re.sub(rb'^----B', b'-----B', cert_armored) + cert_armored = re.sub(rb'E----$', b'E-----', 
cert_armored) + cert = cryptography.x509.load_pem_x509_certificate(cert_armored, default_backend()) + self._chain.append(cert) + + now = datetime.datetime.now() + if self._cert.not_valid_after < now: + raise AWSValidationException('The certificate chain has expired, is not valid.') + + if self._cert.not_valid_before > now: + raise AWSValidationException('The certificate chain is not in effect yet, is not valid.') + + except Exception as err: + if isinstance(err, AWSValidationException): + raise + raise AWSValidationException('The certificate is not PEM-encoded or is not valid.') + + def __str__(self): + return self.arn + + def __repr__(self): + return '' class AWSCertificateManagerBackend(BaseBackend): - - def __init__(self): + def __init__(self, region): + super(AWSCertificateManagerBackend, self).__init__() + self.region = region self._certificates = {} + def reset(self): + region = self.region + self.__dict__ = {} + self.__init__(region) + + def _arn_not_found(self, arn): + msg = 'Certificate with arn {0} not found in account {1}'.format(arn, DEFAULT_ACCOUNT_ID) + return AWSResourceNotFoundException(msg) + + def import_cert(self, certificate, private_key, chain=None, arn=None): + if arn is not None: + if arn not in self._certificates: + raise self._arn_not_found(arn) + else: + # Will reuse provided ARN + bundle = CertBundle(certificate, private_key, chain=chain, region=region, arn=arn) + else: + # Will generate a random ARN + bundle = CertBundle(certificate, private_key, chain=chain, region=region) + + self._certificates[bundle.arn] = bundle + + return bundle.arn + + def get_certificates_list(self): + """ + Get list of certificates + + :return: List of certificates + :rtype: list of CertBundle + """ + return self._certificates.values() + + def get_certificate(self, arn): + if arn not in self._certificates: + raise self._arn_not_found(arn) + + return self._certificates[arn] + + def delete_certificate(self, arn): + if arn not in self._certificates: + raise 
self._arn_not_found(arn) + + del self._certificates[arn] + + def add_tags_to_certificate(self, arn, tags): + # get_cert does arn check + cert_bundle = self.get_certificate(arn) + + for tag in tags: + key = tag['Key'] + value = tag.get('Value', None) + cert_bundle.tags[key] = value + + def remove_tags_from_certificate(self, arn, tags): + # get_cert does arn check + cert_bundle = self.get_certificate(arn) + + for tag in tags: + key = tag['Key'] + value = tag.get('Value', None) + + try: + # If value isnt provided, just delete key + if value is None: + del cert_bundle.tags[key] + # If value is provided, only delete if it matches what already exists + elif cert_bundle.tags[key] == value: + del cert_bundle.tags[key] + except KeyError: + pass + + acm_backends = {} for region, ec2_backend in ec2_backends.items(): - acm_backends[region] = AWSCertificateManagerBackend() + acm_backends[region] = AWSCertificateManagerBackend(region) diff --git a/moto/acm/responses.py b/moto/acm/responses.py index 47649e97b..06ef43bac 100644 --- a/moto/acm/responses.py +++ b/moto/acm/responses.py @@ -1,14 +1,21 @@ from __future__ import unicode_literals import json +import base64 from moto.core.responses import BaseResponse -from .models import acm_backends +from .models import acm_backends, AWSError, AWSValidationException class AWSCertificateManagerResponse(BaseResponse): @property def acm_backend(self): + """ + ACM Backend + + :return: ACM Backend object + :rtype: moto.acm.models.AWSCertificateManagerBackend + """ return acm_backends[self.region] @property @@ -22,25 +29,133 @@ class AWSCertificateManagerResponse(BaseResponse): return self.request_params.get(param, default) def add_tags_to_certificate(self): - raise NotImplementedError() + arn = self._get_param('CertificateArn') + tags = self._get_list_prefix('Tags') + + if arn is None: + msg = 'A required parameter for the specified action is not supplied.' 
+ return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + + try: + self.acm_backend.add_tags_to_certificate(arn, tags) + except AWSError as err: + return err.response() + + return '' def delete_certificate(self): - raise NotImplementedError() + arn = self._get_param('CertificateArn') + + if arn is None: + msg = 'A required parameter for the specified action is not supplied.' + return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + + try: + self.acm_backend.delete_certificate(arn) + except AWSError as err: + return err.response() + + return '' def describe_certificate(self): raise NotImplementedError() + def get_certificate(self): + arn = self._get_param('CertificateArn') + + if arn is None: + msg = 'A required parameter for the specified action is not supplied.' + return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + + try: + cert_bundle = self.acm_backend.get_certificate(arn) + except AWSError as err: + return err.response() + + result = { + 'Certificate': cert_bundle.cert.decode(), + 'CertificateChain': cert_bundle.chain.decode() + } + return json.dumps(result) + def import_certificate(self): - raise NotImplementedError() + # TODO comment on what raises exceptions for all branches + certificate = self._get_param('Certificate') + private_key = self._get_param('PrivateKey') + chain = self._get_param('CertificateChain') # Optional + current_arn = self._get_param('CertificateArn') # Optional + + # Simple parameter decoding. 
Rather do it here as its a data transport decision not part of the + # actual data + try: + certificate = base64.standard_b64decode(certificate) + except: + return AWSValidationException('The certificate is not PEM-encoded or is not valid.').response() + try: + private_key = base64.standard_b64decode(private_key) + except: + return AWSValidationException('The private key is not PEM-encoded or is not valid.').response() + if chain is not None: + try: + chain = base64.standard_b64decode(chain) + except: + return AWSValidationException('The certificate chain is not PEM-encoded or is not valid.').response() + + try: + arn = self.acm_backend.import_cert(certificate, private_key, chain=chain, arn=current_arn) + except AWSError as err: + return err.response() + + return json.dumps({'CertificateArn': arn}) def list_certificates(self): - raise NotImplementedError() + certs = [] + + for cert_bundle in self.acm_backend.get_certificates_list(): + certs.append({ + 'CertificateArn': cert_bundle.arn, + 'DomainName': cert_bundle.common_name + }) + + result = {'CertificateSummaryList': certs} + return json.dumps(result) def list_tags_for_certificate(self): - raise NotImplementedError() + arn = self._get_param('CertificateArn') + + if arn is None: + msg = 'A required parameter for the specified action is not supplied.' 
+ return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + + try: + cert_bundle = self.acm_backend.get_certificate(arn) + except AWSError as err: + return err.response() + + result = {'Tags': []} + # Tag "objects" can not contain the Value part + for key, value in cert_bundle.tags: + tag_dict = {'Key': key} + if value is not None: + tag_dict['Value'] = value + result['Tags'].append(tag_dict) + + return json.dumps(result) def remove_tags_from_certificate(self): - raise NotImplementedError() + arn = self._get_param('CertificateArn') + tags = self._get_list_prefix('Tags') + + if arn is None: + msg = 'A required parameter for the specified action is not supplied.' + return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + + try: + self.acm_backend.remove_tags_from_certificate(arn, tags) + except AWSError as err: + return err.response() + + return '' def request_certificate(self): raise NotImplementedError() diff --git a/moto/acm/utils.py b/moto/acm/utils.py new file mode 100644 index 000000000..b3c441454 --- /dev/null +++ b/moto/acm/utils.py @@ -0,0 +1,7 @@ +import uuid + + +def make_arn_for_certificate(account_id, region_name): + # Example + # arn:aws:acm:eu-west-2:764371465172:certificate/c4b738b8-56fe-4b3a-b841-1c047654780b + return "arn:aws:acm:{0}:{1}:certificate/{2}".format(region_name, account_id, uuid.uuid4()) diff --git a/moto/backends.py b/moto/backends.py index b452b45fd..1d4332d64 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals +from moto.acm import acm_backends from moto.apigateway import apigateway_backends from moto.autoscaling import autoscaling_backends from moto.awslambda import lambda_backends @@ -33,6 +34,7 @@ from moto.ssm import ssm_backends from moto.sts import sts_backends BACKENDS = { + 'acm': acm_backends, 'apigateway': apigateway_backends, 'autoscaling': autoscaling_backends, 'cloudformation': cloudformation_backends, diff --git a/setup.py 
b/setup.py index 166846ce6..696436ba1 100755 --- a/setup.py +++ b/setup.py @@ -7,6 +7,7 @@ install_requires = [ "boto>=2.36.0", "boto3>=1.2.1", "cookies", + "cryptography>=2.0.0", "requests>=2.5", "xmltodict", "dicttoxml", diff --git a/tests/test_acm/resources/README.md b/tests/test_acm/resources/README.md new file mode 100644 index 000000000..fa39f2d01 --- /dev/null +++ b/tests/test_acm/resources/README.md @@ -0,0 +1,40 @@ +# Simple CA and server cert generation + +Commands: +``` +openssl genrsa -out ca.key 4096 +openssl req -x509 -new -nodes -key ca.key -sha512 -days 3650 -out ca.pem +openssl genrsa -out star_moto_com.key 2048 +openssl req -new -key star_moto_com.key -out star_moto_com.csr +openssl x509 -req -in star_moto_com.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out star_moto_com.pem -days 3650 -sha512 +``` + +Also appended GeoTrust cert to the ca.pem + +``` +-----BEGIN CERTIFICATE----- +MIIEKDCCAxCgAwIBAgIQAQAhJYiw+lmnd+8Fe2Yn3zANBgkqhkiG9w0BAQsFADBC +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMS +R2VvVHJ1c3QgR2xvYmFsIENBMB4XDTE3MDUyMjExMzIzN1oXDTE4MTIzMTIzNTk1 +OVowSTELMAkGA1UEBhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMT +HEdvb2dsZSBJbnRlcm5ldCBBdXRob3JpdHkgRzIwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCcKgR3XNhQkToGo4Lg2FBIvIk/8RlwGohGfuCPxfGJziHu +Wv5hDbcyRImgdAtTT1WkzoJile7rWV/G4QWAEsRelD+8W0g49FP3JOb7kekVxM/0 +Uw30SvyfVN59vqBrb4fA0FAfKDADQNoIc1Fsf/86PKc3Bo69SxEE630k3ub5/DFx ++5TVYPMuSq9C0svqxGoassxT3RVLix/IGWEfzZ2oPmMrhDVpZYTIGcVGIvhTlb7j +gEoQxirsupcgEcc5mRAEoPBhepUljE5SdeK27QjKFPzOImqzTs9GA5eXA37Asd57 +r0Uzz7o+cbfe9CUlwg01iZ2d+w4ReYkeN8WvjnJpAgMBAAGjggERMIIBDTAfBgNV +HSMEGDAWgBTAephojYn7qwVkDBF9qn1luMrMTjAdBgNVHQ4EFgQUSt0GFhu89mi1 +dvWBtrtiGrpagS8wDgYDVR0PAQH/BAQDAgEGMC4GCCsGAQUFBwEBBCIwIDAeBggr +BgEFBQcwAYYSaHR0cDovL2cuc3ltY2QuY29tMBIGA1UdEwEB/wQIMAYBAf8CAQAw +NQYDVR0fBC4wLDAqoCigJoYkaHR0cDovL2cuc3ltY2IuY29tL2NybHMvZ3RnbG9i +YWwuY3JsMCEGA1UdIAQaMBgwDAYKKwYBBAHWeQIFATAIBgZngQwBAgIwHQYDVR0l 
+BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4IBAQDKSeWs +12Rkd1u+cfrP9B4jx5ppY1Rf60zWGSgjZGaOHMeHgGRfBIsmr5jfCnC8vBk97nsz +qX+99AXUcLsFJnnqmseYuQcZZTTMPOk/xQH6bwx+23pwXEz+LQDwyr4tjrSogPsB +E4jLnD/lu3fKOmc2887VJwJyQ6C9bgLxRwVxPgFZ6RGeGvOED4Cmong1L7bHon8X +fOGLVq7uZ4hRJzBgpWJSwzfVO+qFKgE4h6LPcK2kesnE58rF2rwjMvL+GMJ74N87 +L9TQEOaWTPtEtyFkDbkAlDASJodYmDkFOA/MgkgMCkdm7r+0X8T/cKjhf4t5K7hl +MqO5tzHpCvX2HzLc +-----END CERTIFICATE----- +``` diff --git a/tests/test_acm/resources/ca.key b/tests/test_acm/resources/ca.key new file mode 100644 index 000000000..dc3110483 --- /dev/null +++ b/tests/test_acm/resources/ca.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAo8Yi3CUvZ/AEHjGz2o8HUX70xtWqmOE85uoyQHQAdGZHvu4S +0QPMAmr49t14PyNWo4+6BAYwgatHIMuGCPWMnmgyHQq6/Had5awhviF6MwnAGaSs +l4+YaVaCQLpPkaKVKtG16dLj49Q24uwy1d/XQnyKZpNWr5FrAayUizijfnUrLpJc +UtGnRGlSlfOxkvSiQ1tVFdVTjVQRkOGzJE2xbtVbJUu/RqyVgnKkIIl6SL4wJS+K +I5oDDLNuGgTm2ajpcMJw6fn5XYpV0eGgNxTLE+ao4x8+FGnWOvdBi3EiHf6W6w6M +v36bW8m4KAB7XH2f5DdkhmsICqhakfXlbJD2CYEg5MUUIoCSHGm6AL8YPYEOlG5l +9Sbv3ydzVn6oGKxlBN3m3LJQezT1KcfHKxBS9uDPjSHjVGjpocxZeejA0qtHwiKZ +bpkOS4GXMb/A0z+DtlN8R0m5XIOYbwNTiurNswH/XQ5dzH424KZ9drc9GTqqQNRB +hGaX3pSrnd4qKDk+WV2H9y/WaZrxAQS2IXDeiqtjZoF8c/n+k7BnELcz+cQ9hKjw +xKfge+Rx4YCCm1KVRk2w4ZojlmhCeexWOmNONRibRFAUgXxk5HUv74bEU5CSfr/z +CX9kgpxpe8WTj/MbNZtDLO/XewPSk1lnFhmyfMyiY0EbA0iqjlvzhUVr3IsCAwEA +AQKCAgAQWSD1tMiMqYrfsLpxYMGsQu4QQxfqduFrc4lcobfB5svWpVE/iA9/VkpP +6j+sncxyO1CoQi3pY72P6oEQt+I3ldMazw1nUjfky0/6+MCIA7snVCbeYjkmmroZ +1/9FXGNjiNeN5b1V6sMn18gjTVrhiikOoDqRAAUcf6u8UgUQBIYw+e85XTBDRfg2 +e8MIFl90NdPCgC789p1iRwVo5FCH7chRasRHO8cY5HS5wr9wL3wC2kIB18fiJq8g +7chVheT3mpFG6esYiUyTzN//X5X+AJJvSZka1I3MCQ6D6uEq+VT7VdJ68xpzCSnW +5GVsECY708O3H0wINFIGK/Og6L+GfnSafAcMWCfNqTtlqFkQw7I5SGd6rpJrylzx +dixR9tq5cpPf1L6lwr43SYgBfT4Kk6jOSh4YEruWxFhyKCyB87i9zNS6OLab1V4o +CTA3ePuhduq8pSf6ID4ko2kytVoxx5kQPVs7uCEbgAvqc0pYKp24WN0zf0ALsaUg +ef0IXDCkZ7kNEl5ySs+TW1KwCEMAbEcoLmchE7lZISIcCo7XAvhaj8zNOW2TMmmU 
+4QfS7IJQrL5sh1o7L9JGfXhwPJshZIfsLl7t2MAmRW9gKRG/HZyT0joxpNqKSn76 +sWrahRU/lJD8FgjexIb6Jo9Y2sdYQ55kFh9i0iubn12grsipkQKCAQEA05lFY1f3 +FPvAkldASEcT4utyyEinEdoUhxfbd6ge8GSjyrx3OpD+fXd9r8DH7VTeDUqdCjQ0 +xXjXVBvThBy3xW1iri1msPoS0rLY9KNsk+ewJjpu55kdIssAo85cq7u4Gj4OD4K+ +Ga2Ob9GB7m8i7Z+jsEe9I2zesQ0fx6+u4gSSzHdwSTTocyfXfNT3vpqxgbMzNLfh +6QvjZ0/oQik5cPOYRTpyPNpBv3puRMrrTEbi7/GeR1t8mmoj9Jx6La5l2ZpWhuEF +PEJRkPnkY1cHUMkSNq6+6dWFvxyxwQBFO9XpAbEfCTApIuUv+L7kQildhLvJuO1k +5GIXnRDMWA+p5QKCAQEAxiPMv/uMa4DX7UIUw5C1Sc8JwQHeyiTNzfTOQYywHuZO +PZxpCy2Bg8J29npBbz1XStErJTVqlSGvoLvej+UPEjOIo1wGIN2mfOHJmMgRZPj7 +0xb5ViCWti1tD2KBVzfkw8yVurVymFCELcXDgmoefaXF0JB/8lbyXJUYnVh+dVFj +UHCuhmK2FzL3zFu/sLzRIUCUIQuneh4XawIjL/qlVPUGmg1Pl3Jc5Nabb9BDndqr +F6iESnS6ojHvc9CxiIe0VjJJB5ypu/K2/LiuYPIak7KD08XmmccO0xN0JbLVotpd +gxY3QzOxdkObqsrPYam3n0umRNBes9w7xm2jY3xFrwKCAQAZ+Y2wBUNaexEHpdhJ +Rxhk2bxMY8bGhTvR3ZbeWG/72I7Wu03zjYsAAeQW6BZixvE/NnKSpxf3Sb89HvFX +HWNdw/DCKuaZQQmAfd3uIgWZHm7cMn4cxgnylHLuqM1tc2zFI+r78nO9mTWL+m71 +wwTJoLgqUpQgPiQUHeVR0Pop2p/eo6bQBcOnJzPnqgkDh9/UaRgXF5+OyRvQOdns +DT105SJDFUqit7Qsei4BGdvKkEUZaVKhtdRU7ESfqXnCE8+C59RJWGQZIpb6sgJc +Q3mtbBFlTww2jjSN4krbw1m8X6TrxT9nFFdoZjP+WAiTKprFSXwYzGN/OZ9mc4Jy +KPIVAoIBACiJanpcnH3h/kssGdNo564SDYzPNSVmIjTgwNHoVp/7vkYcmeEPjk/G +mVAT8w8vHYzQ/mK+au/X1Hat+Pq3gj3XDT1etmJC9qzWBMidJfHifqLRMHHOeQcM +hCOBo7SUWtk0Ie3w8WD4GBLFQxqLW3GZWL8y0Ppjj3IhjseiMz0NSaRLaWlVCKv2 +YXwNyUn/V0nWTHf2Sm1RerkJ1ukZ/nlDJ/acgowZeafXwDVABpVlB1vvviD9gLFu +Re8L40ZrfRmlcAt+obsyDP3nSsXKwmU1QIMzGdqcPwwwDrMnw01uH3OSN/wnt0ba +zh5DH+p7LnYIpBuwBbAGfrQ5+hOAeUECggEBAMtPtTu9gXr/pAJlxT34uHa5vnHf +dErlpJFBE0lc8rZyPWyAfL9j+ovPyjMVT0uFzoLfyxHx/vvb08TMuc2FngPk3s5j +GCv90bHKkgqNMQUOH9AtAr1VbfkI55CGZDlcqXPnLx2Q/BHvd0w2t35e0q3Wox9I +7+IqCM7S1RSwZjXQ9wk+MKnBVow8vXhDPs+txAj56RdmYlYQTlhcAWnAIpFX5W60 +40FxP0LluSNIKR9Omp33KLjbOXeXySaYbe6Bv2/XGhRz6XQTL/xoB1WJj+QGXVm8 +ZfwiH08nMFr7KvQZFv8WEn9/yX8TE3hp83GH6cSWRm+KabUjoUc6nNgFvmY= +-----END RSA PRIVATE KEY----- diff --git a/tests/test_acm/resources/ca.pem 
b/tests/test_acm/resources/ca.pem new file mode 100644 index 000000000..29c4b6d28 --- /dev/null +++ b/tests/test_acm/resources/ca.pem @@ -0,0 +1,58 @@ +-----BEGIN CERTIFICATE----- +MIIFmTCCA4GgAwIBAgIJAJEFhPHteB99MA0GCSqGSIb3DQEBDQUAMGMxCzAJBgNV +BAYTAkdCMRIwEAYDVQQIDAlCZXJrc2hpcmUxDzANBgNVBAcMBlNsb3VnaDETMBEG +A1UECgwKTW90b1NlcnZlcjELMAkGA1UECwwCUUExDTALBgNVBAMMBE1vdG8wHhcN +MTcwOTIxMjA1MTM0WhcNMjcwOTE5MjA1MTM0WjBjMQswCQYDVQQGEwJHQjESMBAG +A1UECAwJQmVya3NoaXJlMQ8wDQYDVQQHDAZTbG91Z2gxEzARBgNVBAoMCk1vdG9T +ZXJ2ZXIxCzAJBgNVBAsMAlFBMQ0wCwYDVQQDDARNb3RvMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAo8Yi3CUvZ/AEHjGz2o8HUX70xtWqmOE85uoyQHQA +dGZHvu4S0QPMAmr49t14PyNWo4+6BAYwgatHIMuGCPWMnmgyHQq6/Had5awhviF6 +MwnAGaSsl4+YaVaCQLpPkaKVKtG16dLj49Q24uwy1d/XQnyKZpNWr5FrAayUizij +fnUrLpJcUtGnRGlSlfOxkvSiQ1tVFdVTjVQRkOGzJE2xbtVbJUu/RqyVgnKkIIl6 +SL4wJS+KI5oDDLNuGgTm2ajpcMJw6fn5XYpV0eGgNxTLE+ao4x8+FGnWOvdBi3Ei +Hf6W6w6Mv36bW8m4KAB7XH2f5DdkhmsICqhakfXlbJD2CYEg5MUUIoCSHGm6AL8Y +PYEOlG5l9Sbv3ydzVn6oGKxlBN3m3LJQezT1KcfHKxBS9uDPjSHjVGjpocxZeejA +0qtHwiKZbpkOS4GXMb/A0z+DtlN8R0m5XIOYbwNTiurNswH/XQ5dzH424KZ9drc9 +GTqqQNRBhGaX3pSrnd4qKDk+WV2H9y/WaZrxAQS2IXDeiqtjZoF8c/n+k7BnELcz ++cQ9hKjwxKfge+Rx4YCCm1KVRk2w4ZojlmhCeexWOmNONRibRFAUgXxk5HUv74bE +U5CSfr/zCX9kgpxpe8WTj/MbNZtDLO/XewPSk1lnFhmyfMyiY0EbA0iqjlvzhUVr +3IsCAwEAAaNQME4wHQYDVR0OBBYEFLVMZNPKo5ZWUjU/bH8lRPyWBOYRMB8GA1Ud +IwQYMBaAFLVMZNPKo5ZWUjU/bH8lRPyWBOYRMAwGA1UdEwQFMAMBAf8wDQYJKoZI +hvcNAQENBQADggIBAC5EmWeJIRkRZ47hm+Q6QLyiRvilrBYobwJsCEsUnfYut0+v +bX+1L39hvkFPK9gx1bta38ZeVVc2uwkC59FgVFyWwQG8FpFo5Urbxp1ErRwXBcbs +cjG/GubMYJ0aNUYRbV4phlIh1nXby4vqRGAGukvdzix5UO3HrnT/T/mOzdXtvZ0H +KjB7z+CT5m6fqB+vbnOnY8kJNvzl1oz22NAvGqNM32MA/7oFg9bfpLAuaHwsXxXj +5J2GfN82DaVvFvwJ1RYcvC1UsTm6b69YLrnMvimZ+kH4a9HNz7JZPEBrGg87EclN +QecwL0RvAYq2AN+u5bPJSa4eel3wnimfgaKqiVEebx6IcBeoCu4HEfz46AJ/mCoT +5Y+41t0RhpfawJWz4v7QuEf7lf7d0lvk27VmGWmAjQv3MrDIVpyPmSG73o5b9zos +i2aGClD2kn+YPY8/XoDUc8qFNhTxk/ey7xuUjwViKNDyprApT5yBTs7PazDN+JbK 
+/lLQJh2V1qq8utiCZhLGhZL353pCf56MNAB2MbVk5yyP+FhJ058ouQHerszeESTI +uuaSFKYdgOX9BHdEhCDebF3e9K3+6MeOgnfY12jzhX6dygQDcUAuIamLo5hEptBl +XD1cVBrMdxKLjxUVaYAWw2n8HBt97oMzrHhmr5JE4yIU2MYf2B5c0aewRrnG +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEKDCCAxCgAwIBAgIQAQAhJYiw+lmnd+8Fe2Yn3zANBgkqhkiG9w0BAQsFADBC +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMS +R2VvVHJ1c3QgR2xvYmFsIENBMB4XDTE3MDUyMjExMzIzN1oXDTE4MTIzMTIzNTk1 +OVowSTELMAkGA1UEBhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMT +HEdvb2dsZSBJbnRlcm5ldCBBdXRob3JpdHkgRzIwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCcKgR3XNhQkToGo4Lg2FBIvIk/8RlwGohGfuCPxfGJziHu +Wv5hDbcyRImgdAtTT1WkzoJile7rWV/G4QWAEsRelD+8W0g49FP3JOb7kekVxM/0 +Uw30SvyfVN59vqBrb4fA0FAfKDADQNoIc1Fsf/86PKc3Bo69SxEE630k3ub5/DFx ++5TVYPMuSq9C0svqxGoassxT3RVLix/IGWEfzZ2oPmMrhDVpZYTIGcVGIvhTlb7j +gEoQxirsupcgEcc5mRAEoPBhepUljE5SdeK27QjKFPzOImqzTs9GA5eXA37Asd57 +r0Uzz7o+cbfe9CUlwg01iZ2d+w4ReYkeN8WvjnJpAgMBAAGjggERMIIBDTAfBgNV +HSMEGDAWgBTAephojYn7qwVkDBF9qn1luMrMTjAdBgNVHQ4EFgQUSt0GFhu89mi1 +dvWBtrtiGrpagS8wDgYDVR0PAQH/BAQDAgEGMC4GCCsGAQUFBwEBBCIwIDAeBggr +BgEFBQcwAYYSaHR0cDovL2cuc3ltY2QuY29tMBIGA1UdEwEB/wQIMAYBAf8CAQAw +NQYDVR0fBC4wLDAqoCigJoYkaHR0cDovL2cuc3ltY2IuY29tL2NybHMvZ3RnbG9i +YWwuY3JsMCEGA1UdIAQaMBgwDAYKKwYBBAHWeQIFATAIBgZngQwBAgIwHQYDVR0l +BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4IBAQDKSeWs +12Rkd1u+cfrP9B4jx5ppY1Rf60zWGSgjZGaOHMeHgGRfBIsmr5jfCnC8vBk97nsz +qX+99AXUcLsFJnnqmseYuQcZZTTMPOk/xQH6bwx+23pwXEz+LQDwyr4tjrSogPsB +E4jLnD/lu3fKOmc2887VJwJyQ6C9bgLxRwVxPgFZ6RGeGvOED4Cmong1L7bHon8X +fOGLVq7uZ4hRJzBgpWJSwzfVO+qFKgE4h6LPcK2kesnE58rF2rwjMvL+GMJ74N87 +L9TQEOaWTPtEtyFkDbkAlDASJodYmDkFOA/MgkgMCkdm7r+0X8T/cKjhf4t5K7hl +MqO5tzHpCvX2HzLc +-----END CERTIFICATE----- + diff --git a/tests/test_acm/resources/ca.srl b/tests/test_acm/resources/ca.srl new file mode 100644 index 000000000..ba4789240 --- /dev/null +++ b/tests/test_acm/resources/ca.srl @@ -0,0 +1 @@ +DF5D91CC8A8FBAA0 diff --git 
a/tests/test_acm/resources/star_moto_com-bad.pem b/tests/test_acm/resources/star_moto_com-bad.pem new file mode 100644 index 000000000..e79c7d2e6 --- /dev/null +++ b/tests/test_acm/resources/star_moto_com-bad.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEUDCCAjgCCQDfXZHMio+6oDANBgkqhkiG9w0BAQ0FADBjMQswCQYDVQQGEwJH +gbhdsgndthgngfdhujmnfhjmnftghjmQjESMBAGA1UECAwJQmVya3NoaXJlMQ8wDQYDVQQHDAZTbG91Z2gxEzARBgNVBAoM +Ck1vdG9TZXJ2ZXIxCzAJBgNVBAsMAlFBMQ0wCwYDVQQDDARNb3RvMB4XDTE3MDky +MTIxMjQ1MFoXDTI3MDkxOTIxMjQ1MFowcTELMAkGA1UEBhMCR0IxEjAQBgNVBAgM +CUJlcmtzaGlyZTEPMA0GA1UEBwwGU2xvdWdoMRMwEQYDVQQKDApNb3RvU2VydmVy +MRMwEQYDVQQLDApPcGVyYXRpb25zMRMwEQYDVQQDDAoqLm1vdG8uY29tMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzC/oBkzwiIBEceSC/tSD7hkqs8AW +niDXbMgAQE9oxUxtkFESxiNa+EbAMLBFtBkPRvc3iKXh/cfLo7yP8VdqEIDmJCB/ +3T3ljjmrCMwquxYgZWMShnXZV0YfC19Vzq/gFpiyoaI2SI5NOFlfwhs5hFacTGkf +vpjJvf6HnrNJ7keQR+oGJNf7jVaCgOVdJ4lt7+98YDVde7jLx1DN+QbvViJQl60n +K3bmfuLiiw8154Eyi9DOcJE8AB+W7KpPdrmbPisR1EiqY0i0L62ZixN0rPi5hHF+ +ozwURL1axcmLjlhIFi8YhBCNcY6ThE7jrqgLIq1n6d8ezRxjDKmqfH1spQIDAQAB +MA0GCSqGSIb3DQEBDQUAA4ICAQCgl/EfjE0Jh3cqQgoOlaFq6L1iJVgy5sYKCC4r +OU4dHgifZ6/grqCJesGiS1Vh4L8XklN++C2aSL73lVtxXoCSopP8Yj0rOGeA6b+7 +Fetm4ZQYF61QtahC0L2fkvKXR+uz1I85ndSoMJPT8lbm7sYJuL81Si32NOo6kC6y +4eKzV4KznxdAf6XaQMKtMIyXO3PWTrjm5ayzS6UsmnBvULGDCaAQznFlVFdGNSHx +CaENICR0CBcB+vbL7FPC683a4afceM+aMcMVElWG5q8fxtgbL/aPhzfonhDGWOM4 +Rdg8x+yDdi7swxmWlcW5wlP8LpLxN/S3GR9j9IyelxUGmb20yTph3i1K6RM/Fm2W +PI8xdneA6qycUAJo93NfaCuNK7yBfK3uDLqmWlGh3xCG+I1JETLRbxYBWiqeVTb3 +qjHMrsgqTqjcaCiKR/5H2eVkdcr8mLxrV5niyBItDl1xGxj4LF8hDLormhaCjiBb +N1cMq5saj/BpoIanlqOWby6uRMYlZvuhwKQGPVWgfuRWKFzGbMWyPCxATbiU89Wb +IykNkT1zTCE/eZwH12T4A7jrBiWq8WNfIST0Z7MReE6Oz+M9Pxx7DyDzSb2Y1RmU +xNYd8CavZLCfns00xZSo+10deMoKVS9GgxSHcS4ELaVaBQwu35emiMJSLcK7iNGE +I4WVSA== +-----END CERTIFICATE----- diff --git a/tests/test_acm/resources/star_moto_com.csr b/tests/test_acm/resources/star_moto_com.csr new file mode 100644 index 000000000..9b745261f --- 
/dev/null +++ b/tests/test_acm/resources/star_moto_com.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICtjCCAZ4CAQAwcTELMAkGA1UEBhMCR0IxEjAQBgNVBAgMCUJlcmtzaGlyZTEP +MA0GA1UEBwwGU2xvdWdoMRMwEQYDVQQKDApNb3RvU2VydmVyMRMwEQYDVQQLDApP +cGVyYXRpb25zMRMwEQYDVQQDDAoqLm1vdG8uY29tMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAzC/oBkzwiIBEceSC/tSD7hkqs8AWniDXbMgAQE9oxUxt +kFESxiNa+EbAMLBFtBkPRvc3iKXh/cfLo7yP8VdqEIDmJCB/3T3ljjmrCMwquxYg +ZWMShnXZV0YfC19Vzq/gFpiyoaI2SI5NOFlfwhs5hFacTGkfvpjJvf6HnrNJ7keQ +R+oGJNf7jVaCgOVdJ4lt7+98YDVde7jLx1DN+QbvViJQl60nK3bmfuLiiw8154Ey +i9DOcJE8AB+W7KpPdrmbPisR1EiqY0i0L62ZixN0rPi5hHF+ozwURL1axcmLjlhI +Fi8YhBCNcY6ThE7jrqgLIq1n6d8ezRxjDKmqfH1spQIDAQABoAAwDQYJKoZIhvcN +AQELBQADggEBAAioQDDifgKjJXhK9w0+dvTdw80cdc8Y4/vkkJe6fqR5i6qM6Nbk +FQt0YNy4dScU6/u+YBFRqRfSKS1QOh2Uq6pKoloHxlhf9gh/8aqjvgN3qy3Ncyya +D9pqlSe70NIHIIBB3EDyocTFtscEX4s8ysuGDxKysWsL57YrDCbVjliK6sRIDPOk +CqkQJXjbQdi4bwqE5iYgheQEFQV+uGpdsV7ZZi4E7KcFmIKk3PzattWUd8+bglPC +/rrzb97nRiz8J5XzoqrEPA+0ZCuQ6cvbbEOWggs5kMJe/MfihH0yGA5kIQNmTmRK +1PLqpTE6g293pgeBcWsuydIBB9pUmSMDT1I= +-----END CERTIFICATE REQUEST----- diff --git a/tests/test_acm/resources/star_moto_com.key b/tests/test_acm/resources/star_moto_com.key new file mode 100644 index 000000000..f8585a81e --- /dev/null +++ b/tests/test_acm/resources/star_moto_com.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAzC/oBkzwiIBEceSC/tSD7hkqs8AWniDXbMgAQE9oxUxtkFES +xiNa+EbAMLBFtBkPRvc3iKXh/cfLo7yP8VdqEIDmJCB/3T3ljjmrCMwquxYgZWMS +hnXZV0YfC19Vzq/gFpiyoaI2SI5NOFlfwhs5hFacTGkfvpjJvf6HnrNJ7keQR+oG +JNf7jVaCgOVdJ4lt7+98YDVde7jLx1DN+QbvViJQl60nK3bmfuLiiw8154Eyi9DO +cJE8AB+W7KpPdrmbPisR1EiqY0i0L62ZixN0rPi5hHF+ozwURL1axcmLjlhIFi8Y +hBCNcY6ThE7jrqgLIq1n6d8ezRxjDKmqfH1spQIDAQABAoIBAECa588WiQSnkQB4 +TPpUQ2oSjHBTVtSxj3fb0DiI552FkSUYgdgvV5k2yZieLW/Ofgb2MZwK4HZrwQMN +pn22KtkN78N+hPZ7nyZhGLyv3NVVKurpbfMdVqdGiIwQnhXHkB+WMO7zZDmQzN4H +aUUBWDGHNez3VhP4Q9zZrA+Kqtm5OYmkDQYO6LqR+OQmqmLEeJOsbR9EUXDuhd5O 
+CyWkBwZP5JcmP985hZ7dGTZJ9ehFLYq6i6ZLmuSkt6QS/jf+AdLjd6b2b326CUwJ +xEf3ZwQ9b+BPZ+gCx91FsooRqa3NbFhvGJ34sN25xzppa5+IDDk5XZnXJugwq5Sg +t5f07AECgYEA/G3+GIXlnyLwOksFFHQp1yZIlXxeGhVZyDwSHkXcAwRnTWZHHftr +fZ2TQkyYxsySx/pP6PUHQDwhZKFSLIpc2Di2ZIUPZSNYrzEqCZIBTO9+2DBshjs6 +2tUyvpD68lZsQpjipD6wNF+308Px5hAg5mKr5IstHCcXkJcxa3v5kVMCgYEAzxM8 +PbGQmSNalcO1cBcj/f7sbEbJOtdb94ig8KRc8ImL3ZM9dJOugqc0EchMzUzFD4H/ +CjaC25CjxfBZSxV+0D6spUeLKogdwoyAM08/ZwD6BuMKZlbim84wV0VZBXjSaihq +qdaLnx0qC7/DPLf2zQfWkJCcqvPzMf+W6PgQcycCgYA3VW0jtwY0shXy0UsVxrj9 +Ppkem5qNIS0DJZfbJvkpeCek4cypF9niOU50dBHxUhrC12345O1n+UZgprQ6q0Ha +6+OfeUN8qhjgnmhWnLjIQp+NiF/htM4b9iwfdexsfuFQX+8ejddWQ70qIIPAKLzt +g6eme5Ox3ifePCZLJ2v3nQKBgFBeitb2/8Qv8IyH9PeYQ6PlOSWdI6TuyQb9xFkh +seC5wcsxxnxkhSq4coEkWIql7SXjsnToS0mkjavZaQ63PQzeBmvvpJfRVJuZpHhF +nboAqwnZPMQTnMgT8rcsdyykhCYnoZ5hYrdSvmro9oGudN+G10QsnGHNZOpW5N9u +yBOpAoGASb5aNQU9QFT8kyxZB+nKAuh6efa6HNMXMdEoYD9VOm0zPMRtorZdX4s4 +nYctHiIUmVAIXtkG0tR+cOelv2qKR5EfOo3HZtaP+fbOd0IykoZcbQJpc3PwDcCq +WgkRhN4dCVYD3ZXFYlUrCoDca7JE1KxmIbrlVSAaYilkt7UB3Qk= +-----END RSA PRIVATE KEY----- diff --git a/tests/test_acm/resources/star_moto_com.pem b/tests/test_acm/resources/star_moto_com.pem new file mode 100644 index 000000000..6d599d53e --- /dev/null +++ b/tests/test_acm/resources/star_moto_com.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEUDCCAjgCCQDfXZHMio+6oDANBgkqhkiG9w0BAQ0FADBjMQswCQYDVQQGEwJH +QjESMBAGA1UECAwJQmVya3NoaXJlMQ8wDQYDVQQHDAZTbG91Z2gxEzARBgNVBAoM +Ck1vdG9TZXJ2ZXIxCzAJBgNVBAsMAlFBMQ0wCwYDVQQDDARNb3RvMB4XDTE3MDky +MTIxMjQ1MFoXDTI3MDkxOTIxMjQ1MFowcTELMAkGA1UEBhMCR0IxEjAQBgNVBAgM +CUJlcmtzaGlyZTEPMA0GA1UEBwwGU2xvdWdoMRMwEQYDVQQKDApNb3RvU2VydmVy +MRMwEQYDVQQLDApPcGVyYXRpb25zMRMwEQYDVQQDDAoqLm1vdG8uY29tMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzC/oBkzwiIBEceSC/tSD7hkqs8AW +niDXbMgAQE9oxUxtkFESxiNa+EbAMLBFtBkPRvc3iKXh/cfLo7yP8VdqEIDmJCB/ +3T3ljjmrCMwquxYgZWMShnXZV0YfC19Vzq/gFpiyoaI2SI5NOFlfwhs5hFacTGkf +vpjJvf6HnrNJ7keQR+oGJNf7jVaCgOVdJ4lt7+98YDVde7jLx1DN+QbvViJQl60n 
+K3bmfuLiiw8154Eyi9DOcJE8AB+W7KpPdrmbPisR1EiqY0i0L62ZixN0rPi5hHF+ +ozwURL1axcmLjlhIFi8YhBCNcY6ThE7jrqgLIq1n6d8ezRxjDKmqfH1spQIDAQAB +MA0GCSqGSIb3DQEBDQUAA4ICAQCgl/EfjE0Jh3cqQgoOlaFq6L1iJVgy5sYKCC4r +OU4dHgifZ6/grqCJesGiS1Vh4L8XklN++C2aSL73lVtxXoCSopP8Yj0rOGeA6b+7 +Fetm4ZQYF61QtahC0L2fkvKXR+uz1I85ndSoMJPT8lbm7sYJuL81Si32NOo6kC6y +4eKzV4KznxdAf6XaQMKtMIyXO3PWTrjm5ayzS6UsmnBvULGDCaAQznFlVFdGNSHx +CaENICR0CBcB+vbL7FPC683a4afceM+aMcMVElWG5q8fxtgbL/aPhzfonhDGWOM4 +Rdg8x+yDdi7swxmWlcW5wlP8LpLxN/S3GR9j9IyelxUGmb20yTph3i1K6RM/Fm2W +PI8xdneA6qycUAJo93NfaCuNK7yBfK3uDLqmWlGh3xCG+I1JETLRbxYBWiqeVTb3 +qjHMrsgqTqjcaCiKR/5H2eVkdcr8mLxrV5niyBItDl1xGxj4LF8hDLormhaCjiBb +N1cMq5saj/BpoIanlqOWby6uRMYlZvuhwKQGPVWgfuRWKFzGbMWyPCxATbiU89Wb +IykNkT1zTCE/eZwH12T4A7jrBiWq8WNfIST0Z7MReE6Oz+M9Pxx7DyDzSb2Y1RmU +xNYd8CavZLCfns00xZSo+10deMoKVS9GgxSHcS4ELaVaBQwu35emiMJSLcK7iNGE +I4WVSA== +-----END CERTIFICATE----- diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py new file mode 100644 index 000000000..b45018e67 --- /dev/null +++ b/tests/test_acm/test_acm.py @@ -0,0 +1,32 @@ +from __future__ import unicode_literals + +import os +import boto3 +import sure # noqa + +from botocore.exceptions import ClientError + +from moto import mock_acm + + +RESOURCE_FOLDER = os.path.join(os.path.dirname(__file__), 'resources') +_GET_RESOURCE = lambda x: open(os.path.join(RESOURCE_FOLDER, x), 'rb').read() +CA_CRT = _GET_RESOURCE('ca.pem') +CA_KEY = _GET_RESOURCE('ca.key') +SERVER_CRT = _GET_RESOURCE('star_moto_com.pem') +SERVER_CRT_BAD = _GET_RESOURCE('star_moto_com-bad.pem') +SERVER_KEY = _GET_RESOURCE('star_moto_com.key') + + +@mock_acm +def test_import_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + + resp = client.import_certificate( + Certificate=SERVER_CRT, + PrivateKey=SERVER_KEY, + CertificateChain=CA_CRT + ) + resp = client.get_certificate(CertificateArn=resp['CertificateArn']) + + print(resp) From ea318edc9462f568ac00a79661c8699f1211da97 Mon Sep 17 00:00:00 
2001 From: Terry Cain Date: Fri, 22 Sep 2017 00:22:50 +0100 Subject: [PATCH 246/412] Flake8 --- moto/acm/models.py | 1 - moto/acm/responses.py | 1 - 2 files changed, 2 deletions(-) diff --git a/moto/acm/models.py b/moto/acm/models.py index 46a7d97b2..b51d7aa68 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -236,7 +236,6 @@ class AWSCertificateManagerBackend(BaseBackend): pass - acm_backends = {} for region, ec2_backend in ec2_backends.items(): acm_backends[region] = AWSCertificateManagerBackend(region) diff --git a/moto/acm/responses.py b/moto/acm/responses.py index 06ef43bac..a9243b7ea 100644 --- a/moto/acm/responses.py +++ b/moto/acm/responses.py @@ -162,4 +162,3 @@ class AWSCertificateManagerResponse(BaseResponse): def resend_validation_email(self): raise NotImplementedError() - From 037b35702944bf825c94422a21eedc8ab9c2865f Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Fri, 22 Sep 2017 13:12:11 +1200 Subject: [PATCH 247/412] add basic projection expressions --- moto/dynamodb2/models.py | 16 +++- moto/dynamodb2/responses.py | 10 ++- tests/test_dynamodb2/test_dynamodb.py | 124 ++++++++++++++++++++++++++ 3 files changed, 145 insertions(+), 5 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 5915d6eea..fd4c01805 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -412,7 +412,8 @@ class Table(BaseModel): return None def query(self, hash_key, range_comparison, range_objs, limit, - exclusive_start_key, scan_index_forward, index_name=None, **filter_kwargs): + exclusive_start_key, scan_index_forward, projection_expression, index_name=None, **filter_kwargs): + results = [] if index_name: all_indexes = (self.global_indexes or []) + (self.indexes or []) @@ -483,6 +484,14 @@ class Table(BaseModel): else: results.sort(key=lambda item: item.range_key) + + if projection_expression: + expressions = [x.strip() for x in projection_expression.split(',')] + for result in possible_results: + for attr in 
list(result.attrs): + if attr not in expressions: + result.attrs.pop(attr) + if scan_index_forward is False: results.reverse() @@ -490,6 +499,7 @@ class Table(BaseModel): results, last_evaluated_key = self._trim_results(results, limit, exclusive_start_key) + return results, scanned_count, last_evaluated_key def all_items(self): @@ -678,7 +688,7 @@ class DynamoDBBackend(BaseBackend): return table.get_item(hash_key, range_key) def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts, - limit, exclusive_start_key, scan_index_forward, index_name=None, **filter_kwargs): + limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None, **filter_kwargs): table = self.tables.get(table_name) if not table: return None, None @@ -688,7 +698,7 @@ class DynamoDBBackend(BaseBackend): for range_value in range_value_dicts] return table.query(hash_key, range_comparison, range_values, limit, - exclusive_start_key, scan_index_forward, index_name, **filter_kwargs) + exclusive_start_key, scan_index_forward, projection_expression, index_name, **filter_kwargs) def scan(self, table_name, filters, limit, exclusive_start_key): table = self.tables.get(table_name) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index cf715bfbc..031ac1871 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -276,6 +276,8 @@ class DynamoHandler(BaseResponse): name = self.body['TableName'] # {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}} key_condition_expression = self.body.get('KeyConditionExpression') + projection_expression = self.body.get('ProjectionExpression') + filter_kwargs = {} if key_condition_expression: value_alias_map = self.body['ExpressionAttributeValues'] @@ -383,16 +385,20 @@ class DynamoHandler(BaseResponse): scan_index_forward = self.body.get("ScanIndexForward") items, scanned_count, 
last_evaluated_key = dynamodb_backend2.query( name, hash_key, range_comparison, range_values, limit, - exclusive_start_key, scan_index_forward, index_name=index_name, **filter_kwargs) + exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, **filter_kwargs) if items is None: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er, 'Requested resource not found') result = { "Count": len(items), - "ConsumedCapacityUnits": 1, + 'ConsumedCapacity': { + 'TableName': name, + 'CapacityUnits': 1, + }, "ScannedCount": scanned_count } + if self.body.get('Select', '').upper() != 'COUNT': result["Items"] = [item.attrs for item in items] diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 8f320cbab..7e79ac350 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -9,6 +9,7 @@ from moto import mock_dynamodb2, mock_dynamodb2_deprecated from moto.dynamodb2 import dynamodb_backend2 from boto.exception import JSONResponseError from botocore.exceptions import ClientError +from boto3.dynamodb.conditions import Key from tests.helpers import requires_boto_gte import tests.backport_assert_raises from nose.tools import assert_raises @@ -228,3 +229,126 @@ def test_scan_returns_consumed_capacity(): assert 'ConsumedCapacity' in response assert 'CapacityUnits' in response['ConsumedCapacity'] assert response['ConsumedCapacity']['TableName'] == name + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_query_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') + ) + + assert 'ConsumedCapacity' in results + assert 'CapacityUnits' in results['ConsumedCapacity'] + assert results['ConsumedCapacity']['CapacityUnits'] == 1 + +@mock_dynamodb2 +def test_projection_expressions(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message' + }) + # Test a query returning all items + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body, subject' + ) + + assert 'body' in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'subject' in results['Items'][0] + + 
table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '1234', + 'body': 'yet another test message' + }) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body' + ) + + assert 'body' in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'body' in results['Items'][1] + assert results['Items'][1]['body'] == 'yet another test message' From 8840495f77fc456e73dabca8a60ba5ac0ee6c9a9 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Fri, 22 Sep 2017 15:40:30 +1200 Subject: [PATCH 248/412] add AttributeValueNames for basic ProjectionExpressions on query --- moto/dynamodb2/responses.py | 7 +++ tests/test_dynamodb2/test_dynamodb.py | 61 ++++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 031ac1871..37b73160e 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -277,6 +277,13 @@ class DynamoHandler(BaseResponse): # {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}} key_condition_expression = self.body.get('KeyConditionExpression') projection_expression = self.body.get('ProjectionExpression') + expression_attribute_names = self.body.get('ExpressionAttributeNames') + + if projection_expression and expression_attribute_names: + expressions = [x.strip() for x in projection_expression.split(',')] + for expression in expressions: + if expression in expression_attribute_names: + projection_expression = projection_expression.replace(expression, expression_attribute_names[expression]) filter_kwargs = {} if key_condition_expression: diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 7e79ac350..4eb73730a 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -281,7 
+281,7 @@ def test_query_returns_consumed_capacity(): assert results['ConsumedCapacity']['CapacityUnits'] == 1 @mock_dynamodb2 -def test_projection_expressions(): +def test_basic_projection_expressions(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') # Create the DynamoDB table. @@ -352,3 +352,62 @@ def test_projection_expressions(): assert results['Items'][0]['body'] == 'some test message' assert 'body' in results['Items'][1] assert results['Items'][1]['body'] == 'yet another test message' + +@mock_dynamodb2 +def test_basic_projection_expressions_with_attr_expression_names(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message' + }) + # Test a query returning all items + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='#rl, subject', + ExpressionAttributeNames={'#rl':'body'}, + ) + + assert 'body' in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'subject' in results['Items'][0] + assert results['Items'][0]['subject'] == '123' From d0f38407a1af857f2b23c986a0daa05fe396a729 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Fri, 22 Sep 2017 15:50:08 +1200 Subject: [PATCH 249/412] update expression attribute test --- 
moto/dynamodb2/models.py | 4 ++-- tests/test_dynamodb2/test_dynamodb.py | 15 +++++++++++---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index f21f9ebbd..fde269726 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -412,8 +412,8 @@ class Table(BaseModel): return None def query(self, hash_key, range_comparison, range_objs, limit, - exclusive_start_key, scan_index_forward, projection_expression, index_name=None, **filter_kwargs): - + exclusive_start_key, scan_index_forward, projection_expression, + index_name=None, **filter_kwargs): results = [] if index_name: all_indexes = (self.global_indexes or []) + (self.indexes or []) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 4eb73730a..2d58740f5 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -390,24 +390,31 @@ def test_basic_projection_expressions_with_attr_expression_names(): table.put_item(Item={ 'forum_name': 'the-key', 'subject': '123', - 'body': 'some test message' + 'body': 'some test message', + 'attachment': 'something' }) table.put_item(Item={ 'forum_name': 'not-the-key', 'subject': '123', - 'body': 'some other test message' + 'body': 'some other test message', + 'attachment': 'something' }) # Test a query returning all items results = table.query( KeyConditionExpression=Key('forum_name').eq( 'the-key'), - ProjectionExpression='#rl, subject', - ExpressionAttributeNames={'#rl':'body'}, + ProjectionExpression='#rl, #rt, subject', + ExpressionAttributeNames={ + '#rl': 'body', + '#rt': 'attachment' + }, ) assert 'body' in results['Items'][0] assert results['Items'][0]['body'] == 'some test message' assert 'subject' in results['Items'][0] assert results['Items'][0]['subject'] == '123' + assert 'attachment' in results['Items'][0] + assert results['Items'][0]['attachment'] == 'something' From 
84fc4734fc9ffc0283869fc7e79b85ade0374a6b Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 22 Sep 2017 14:03:12 +0900 Subject: [PATCH 250/412] fix typo --- setup_new_function.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup_new_function.py b/setup_new_function.py index 172c21bbb..3927ace30 100755 --- a/setup_new_function.py +++ b/setup_new_function.py @@ -79,7 +79,7 @@ def get_test_dir(service): return os.path.join('tests', 'test_{}'.format(service)) -def render_teamplte(tmpl_dir, tmpl_filename, context, service, alt_filename=None): +def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None): is_test = True if 'test' in tmpl_dir else False rendered = jinja2.Environment( loader=jinja2.FileSystemLoader(tmpl_dir) @@ -121,7 +121,7 @@ def initialize_service(service, operation, api_protocol): tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib') for tmpl_filename in os.listdir(tmpl_dir): - render_teamplte( + render_template( tmpl_dir, tmpl_filename, tmpl_context, service ) @@ -134,7 +134,7 @@ def initialize_service(service, operation, api_protocol): tmpl_dir = os.path.join(TEMPLATE_DIR, 'test') for tmpl_filename in os.listdir(tmpl_dir): alt_filename = 'test_{}.py'.format(service) if tmpl_filename == 'test_service.py.j2' else None - render_teamplte( + render_template( tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename ) From f8cdb50f461125cfbce22b2b5525dd618d0d9233 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 22 Sep 2017 19:11:13 +0900 Subject: [PATCH 251/412] support python2 by using "u" string --- setup_new_function.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup_new_function.py b/setup_new_function.py index 3927ace30..0102d74b2 100755 --- a/setup_new_function.py +++ b/setup_new_function.py @@ -31,16 +31,16 @@ OUTPUT_IGNORED_IN_BACKEND = ['NextMarker'] def print_progress(title, body, color): - click.secho('\t{}\t'.format(title), fg=color, nl=False) + 
click.secho(u'\t{}\t'.format(title), fg=color, nl=False) click.echo(body) def select_service_and_operation(): service_names = Session().get_available_services() service_completer = WordCompleter(service_names) - service_name = prompt('Select service: ', completer=service_completer) + service_name = prompt(u'Select service: ', completer=service_completer) if service_name not in service_names: - click.secho('{} is not valid service'.format(service_name), fg='red') + click.secho(u'{} is not valid service'.format(service_name), fg='red') raise click.Abort() moto_client = get_moto_implementation(service_name) real_client = boto3.client(service_name, region_name='us-east-1') @@ -60,7 +60,7 @@ def select_service_and_operation(): check = 'X' if operation_name in implemented else ' ' click.secho('[{}] {}'.format(check, operation_name)) click.echo('=================================') - operation_name = prompt('Select Operation: ', completer=operation_completer) + operation_name = prompt(u'Select Operation: ', completer=operation_completer) if operation_name not in operation_names: click.secho('{} is not valid operation'.format(operation_name), fg='red') From edbbbf6d200b95279f91266f01507a30342a912d Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 11:21:36 +0100 Subject: [PATCH 252/412] Nearly finished implementation and tests --- moto/acm/models.py | 54 +++++++++- moto/acm/responses.py | 32 +++++- tests/test_acm/test_acm.py | 211 ++++++++++++++++++++++++++++++++++++- 3 files changed, 287 insertions(+), 10 deletions(-) diff --git a/moto/acm/models.py b/moto/acm/models.py index b51d7aa68..e4b0204c1 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -43,6 +43,11 @@ MqO5tzHpCvX2HzLc # so for now a cheap response is just give any old root CA +def datetime_to_epoch(date): + # As only Py3 has datetime.timestamp() + return int((date - datetime.datetime(1970, 1, 1)).total_seconds()) + + class AWSError(Exception): TYPE = None STATUS = 400 @@ -64,7 +69,8 @@ 
class AWSResourceNotFoundException(AWSError): class CertBundle(BaseModel): - def __init__(self, certificate, private_key, chain=None, region='us-east-1', arn=None): + def __init__(self, certificate, private_key, chain=None, region='us-east-1', arn=None, cert_type='IMPORTED'): + self.created_at = datetime.datetime.now() self.cert = certificate self._cert = None self.common_name = None @@ -73,6 +79,7 @@ class CertBundle(BaseModel): self.chain = chain self.tags = {} self._chain = None + self.type = cert_type # Should really be an enum # AWS always returns your chain + root CA if self.chain is None: @@ -132,10 +139,13 @@ class CertBundle(BaseModel): self._chain = [] for cert_armored in self.chain.split(b'-\n-'): + # Would leave encoded but Py2 does not have raw binary strings + cert_armored = cert_armored.decode() + # Fix missing -'s on split - cert_armored = re.sub(rb'^----B', b'-----B', cert_armored) - cert_armored = re.sub(rb'E----$', b'E-----', cert_armored) - cert = cryptography.x509.load_pem_x509_certificate(cert_armored, default_backend()) + cert_armored = re.sub(r'^----B', '-----B', cert_armored) + cert_armored = re.sub(r'E----$', 'E-----', cert_armored) + cert = cryptography.x509.load_pem_x509_certificate(cert_armored.encode(), default_backend()) self._chain.append(cert) now = datetime.datetime.now() @@ -150,6 +160,42 @@ class CertBundle(BaseModel): raise raise AWSValidationException('The certificate is not PEM-encoded or is not valid.') + def describe(self): + #'RenewalSummary': {}, # Only when cert is amazon issued + + if self._key.key_size == 1024: + key_algo = 'RSA_1024' + elif self._key.key_size == 2048: + key_algo = 'RSA_2048' + else: + key_algo = 'EC_prime256v1' + + result = { + 'Certificate': { + 'CertificateArn': self.arn, + 'DomainName': self.common_name, + 'InUseBy': [], + 'Issuer': self._cert.issuer.get_attributes_for_oid(cryptography.x509.OID_COMMON_NAME)[0].value, + 'KeyAlgorithm': key_algo, + 'NotAfter': 
datetime_to_epoch(self._cert.not_valid_after), + 'NotBefore': datetime_to_epoch(self._cert.not_valid_before), + 'Serial': self._cert.serial, + 'SignatureAlgorithm': self._cert.signature_algorithm_oid._name.upper().replace('ENCRYPTION', ''), + 'Status': 'ISSUED', # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED. + 'Subject': 'CN={0}'.format(self.common_name), + 'SubjectAlternativeNames': [], + 'Type': self.type # One of IMPORTED, AMAZON_ISSUED + } + } + + if self.type == 'IMPORTED': + result['Certificate']['ImportedAt'] = datetime_to_epoch(self.created_at) + else: + result['Certificate']['CreatedAt'] = datetime_to_epoch(self.created_at) + result['Certificate']['IssuedAt'] = datetime_to_epoch(self.created_at) + + return result + def __str__(self): return self.arn diff --git a/moto/acm/responses.py b/moto/acm/responses.py index a9243b7ea..35cf64099 100644 --- a/moto/acm/responses.py +++ b/moto/acm/responses.py @@ -30,7 +30,7 @@ class AWSCertificateManagerResponse(BaseResponse): def add_tags_to_certificate(self): arn = self._get_param('CertificateArn') - tags = self._get_list_prefix('Tags') + tags = self._get_param('Tags') if arn is None: msg = 'A required parameter for the specified action is not supplied.' @@ -58,7 +58,18 @@ class AWSCertificateManagerResponse(BaseResponse): return '' def describe_certificate(self): - raise NotImplementedError() + arn = self._get_param('CertificateArn') + + if arn is None: + msg = 'A required parameter for the specified action is not supplied.' 
+ return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + + try: + cert_bundle = self.acm_backend.get_certificate(arn) + except AWSError as err: + return err.response() + + return json.dumps(cert_bundle.describe()) def get_certificate(self): arn = self._get_param('CertificateArn') @@ -79,7 +90,18 @@ class AWSCertificateManagerResponse(BaseResponse): return json.dumps(result) def import_certificate(self): - # TODO comment on what raises exceptions for all branches + """ + Returns errors on: + Certificate, PrivateKey or Chain not being properly formatted + Arn not existing if its provided + PrivateKey size > 2048 + Certificate expired or is not yet in effect + + Does not return errors on: + Checking Certificate is legit, or a selfsigned chain is provided + + :return: str(JSON) for response + """ certificate = self._get_param('Certificate') private_key = self._get_param('PrivateKey') chain = self._get_param('CertificateChain') # Optional @@ -134,7 +156,7 @@ class AWSCertificateManagerResponse(BaseResponse): result = {'Tags': []} # Tag "objects" can not contain the Value part - for key, value in cert_bundle.tags: + for key, value in cert_bundle.tags.items(): tag_dict = {'Key': key} if value is not None: tag_dict['Value'] = value @@ -144,7 +166,7 @@ class AWSCertificateManagerResponse(BaseResponse): def remove_tags_from_certificate(self): arn = self._get_param('CertificateArn') - tags = self._get_list_prefix('Tags') + tags = self._get_param('Tags') if arn is None: msg = 'A required parameter for the specified action is not supplied.' 
diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index b45018e67..ff9ec6510 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -14,10 +14,22 @@ _GET_RESOURCE = lambda x: open(os.path.join(RESOURCE_FOLDER, x), 'rb').read() CA_CRT = _GET_RESOURCE('ca.pem') CA_KEY = _GET_RESOURCE('ca.key') SERVER_CRT = _GET_RESOURCE('star_moto_com.pem') +SERVER_COMMON_NAME = '*.moto.com' SERVER_CRT_BAD = _GET_RESOURCE('star_moto_com-bad.pem') SERVER_KEY = _GET_RESOURCE('star_moto_com.key') +BAD_ARN = 'arn:aws:acm:us-east-2:123456789012:certificate/_0000000-0000-0000-0000-000000000000' +def _import_cert(client): + response = client.import_certificate( + Certificate=SERVER_CRT, + PrivateKey=SERVER_KEY, + CertificateChain=CA_CRT + ) + return response['CertificateArn'] + + +# Also tests GetCertificate @mock_acm def test_import_certificate(): client = boto3.client('acm', region_name='eu-central-1') @@ -29,4 +41,201 @@ def test_import_certificate(): ) resp = client.get_certificate(CertificateArn=resp['CertificateArn']) - print(resp) + resp['Certificate'].should.equal(SERVER_CRT.decode()) + resp.should.contain('CertificateChain') + + +@mock_acm +def test_import_bad_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + + try: + client.import_certificate( + Certificate=SERVER_CRT_BAD, + PrivateKey=SERVER_KEY, + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ValidationException') + else: + raise RuntimeError('Should of raised ValidationException') + + +@mock_acm +def test_list_certificates(): + client = boto3.client('acm', region_name='eu-central-1') + arn = _import_cert(client) + + resp = client.list_certificates() + len(resp['CertificateSummaryList']).should.equal(1) + + resp['CertificateSummaryList'][0]['CertificateArn'].should.equal(arn) + resp['CertificateSummaryList'][0]['DomainName'].should.equal(SERVER_COMMON_NAME) + + +@mock_acm +def test_get_invalid_certificate(): + client = 
boto3.client('acm', region_name='eu-central-1') + + try: + client.get_certificate(CertificateArn=BAD_ARN) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFoundException') + else: + raise RuntimeError('Should of raised ResourceNotFoundException') + + +# Also tests deleting invalid certificate +@mock_acm +def test_delete_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + arn = _import_cert(client) + + # If it does not raise an error and the next call does, all is fine + client.delete_certificate(CertificateArn=arn) + + try: + client.delete_certificate(CertificateArn=arn) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFoundException') + else: + raise RuntimeError('Should of raised ResourceNotFoundException') + + +@mock_acm +def test_describe_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + arn = _import_cert(client) + + resp = client.describe_certificate(CertificateArn=arn) + resp['Certificate']['CertificateArn'].should.equal(arn) + resp['Certificate']['DomainName'].should.equal(SERVER_COMMON_NAME) + resp['Certificate']['Issuer'].should.equal('Moto') + resp['Certificate']['KeyAlgorithm'].should.equal('RSA_2048') + resp['Certificate']['Status'].should.equal('ISSUED') + resp['Certificate']['Type'].should.equal('IMPORTED') + + +@mock_acm +def test_describe_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + + try: + client.describe_certificate(CertificateArn=BAD_ARN) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFoundException') + else: + raise RuntimeError('Should of raised ResourceNotFoundException') + + +# Also tests ListTagsForCertificate +@mock_acm +def test_add_tags_to_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + arn = _import_cert(client) + + client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[ + {'Key': 'key1', 'Value': 'value1'}, + 
{'Key': 'key2'}, + ] + ) + + resp = client.list_tags_for_certificate(CertificateArn=arn) + tags = {item['Key']: item.get('Value', '__NONE__') for item in resp['Tags']} + + tags.should.contain('key1') + tags.should.contain('key2') + tags['key1'].should.equal('value1') + + # This way, it ensures that we can detect if None is passed back when it shouldnt, + # as we store keys without values with a value of None, but it shouldnt be passed back + tags['key2'].should.equal('__NONE__') + + +@mock_acm +def test_add_tags_to_invalid_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + + try: + client.add_tags_to_certificate( + CertificateArn=BAD_ARN, + Tags=[ + {'Key': 'key1', 'Value': 'value1'}, + {'Key': 'key2'}, + ] + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFoundException') + else: + raise RuntimeError('Should of raised ResourceNotFoundException') + + +@mock_acm +def test_list_tags_for_invalid_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + + try: + client.list_tags_for_certificate(CertificateArn=BAD_ARN) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFoundException') + else: + raise RuntimeError('Should of raised ResourceNotFoundException') + + +@mock_acm +def test_remove_tags_from_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + arn = _import_cert(client) + + client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[ + {'Key': 'key1', 'Value': 'value1'}, + {'Key': 'key2'}, + {'Key': 'key3', 'Value': 'value3'}, + {'Key': 'key4', 'Value': 'value4'}, + ] + ) + + client.remove_tags_from_certificate( + CertificateArn=arn, + Tags=[ + {'Key': 'key1', 'Value': 'value2'}, # Should not remove as doesnt match + {'Key': 'key2'}, # Single key removal + {'Key': 'key3', 'Value': 'value3'}, # Exact match removal + {'Key': 'key4'} # Partial match removal + ] + ) + + resp = 
client.list_tags_for_certificate(CertificateArn=arn) + tags = {item['Key']: item.get('Value', '__NONE__') for item in resp['Tags']} + + for key in ('key2', 'key3', 'key4'): + tags.should_not.contain(key) + + tags.should.contain('key1') + + +@mock_acm +def test_remove_tags_from_invalid_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + + try: + client.remove_tags_from_certificate( + CertificateArn=BAD_ARN, + Tags=[ + {'Key': 'key1', 'Value': 'value1'}, + {'Key': 'key2'}, + ] + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFoundException') + else: + raise RuntimeError('Should of raised ResourceNotFoundException') + + + + + + From 4cc4b36f1560e84e20d9e810b6ca713c1f59f24c Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 22 Sep 2017 19:23:10 +0900 Subject: [PATCH 253/412] move helper script to scripts dir and add one to Makefile --- Makefile | 4 ++++ .../implementation_coverage.py | 0 setup_new_function.py => scripts/scaffold.py | 2 +- {template => scripts/template}/lib/__init__.py.j2 | 0 {template => scripts/template}/lib/exceptions.py.j2 | 0 {template => scripts/template}/lib/models.py.j2 | 0 {template => scripts/template}/lib/responses.py.j2 | 0 {template => scripts/template}/test/test_server.py.j2 | 0 {template => scripts/template}/test/test_service.py.j2 | 0 9 files changed, 5 insertions(+), 1 deletion(-) rename implementation_coverage.py => scripts/implementation_coverage.py (100%) rename setup_new_function.py => scripts/scaffold.py (99%) rename {template => scripts/template}/lib/__init__.py.j2 (100%) rename {template => scripts/template}/lib/exceptions.py.j2 (100%) rename {template => scripts/template}/lib/models.py.j2 (100%) rename {template => scripts/template}/lib/responses.py.j2 (100%) rename {template => scripts/template}/test/test_server.py.j2 (100%) rename {template => scripts/template}/test/test_service.py.j2 (100%) diff --git a/Makefile b/Makefile index 3c5582c2d..38a73aa28 
100644 --- a/Makefile +++ b/Makefile @@ -19,3 +19,7 @@ publish: python setup.py sdist bdist_wheel upload git tag `python setup.py --version` git push origin `python setup.py --version` + +scaffold: + @pip install -r requirements-dev.txt > /dev/null + @python scripts/scaffold.py diff --git a/implementation_coverage.py b/scripts/implementation_coverage.py similarity index 100% rename from implementation_coverage.py rename to scripts/implementation_coverage.py diff --git a/setup_new_function.py b/scripts/scaffold.py similarity index 99% rename from setup_new_function.py rename to scripts/scaffold.py index 0102d74b2..c38544abf 100755 --- a/setup_new_function.py +++ b/scripts/scaffold.py @@ -24,7 +24,7 @@ from implementation_coverage import ( ) from inflection import singularize -TEMPLATE_DIR = './template' +TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), './template') INPUT_IGNORED_IN_BACKEND = ['Marker', 'PageSize'] OUTPUT_IGNORED_IN_BACKEND = ['NextMarker'] diff --git a/template/lib/__init__.py.j2 b/scripts/template/lib/__init__.py.j2 similarity index 100% rename from template/lib/__init__.py.j2 rename to scripts/template/lib/__init__.py.j2 diff --git a/template/lib/exceptions.py.j2 b/scripts/template/lib/exceptions.py.j2 similarity index 100% rename from template/lib/exceptions.py.j2 rename to scripts/template/lib/exceptions.py.j2 diff --git a/template/lib/models.py.j2 b/scripts/template/lib/models.py.j2 similarity index 100% rename from template/lib/models.py.j2 rename to scripts/template/lib/models.py.j2 diff --git a/template/lib/responses.py.j2 b/scripts/template/lib/responses.py.j2 similarity index 100% rename from template/lib/responses.py.j2 rename to scripts/template/lib/responses.py.j2 diff --git a/template/test/test_server.py.j2 b/scripts/template/test/test_server.py.j2 similarity index 100% rename from template/test/test_server.py.j2 rename to scripts/template/test/test_server.py.j2 diff --git a/template/test/test_service.py.j2 
b/scripts/template/test/test_service.py.j2 similarity index 100% rename from template/test/test_service.py.j2 rename to scripts/template/test/test_service.py.j2 From e3034275dbc67156122e7d1043047b5beb7286d8 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 14:26:05 +0100 Subject: [PATCH 254/412] Finished ACM + tests --- moto/acm/models.py | 126 ++++++++++++++++++++++++++++++++++--- moto/acm/responses.py | 52 ++++++++++++--- tests/test_acm/test_acm.py | 116 ++++++++++++++++++++++++++++++++++ 3 files changed, 278 insertions(+), 16 deletions(-) diff --git a/moto/acm/models.py b/moto/acm/models.py index e4b0204c1..de26529a4 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -9,7 +9,8 @@ from moto.ec2 import ec2_backends from .utils import make_arn_for_certificate import cryptography.x509 -from cryptography.hazmat.primitives import serialization +import cryptography.hazmat.primitives.asymmetric.rsa +from cryptography.hazmat.primitives import serialization, hashes from cryptography.hazmat.backends import default_backend @@ -69,7 +70,7 @@ class AWSResourceNotFoundException(AWSError): class CertBundle(BaseModel): - def __init__(self, certificate, private_key, chain=None, region='us-east-1', arn=None, cert_type='IMPORTED'): + def __init__(self, certificate, private_key, chain=None, region='us-east-1', arn=None, cert_type='IMPORTED', cert_status='ISSUED'): self.created_at = datetime.datetime.now() self.cert = certificate self._cert = None @@ -80,6 +81,7 @@ class CertBundle(BaseModel): self.tags = {} self._chain = None self.type = cert_type # Should really be an enum + self.status = cert_status # Should really be an enum # AWS always returns your chain + root CA if self.chain is None: @@ -102,6 +104,56 @@ class CertBundle(BaseModel): else: self.arn = arn + @classmethod + def generate_cert(cls, domain_name, sans=None): + if sans is None: + sans = set() + else: + sans = set(sans) + + sans.add(domain_name) + sans = [cryptography.x509.DNSName(item) for 
item in sans] + + key = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend()) + subject = cryptography.x509.Name([ + cryptography.x509.NameAttribute(cryptography.x509.NameOID.COUNTRY_NAME, u"US"), + cryptography.x509.NameAttribute(cryptography.x509.NameOID.STATE_OR_PROVINCE_NAME, u"CA"), + cryptography.x509.NameAttribute(cryptography.x509.NameOID.LOCALITY_NAME, u"San Francisco"), + cryptography.x509.NameAttribute(cryptography.x509.NameOID.ORGANIZATION_NAME, u"My Company"), + cryptography.x509.NameAttribute(cryptography.x509.NameOID.COMMON_NAME, domain_name), + ]) + issuer = cryptography.x509.Name([ # C = US, O = Amazon, OU = Server CA 1B, CN = Amazon + cryptography.x509.NameAttribute(cryptography.x509.NameOID.COUNTRY_NAME, u"US"), + cryptography.x509.NameAttribute(cryptography.x509.NameOID.ORGANIZATION_NAME, u"Amazon"), + cryptography.x509.NameAttribute(cryptography.x509.NameOID.ORGANIZATIONAL_UNIT_NAME, u"Server CA 1B"), + cryptography.x509.NameAttribute(cryptography.x509.NameOID.COMMON_NAME, u"Amazon"), + ]) + cert = cryptography.x509.CertificateBuilder().subject_name( + subject + ).issuer_name( + issuer + ).public_key( + key.public_key() + ).serial_number( + cryptography.x509.random_serial_number() + ).not_valid_before( + datetime.datetime.utcnow() + ).not_valid_after( + datetime.datetime.utcnow() + datetime.timedelta(days=365) + ).add_extension( + cryptography.x509.SubjectAlternativeName(sans), + critical=False, + ).sign(key, hashes.SHA512(), default_backend()) + + cert_armored = cert.public_bytes(serialization.Encoding.PEM) + private_key = key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption() + ) + + return cls(cert_armored, private_key, cert_type='AMAZON_ISSUED', cert_status='PENDING_VALIDATION') + def validate_pk(self): try: self._key = 
serialization.load_pem_private_key(self.key, password=None, backend=default_backend()) @@ -160,9 +212,15 @@ class CertBundle(BaseModel): raise raise AWSValidationException('The certificate is not PEM-encoded or is not valid.') - def describe(self): - #'RenewalSummary': {}, # Only when cert is amazon issued + def check(self): + # Basically, if the certificate is pending, and then checked again after 1 min + # It will appear as if its been validated + if self.type == 'AMAZON_ISSUED' and self.status == 'PENDING_VALIDATION' and \ + (datetime.datetime.now() - self.created_at).total_seconds() > 60: # 1min + self.status = 'ISSUED' + def describe(self): + # 'RenewalSummary': {}, # Only when cert is amazon issued if self._key.key_size == 1024: key_algo = 'RSA_1024' elif self._key.key_size == 2048: @@ -170,6 +228,12 @@ class CertBundle(BaseModel): else: key_algo = 'EC_prime256v1' + # Look for SANs + san_obj = self._cert.extensions.get_extension_for_oid(cryptography.x509.OID_SUBJECT_ALTERNATIVE_NAME) + sans = [] + if san_obj is not None: + sans = [item.value for item in san_obj.value] + result = { 'Certificate': { 'CertificateArn': self.arn, @@ -181,9 +245,9 @@ class CertBundle(BaseModel): 'NotBefore': datetime_to_epoch(self._cert.not_valid_before), 'Serial': self._cert.serial, 'SignatureAlgorithm': self._cert.signature_algorithm_oid._name.upper().replace('ENCRYPTION', ''), - 'Status': 'ISSUED', # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED. + 'Status': self.status, # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED. 
'Subject': 'CN={0}'.format(self.common_name), - 'SubjectAlternativeNames': [], + 'SubjectAlternativeNames': sans, 'Type': self.type # One of IMPORTED, AMAZON_ISSUED } } @@ -208,16 +272,44 @@ class AWSCertificateManagerBackend(BaseBackend): super(AWSCertificateManagerBackend, self).__init__() self.region = region self._certificates = {} + self._idempotency_tokens = {} def reset(self): region = self.region self.__dict__ = {} self.__init__(region) - def _arn_not_found(self, arn): + @staticmethod + def _arn_not_found(arn): msg = 'Certificate with arn {0} not found in account {1}'.format(arn, DEFAULT_ACCOUNT_ID) return AWSResourceNotFoundException(msg) + def _get_arn_from_idempotency_token(self, token): + """ + If token doesnt exist, return None, later it will be + set with an expiry and arn. + + If token expiry has passed, delete entry and return None + + Else return ARN + + :param token: String token + :return: None or ARN + """ + now = datetime.datetime.now() + if token in self._idempotency_tokens: + if self._idempotency_tokens[token]['expires'] < now: + # Token has expired, new request + del self._idempotency_tokens[token] + return None + else: + return self._idempotency_tokens[token]['arn'] + + return None + + def _set_idempotency_token_arn(self, token, arn): + self._idempotency_tokens[token] = {'arn': arn, 'expires': datetime.datetime.now() + datetime.timedelta(hours=1)} + def import_cert(self, certificate, private_key, chain=None, arn=None): if arn is not None: if arn not in self._certificates: @@ -240,13 +332,16 @@ class AWSCertificateManagerBackend(BaseBackend): :return: List of certificates :rtype: list of CertBundle """ - return self._certificates.values() + for arn in self._certificates.keys(): + yield self.get_certificate(arn) def get_certificate(self, arn): if arn not in self._certificates: raise self._arn_not_found(arn) - return self._certificates[arn] + cert_bundle = self._certificates[arn] + cert_bundle.check() + return cert_bundle def 
delete_certificate(self, arn): if arn not in self._certificates: @@ -254,6 +349,19 @@ class AWSCertificateManagerBackend(BaseBackend): del self._certificates[arn] + def request_certificate(self, domain_name, domain_validation_options, idempotency_token, subject_alt_names): + if idempotency_token is not None: + arn = self._get_arn_from_idempotency_token(idempotency_token) + if arn is not None: + return arn + + cert = CertBundle.generate_cert(domain_name, subject_alt_names) + if idempotency_token is not None: + self._set_idempotency_token_arn(idempotency_token, cert.arn) + self._certificates[cert.arn] = cert + + return cert.arn + def add_tags_to_certificate(self, arn, tags): # get_cert does arn check cert_bundle = self.get_certificate(arn) diff --git a/moto/acm/responses.py b/moto/acm/responses.py index 35cf64099..7bf12bbb8 100644 --- a/moto/acm/responses.py +++ b/moto/acm/responses.py @@ -34,7 +34,7 @@ class AWSCertificateManagerResponse(BaseResponse): if arn is None: msg = 'A required parameter for the specified action is not supplied.' - return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) try: self.acm_backend.add_tags_to_certificate(arn, tags) @@ -48,7 +48,7 @@ class AWSCertificateManagerResponse(BaseResponse): if arn is None: msg = 'A required parameter for the specified action is not supplied.' - return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) try: self.acm_backend.delete_certificate(arn) @@ -62,7 +62,7 @@ class AWSCertificateManagerResponse(BaseResponse): if arn is None: msg = 'A required parameter for the specified action is not supplied.' 
- return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) try: cert_bundle = self.acm_backend.get_certificate(arn) @@ -76,7 +76,7 @@ class AWSCertificateManagerResponse(BaseResponse): if arn is None: msg = 'A required parameter for the specified action is not supplied.' - return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) try: cert_bundle = self.acm_backend.get_certificate(arn) @@ -170,7 +170,7 @@ class AWSCertificateManagerResponse(BaseResponse): if arn is None: msg = 'A required parameter for the specified action is not supplied.' - return {'__type': 'MissingParameter', 'message': msg}, dict(status=400) + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) try: self.acm_backend.remove_tags_from_certificate(arn, tags) @@ -180,7 +180,45 @@ class AWSCertificateManagerResponse(BaseResponse): return '' def request_certificate(self): - raise NotImplementedError() + domain_name = self._get_param('DomainName') + domain_validation_options = self._get_param('DomainValidationOptions') # is ignored atm + idempotency_token = self._get_param('IdempotencyToken') + subject_alt_names = self._get_param('SubjectAlternativeNames') + + if len(subject_alt_names) > 10: + # There is initial AWS limit of 10 + msg = 'An ACM limit has been exceeded. 
Need to request SAN limit to be raised' + return json.dumps({'__type': 'LimitExceededException', 'message': msg}), dict(status=400) + + try: + arn = self.acm_backend.request_certificate(domain_name, domain_validation_options, idempotency_token, subject_alt_names) + except AWSError as err: + return err.response() + + return json.dumps({'CertificateArn': arn}) def resend_validation_email(self): - raise NotImplementedError() + arn = self._get_param('CertificateArn') + domain = self._get_param('Domain') + # ValidationDomain not used yet. + # Contains domain which is equal to or a subset of Domain + # that AWS will send validation emails to + # https://docs.aws.amazon.com/acm/latest/APIReference/API_ResendValidationEmail.html + # validation_domain = self._get_param('ValidationDomain') + + if arn is None: + msg = 'A required parameter for the specified action is not supplied.' + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) + + try: + cert_bundle = self.acm_backend.get_certificate(arn) + + if cert_bundle.common_name != domain: + msg = 'Parameter Domain does not match certificate domain' + _type = 'InvalidDomainValidationOptionsException' + return json.dumps({'__type': _type, 'message': msg}), dict(status=400) + + except AWSError as err: + return err.response() + + return '' diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index ff9ec6510..96e362d1e 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import os import boto3 +from freezegun import freeze_time import sure # noqa from botocore.exceptions import ClientError @@ -235,7 +236,122 @@ def test_remove_tags_from_invalid_certificate(): raise RuntimeError('Should of raised ResourceNotFoundException') +@mock_acm +def test_resend_validation_email(): + client = boto3.client('acm', region_name='eu-central-1') + arn = _import_cert(client) + + client.resend_validation_email( + 
CertificateArn=arn, + Domain='*.moto.com', + ValidationDomain='NOTUSEDYET' + ) + # Returns nothing, boto would raise Exceptions otherwise +@mock_acm +def test_resend_validation_email_invalid(): + client = boto3.client('acm', region_name='eu-central-1') + arn = _import_cert(client) + + try: + client.resend_validation_email( + CertificateArn=arn, + Domain='no-match.moto.com', + ValidationDomain='NOTUSEDYET' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidDomainValidationOptionsException') + else: + raise RuntimeError('Should of raised InvalidDomainValidationOptionsException') + + try: + client.resend_validation_email( + CertificateArn=BAD_ARN, + Domain='no-match.moto.com', + ValidationDomain='NOTUSEDYET' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFoundException') + else: + raise RuntimeError('Should of raised ResourceNotFoundException') +@mock_acm +def test_request_certificate(): + client = boto3.client('acm', region_name='eu-central-1') + + resp = client.request_certificate( + DomainName='google.com', + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + resp.should.contain('CertificateArn') + + +# # Also tests the SAN code +# # requires Pull: https://github.com/spulec/freezegun/pull/210 +# @freeze_time("2012-01-01 12:00:00", as_arg=True) +# @mock_acm +# def test_request_certificate(frozen_time): +# # After requesting a certificate, it should then auto-validate after 1 minute +# # Some sneaky programming for that ;-) +# client = boto3.client('acm', region_name='eu-central-1') +# +# resp = client.request_certificate( +# DomainName='google.com', +# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], +# ) +# arn = resp['CertificateArn'] +# +# resp = client.describe_certificate(CertificateArn=arn) +# resp['Certificate']['CertificateArn'].should.equal(arn) +# resp['Certificate']['DomainName'].should.equal('google.com') +# 
resp['Certificate']['Issuer'].should.equal('Amazon') +# resp['Certificate']['KeyAlgorithm'].should.equal('RSA_2048') +# resp['Certificate']['Status'].should.equal('PENDING_VALIDATION') +# resp['Certificate']['Type'].should.equal('AMAZON_ISSUED') +# len(resp['Certificate']['SubjectAlternativeNames']).should.equal(3) +# +# # Move time +# frozen_time.move_to('2012-01-01 12:02:00') +# resp = client.describe_certificate(CertificateArn=arn) +# resp['Certificate']['CertificateArn'].should.equal(arn) +# resp['Certificate']['Status'].should.equal('ISSUED') +# +# +# # requires Pull: https://github.com/spulec/freezegun/pull/210 +# @freeze_time("2012-01-01 12:00:00", as_arg=True) +# @mock_acm +# def test_request_certificate(frozen_time): +# # After requesting a certificate, it should then auto-validate after 1 minute +# # Some sneaky programming for that ;-) +# client = boto3.client('acm', region_name='eu-central-1') +# +# resp = client.request_certificate( +# IdempotencyToken='test_token', +# DomainName='google.com', +# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], +# ) +# original_arn = resp['CertificateArn'] +# +# # Should be able to request a certificate multiple times in an hour +# # after that it makes a new one +# for time_intervals in ('2012-01-01 12:15:00', '2012-01-01 12:30:00', '2012-01-01 12:45:00'): +# frozen_time.move_to(time_intervals) +# resp = client.request_certificate( +# IdempotencyToken='test_token', +# DomainName='google.com', +# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], +# ) +# arn = resp['CertificateArn'] +# arn.should.equal(original_arn) +# +# # Move time +# frozen_time.move_to('2012-01-01 13:01:00') +# resp = client.request_certificate( +# IdempotencyToken='test_token', +# DomainName='google.com', +# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], +# ) +# arn = resp['CertificateArn'] +# arn.should_not.equal(original_arn) From 
c02dfa52db8c8a777c6317bd86258ca2d57b458d Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 15:16:12 +0100 Subject: [PATCH 255/412] Updated README --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 4d5d2d7e6..8c9a6aacb 100644 --- a/README.md +++ b/README.md @@ -58,6 +58,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | Service Name | Decorator | Development Status | |------------------------------------------------------------------------------| +| ACM | @mock_acm | all endpoints done | +|------------------------------------------------------------------------------| | API Gateway | @mock_apigateway | core endpoints done | |------------------------------------------------------------------------------| | Autoscaling | @mock_autoscaling| core endpoints done | From 4029afeb5ba31d2383c405659b6f0b156614ddba Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 15:20:33 +0100 Subject: [PATCH 256/412] Added method stubs --- moto/cloudwatch/responses.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index d06fe21d7..f114260dc 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -100,6 +100,36 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(LIST_METRICS_TEMPLATE) return template.render(metrics=metrics) + def delete_dashboards(self): + raise NotImplementedError() + + def describe_alarm_history(self): + raise NotImplementedError() + + def describe_alarms_for_metric(self): + raise NotImplementedError() + + def disable_alarm_actions(self): + raise NotImplementedError() + + def enable_alarm_actions(self): + raise NotImplementedError() + + def get_dashboard(self): + raise NotImplementedError() + + def get_metric_statistics(self): + raise 
NotImplementedError() + + def list_dashboards(self): + raise NotImplementedError() + + def put_dashboard(self): + raise NotImplementedError() + + def set_alarm_state(self): + raise NotImplementedError() + PUT_METRIC_ALARM_TEMPLATE = """ From c965fdd47feaf044a904c14d63f6c42f85fa48b5 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 16:38:20 +0100 Subject: [PATCH 257/412] Added dashboard methods + tests --- moto/cloudwatch/models.py | 54 +++++++ moto/cloudwatch/responses.py | 141 ++++++++++++++---- moto/cloudwatch/utils.py | 5 + .../test_cloudwatch/test_cloudwatch_boto3.py | 94 ++++++++++++ 4 files changed, 269 insertions(+), 25 deletions(-) create mode 100644 moto/cloudwatch/utils.py create mode 100644 tests/test_cloudwatch/test_cloudwatch_boto3.py diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index ed0086d93..ac328def2 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -2,6 +2,11 @@ from moto.core import BaseBackend, BaseModel import boto.ec2.cloudwatch import datetime +from .utils import make_arn_for_dashboard + + +DEFAULT_ACCOUNT_ID = 123456789012 + class Dimension(object): @@ -44,10 +49,34 @@ class MetricDatum(BaseModel): 'value']) for dimension in dimensions] +class Dashboard(BaseModel): + def __init__(self, name, body): + # Guaranteed to be unique for now as the name is also the key of a dictionary where they are stored + self.arn = make_arn_for_dashboard(DEFAULT_ACCOUNT_ID, name) + self.name = name + self.body = body + self.last_modified = datetime.datetime.now() + + @property + def last_modified_iso(self): + return self.last_modified.isoformat() + + @property + def size(self): + return len(self) + + def __len__(self): + return len(self.body) + + def __repr__(self): + return ''.format(self.name) + + class CloudWatchBackend(BaseBackend): def __init__(self): self.alarms = {} + self.dashboards = {} self.metric_data = [] def put_metric_alarm(self, name, namespace, metric_name, comparison_operator, 
evaluation_periods, @@ -110,6 +139,31 @@ class CloudWatchBackend(BaseBackend): def get_all_metrics(self): return self.metric_data + def put_dashboard(self, name, body): + self.dashboards[name] = Dashboard(name, body) + + def list_dashboards(self, prefix=''): + for key, value in self.dashboards.items(): + if key.startswith(prefix): + yield value + + def delete_dashboards(self, dashboards): + to_delete = set(dashboards) + all_dashboards = set(self.dashboards.keys()) + + left_over = to_delete - all_dashboards + if len(left_over) > 0: + # Some dashboards are not found + return False, 'The specified dashboard does not exist. [{0}]'.format(', '.join(left_over)) + + for dashboard in to_delete: + del self.dashboards[dashboard] + + return True, None + + def get_dashboard(self, dashboard): + return self.dashboards.get(dashboard) + class LogGroup(BaseModel): diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index f114260dc..cd7ce123e 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -1,9 +1,18 @@ +import json from moto.core.responses import BaseResponse from .models import cloudwatch_backends class CloudWatchResponse(BaseResponse): + @property + def cloudwatch_backend(self): + return cloudwatch_backends[self.region] + + def _error(self, code, message, status=400): + template = self.response_template(ERROR_RESPONSE_TEMPLATE) + return template.render(code=code, message=message), dict(status=status) + def put_metric_alarm(self): name = self._get_param('AlarmName') namespace = self._get_param('Namespace') @@ -20,15 +29,14 @@ class CloudWatchResponse(BaseResponse): insufficient_data_actions = self._get_multi_param( "InsufficientDataActions.member") unit = self._get_param('Unit') - cloudwatch_backend = cloudwatch_backends[self.region] - alarm = cloudwatch_backend.put_metric_alarm(name, namespace, metric_name, - comparison_operator, - evaluation_periods, period, - threshold, statistic, - description, dimensions, - alarm_actions, 
ok_actions, - insufficient_data_actions, - unit) + alarm = self.cloudwatch_backend.put_metric_alarm(name, namespace, metric_name, + comparison_operator, + evaluation_periods, period, + threshold, statistic, + description, dimensions, + alarm_actions, ok_actions, + insufficient_data_actions, + unit) template = self.response_template(PUT_METRIC_ALARM_TEMPLATE) return template.render(alarm=alarm) @@ -37,28 +45,26 @@ class CloudWatchResponse(BaseResponse): alarm_name_prefix = self._get_param('AlarmNamePrefix') alarm_names = self._get_multi_param('AlarmNames.member') state_value = self._get_param('StateValue') - cloudwatch_backend = cloudwatch_backends[self.region] if action_prefix: - alarms = cloudwatch_backend.get_alarms_by_action_prefix( + alarms = self.cloudwatch_backend.get_alarms_by_action_prefix( action_prefix) elif alarm_name_prefix: - alarms = cloudwatch_backend.get_alarms_by_alarm_name_prefix( + alarms = self.cloudwatch_backend.get_alarms_by_alarm_name_prefix( alarm_name_prefix) elif alarm_names: - alarms = cloudwatch_backend.get_alarms_by_alarm_names(alarm_names) + alarms = self.cloudwatch_backend.get_alarms_by_alarm_names(alarm_names) elif state_value: - alarms = cloudwatch_backend.get_alarms_by_state_value(state_value) + alarms = self.cloudwatch_backend.get_alarms_by_state_value(state_value) else: - alarms = cloudwatch_backend.get_all_alarms() + alarms = self.cloudwatch_backend.get_all_alarms() template = self.response_template(DESCRIBE_ALARMS_TEMPLATE) return template.render(alarms=alarms) def delete_alarms(self): alarm_names = self._get_multi_param('AlarmNames.member') - cloudwatch_backend = cloudwatch_backends[self.region] - cloudwatch_backend.delete_alarms(alarm_names) + self.cloudwatch_backend.delete_alarms(alarm_names) template = self.response_template(DELETE_METRIC_ALARMS_TEMPLATE) return template.render() @@ -89,19 +95,26 @@ class CloudWatchResponse(BaseResponse): dimension_index += 1 metric_data.append([metric_name, value, dimensions]) metric_index 
+= 1 - cloudwatch_backend = cloudwatch_backends[self.region] - cloudwatch_backend.put_metric_data(namespace, metric_data) + self.cloudwatch_backend.put_metric_data(namespace, metric_data) template = self.response_template(PUT_METRIC_DATA_TEMPLATE) return template.render() def list_metrics(self): - cloudwatch_backend = cloudwatch_backends[self.region] - metrics = cloudwatch_backend.get_all_metrics() + metrics = self.cloudwatch_backend.get_all_metrics() template = self.response_template(LIST_METRICS_TEMPLATE) return template.render(metrics=metrics) def delete_dashboards(self): - raise NotImplementedError() + dashboards = self._get_multi_param('DashboardNames.member') + if dashboards is None: + return self._error('InvalidParameterValue', 'Need at least 1 dashboard') + + status, error = self.cloudwatch_backend.delete_dashboards(dashboards) + if not status: + return self._error('ResourceNotFound', error) + + template = self.response_template(DELETE_DASHBOARD_TEMPLATE) + return template.render() def describe_alarm_history(self): raise NotImplementedError() @@ -116,16 +129,39 @@ class CloudWatchResponse(BaseResponse): raise NotImplementedError() def get_dashboard(self): - raise NotImplementedError() + dashboard_name = self._get_param('DashboardName') + + dashboard = self.cloudwatch_backend.get_dashboard(dashboard_name) + if dashboard is None: + return self._error('ResourceNotFound', 'Dashboard does not exist') + + template = self.response_template(GET_DASHBOARD_TEMPLATE) + return template.render(dashboard=dashboard) def get_metric_statistics(self): raise NotImplementedError() def list_dashboards(self): - raise NotImplementedError() + prefix = self._get_param('DashboardNamePrefix', '') + + dashboards = self.cloudwatch_backend.list_dashboards(prefix) + + template = self.response_template(LIST_DASHBOARD_RESPONSE) + return template.render(dashboards=dashboards) def put_dashboard(self): - raise NotImplementedError() + name = self._get_param('DashboardName') + body = 
self._get_param('DashboardBody') + + try: + json.loads(body) + except ValueError: + return self._error('InvalidParameterInput', 'Body is invalid JSON') + + self.cloudwatch_backend.put_dashboard(name, body) + + template = self.response_template(PUT_DASHBOARD_RESPONSE) + return template.render() def set_alarm_state(self): raise NotImplementedError() @@ -229,3 +265,58 @@ LIST_METRICS_TEMPLATE = """ + + + + + 44b1d4d8-9fa3-11e7-8ad3-41b86ac5e49e + +""" + +LIST_DASHBOARD_RESPONSE = """ + + + {% for dashboard in dashboards %} + + {{ dashboard.arn }} + {{ dashboard.last_modified_iso }} + {{ dashboard.size }} + {{ dashboard.name }} + + {% endfor %} + + + + c3773873-9fa5-11e7-b315-31fcc9275d62 + +""" + +DELETE_DASHBOARD_TEMPLATE = """ + + + 68d1dc8c-9faa-11e7-a694-df2715690df2 + +""" + +GET_DASHBOARD_TEMPLATE = """ + + {{ dashboard.arn }} + {{ dashboard.body }} + {{ dashboard.name }} + + + e3c16bb0-9faa-11e7-b315-31fcc9275d62 + + +""" + +ERROR_RESPONSE_TEMPLATE = """ + + Sender + {{ code }} + {{ message }} + + 5e45fd1e-9fa3-11e7-b720-89e8821d38c4 +""" diff --git a/moto/cloudwatch/utils.py b/moto/cloudwatch/utils.py new file mode 100644 index 000000000..ee33a4402 --- /dev/null +++ b/moto/cloudwatch/utils.py @@ -0,0 +1,5 @@ +from __future__ import unicode_literals + + +def make_arn_for_dashboard(account_id, name): + return "arn:aws:cloudwatch::{0}dashboard/{1}".format(account_id, name) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py new file mode 100644 index 000000000..923ba0b75 --- /dev/null +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -0,0 +1,94 @@ +from __future__ import unicode_literals + +import boto3 +from botocore.exceptions import ClientError +import sure # noqa + +from moto import mock_cloudwatch + + +@mock_cloudwatch +def test_put_list_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, 
"properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + resp = client.list_dashboards() + + len(resp['DashboardEntries']).should.equal(1) + + +@mock_cloudwatch +def test_put_list_prefix_nomatch_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + resp = client.list_dashboards(DashboardNamePrefix='nomatch') + + len(resp['DashboardEntries']).should.equal(0) + + +@mock_cloudwatch +def test_delete_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + client.put_dashboard(DashboardName='test2', DashboardBody=widget) + client.put_dashboard(DashboardName='test3', DashboardBody=widget) + client.delete_dashboards(DashboardNames=['test2', 'test1']) + + resp = client.list_dashboards(DashboardNamePrefix='test3') + len(resp['DashboardEntries']).should.equal(1) + + +@mock_cloudwatch +def test_delete_dashboard_fail(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + client.put_dashboard(DashboardName='test2', DashboardBody=widget) + client.put_dashboard(DashboardName='test3', DashboardBody=widget) + # Doesnt delete anything if all dashboards to be deleted do not exist + try: + client.delete_dashboards(DashboardNames=['test2', 'test1', 'test_no_match']) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFound') + else: + raise 
RuntimeError('Should of raised error') + + resp = client.list_dashboards() + len(resp['DashboardEntries']).should.equal(3) + + +@mock_cloudwatch +def test_get_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + + resp = client.get_dashboard(DashboardName='test1') + resp.should.contain('DashboardArn') + resp.should.contain('DashboardBody') + resp['DashboardName'].should.equal('test1') + + +@mock_cloudwatch +def test_get_dashboard_fail(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + + try: + client.get_dashboard(DashboardName='test1') + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFound') + else: + raise RuntimeError('Should of raised error') + + + + + + + From c2e0d8876d71dfdfabefc61a52be932aa2e27381 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 22 Sep 2017 10:11:09 -0700 Subject: [PATCH 258/412] bumping to version 1.1.13 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 166846ce6..4b552de3e 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ extras_require = { setup( name='moto', - version='1.1.12', + version='1.1.13', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 597f754600eaf8b8aeaa5bf2bf2e3ff3d12a01ec Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 22 Sep 2017 10:12:45 -0700 Subject: [PATCH 259/412] including dockerhub in releases --- Makefile | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6e6ad26ce..fe36a06e4 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,14 @@ test_server: aws_managed_policies: scripts/update_managed_policies.py -publish: +upload_pypi_artifact: python setup.py 
sdist bdist_wheel upload + +build_dockerhub_image: + docker build -t motoserver/moto . + +tag_github_release: git tag `python setup.py --version` git push origin `python setup.py --version` + +publish: upload_pypi_artifact build_dockerhub_image tag_github_release From ea66a717657c6d55f91be4ac7062a337c88023d9 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 22 Sep 2017 10:42:13 -0700 Subject: [PATCH 260/412] supporting signed urls for private keys --- moto/s3/responses.py | 6 ++++-- tests/test_s3/test_s3.py | 14 +++++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 86d5dbdef..781af2e48 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -548,8 +548,10 @@ class ResponseObject(_TemplateEnvironmentMixin): # header. if 'Authorization' not in request.headers: key = self.backend.get_key(bucket_name, key_name) - if key and not key.acl.public_read: - return 403, {}, "" + signed_url = 'Signature=' in request.url + if key: + if not key.acl.public_read and not signed_url: + return 403, {}, "" if hasattr(request, 'body'): # Boto diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 8ce56bd01..67ef17bc6 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -875,15 +875,19 @@ def test_s3_object_in_public_bucket(): s3_anonymous = boto3.resource('s3') s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) - contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() - contents.should.equal(b'ABCD') + # contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() + # contents.should.equal(b'ABCD') bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') - with assert_raises(ClientError) as exc: - s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() - exc.exception.response['Error']['Code'].should.equal('403') + # with 
assert_raises(ClientError) as exc: + # s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() + # exc.exception.response['Error']['Code'].should.equal('403') + params = {'Bucket': 'test-bucket','Key': 'file.txt'} + presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) + response = requests.get(presigned_url) + assert response.status_code == 200 @mock_s3 def test_s3_object_in_private_bucket(): From 390fe8513748bab74a4f6c3e556048f9abef000b Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 22 Sep 2017 10:44:55 -0700 Subject: [PATCH 261/412] supporting httpretty requests --- moto/s3/responses.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 781af2e48..d340d16e4 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -547,8 +547,12 @@ class ResponseObject(_TemplateEnvironmentMixin): # ACL and checking for the mere presence of an Authorization # header. 
if 'Authorization' not in request.headers: + if hasattr(request, 'url'): + signed_url = 'Signature=' in request.url + elif hasattr(request, 'requestline'): + signed_url = 'Signature=' in request.path key = self.backend.get_key(bucket_name, key_name) - signed_url = 'Signature=' in request.url + if key: if not key.acl.public_read and not signed_url: return 403, {}, "" From eeda0cd28edc472a73443fb6cdf00354760bc6ef Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Fri, 22 Sep 2017 10:57:06 -0700 Subject: [PATCH 262/412] re-enabling tests --- tests/test_s3/test_s3.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 67ef17bc6..cb40edb33 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -875,14 +875,14 @@ def test_s3_object_in_public_bucket(): s3_anonymous = boto3.resource('s3') s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) - # contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() - # contents.should.equal(b'ABCD') + contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() + contents.should.equal(b'ABCD') bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') - # with assert_raises(ClientError) as exc: - # s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() - # exc.exception.response['Error']['Code'].should.equal('403') + with assert_raises(ClientError) as exc: + s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() + exc.exception.response['Error']['Code'].should.equal('403') params = {'Bucket': 'test-bucket','Key': 'file.txt'} presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) From a91c0253982e81a701d74b4d1a5506fe10502786 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 20:08:20 +0100 Subject: [PATCH 263/412] Cleanup, and fix #1105 --- 
moto/core/responses.py | 6 ++--- moto/sqs/models.py | 54 ++++++++++++++++++++++--------------- moto/sqs/responses.py | 55 ++++++++++++++++++++++---------------- tests/test_sqs/test_sqs.py | 37 ++++++++++++++++++++++++- 4 files changed, 104 insertions(+), 48 deletions(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index 82e9d4cad..a97f66f6c 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -310,7 +310,7 @@ class BaseResponse(_TemplateEnvironmentMixin): param_index += 1 return results - def _get_map_prefix(self, param_prefix): + def _get_map_prefix(self, param_prefix, key_end='.key', value_end='.value'): results = {} param_index = 1 while 1: @@ -319,9 +319,9 @@ class BaseResponse(_TemplateEnvironmentMixin): k, v = None, None for key, value in self.querystring.items(): if key.startswith(index_prefix): - if key.endswith('.key'): + if key.endswith(key_end): k = value[0] - elif key.endswith('.value'): + elif key.endswith(value_end): v = value[0] if not (k and v): diff --git a/moto/sqs/models.py b/moto/sqs/models.py index e6209b4ba..4ea5ef579 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -12,10 +12,7 @@ import boto.sqs from moto.core import BaseBackend, BaseModel from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis from .utils import generate_receipt_handle -from .exceptions import ( - ReceiptHandleIsInvalid, - MessageNotInflight -) +from .exceptions import ReceiptHandleIsInvalid, MessageNotInflight, MessageAttributesInvalid DEFAULT_ACCOUNT_ID = 123456789012 DEFAULT_SENDER_ID = "AIDAIT2UOQQY3AUEKVGXU" @@ -151,8 +148,12 @@ class Queue(BaseModel): camelcase_attributes = ['ApproximateNumberOfMessages', 'ApproximateNumberOfMessagesDelayed', 'ApproximateNumberOfMessagesNotVisible', + 'ContentBasedDeduplication', 'CreatedTimestamp', 'DelaySeconds', + 'FifoQueue', + 'KmsDataKeyReusePeriodSeconds', + 'KmsMasterKeyId', 'LastModifiedTimestamp', 'MaximumMessageSize', 
'MessageRetentionPeriod', @@ -161,25 +162,35 @@ class Queue(BaseModel): 'VisibilityTimeout', 'WaitTimeSeconds'] - def __init__(self, name, visibility_timeout, wait_time_seconds, region): + def __init__(self, name, region, **kwargs): self.name = name - self.visibility_timeout = visibility_timeout or 30 + self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30)) self.region = region - # wait_time_seconds will be set to immediate return messages - self.wait_time_seconds = int(wait_time_seconds) if wait_time_seconds else 0 self._messages = [] now = unix_time() + # kwargs can also have: + # [Policy, RedrivePolicy] + self.fifo_queue = kwargs.get('FifoQueue', 'false') == 'true' + self.content_based_deduplication = kwargs.get('ContentBasedDeduplication', 'false') == 'true' + self.kms_master_key_id = kwargs.get('KmsMasterKeyId', 'alias/aws/sqs') + self.kms_data_key_reuse_period_seconds = int(kwargs.get('KmsDataKeyReusePeriodSeconds', 300)) self.created_timestamp = now - self.delay_seconds = 0 + self.delay_seconds = int(kwargs.get('DelaySeconds', 0)) self.last_modified_timestamp = now - self.maximum_message_size = 64 << 10 - self.message_retention_period = 86400 * 4 # four days - self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format( - self.region, self.name) - self.receive_message_wait_time_seconds = 0 + self.maximum_message_size = int(kwargs.get('MaximumMessageSize', 64 << 10)) + self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days + self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name) + self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0)) + + # wait_time_seconds will be set to immediate return messages + self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0)) + + # Check some conditions + if self.fifo_queue and not self.name.endswith('.fifo'): + raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues') @classmethod def 
create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): @@ -188,8 +199,8 @@ class Queue(BaseModel): sqs_backend = sqs_backends[region_name] return sqs_backend.create_queue( name=properties['QueueName'], - visibility_timeout=properties.get('VisibilityTimeout'), - wait_time_seconds=properties.get('WaitTimeSeconds') + region=region_name, + **properties ) @classmethod @@ -233,8 +244,10 @@ class Queue(BaseModel): def attributes(self): result = {} for attribute in self.camelcase_attributes: - result[attribute] = getattr( - self, camelcase_to_underscores(attribute)) + attr = getattr(self, camelcase_to_underscores(attribute)) + if isinstance(attr, bool): + attr = str(attr).lower() + result[attribute] = attr return result def url(self, request_url): @@ -268,11 +281,10 @@ class SQSBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def create_queue(self, name, visibility_timeout, wait_time_seconds): + def create_queue(self, name, **kwargs): queue = self.queues.get(name) if queue is None: - queue = Queue(name, visibility_timeout, - wait_time_seconds, self.region_name) + queue = Queue(name, **kwargs, region=self.region_name) self.queues[name] = queue return queue diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index ba4a56b8f..8a027ba60 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -28,8 +28,7 @@ class SQSResponse(BaseResponse): @property def attribute(self): if not hasattr(self, '_attribute'): - self._attribute = dict([(a['name'], a['value']) - for a in self._get_list_prefix('Attribute')]) + self._attribute = self._get_map_prefix('Attribute', key_end='Name', value_end='Value') return self._attribute def _get_queue_name(self): @@ -58,17 +57,25 @@ class SQSResponse(BaseResponse): return 404, headers, ERROR_INEXISTENT_QUEUE return status_code, headers, body + def _error(self, code, message, status=400): + template = self.response_template(ERROR_TEMPLATE) + return template.render(code=code, 
message=message), dict(status=status) + def create_queue(self): request_url = urlparse(self.uri) - queue_name = self.querystring.get("QueueName")[0] - queue = self.sqs_backend.create_queue(queue_name, visibility_timeout=self.attribute.get('VisibilityTimeout'), - wait_time_seconds=self.attribute.get('WaitTimeSeconds')) + queue_name = self._get_param("QueueName") + + try: + queue = self.sqs_backend.create_queue(queue_name, **self.attribute) + except MessageAttributesInvalid as e: + return self._error('InvalidParameterValue', e.description) + template = self.response_template(CREATE_QUEUE_RESPONSE) return template.render(queue=queue, request_url=request_url) def get_queue_url(self): request_url = urlparse(self.uri) - queue_name = self.querystring.get("QueueName")[0] + queue_name = self._get_param("QueueName") queue = self.sqs_backend.get_queue(queue_name) if queue: template = self.response_template(GET_QUEUE_URL_RESPONSE) @@ -78,14 +85,14 @@ class SQSResponse(BaseResponse): def list_queues(self): request_url = urlparse(self.uri) - queue_name_prefix = self.querystring.get("QueueNamePrefix", [None])[0] + queue_name_prefix = self._get_param('QueueNamePrefix') queues = self.sqs_backend.list_queues(queue_name_prefix) template = self.response_template(LIST_QUEUES_RESPONSE) return template.render(queues=queues, request_url=request_url) def change_message_visibility(self): queue_name = self._get_queue_name() - receipt_handle = self.querystring.get("ReceiptHandle")[0] + receipt_handle = self._get_param('ReceiptHandle') try: visibility_timeout = self._get_validated_visibility_timeout() @@ -111,19 +118,15 @@ class SQSResponse(BaseResponse): return template.render(queue=queue) def set_queue_attributes(self): + # TODO validate self.get_param('QueueUrl') queue_name = self._get_queue_name() - if "Attribute.Name" in self.querystring: - key = camelcase_to_underscores( - self.querystring.get("Attribute.Name")[0]) - value = self.querystring.get("Attribute.Value")[0] - 
self.sqs_backend.set_queue_attribute(queue_name, key, value) - for a in self._get_list_prefix("Attribute"): - key = camelcase_to_underscores(a["name"]) - value = a["value"] + for key, value in self.attribute.items(): + key = camelcase_to_underscores(key) self.sqs_backend.set_queue_attribute(queue_name, key, value) return SET_QUEUE_ATTRIBUTE_RESPONSE def delete_queue(self): + # TODO validate self.get_param('QueueUrl') queue_name = self._get_queue_name() queue = self.sqs_backend.delete_queue(queue_name) if not queue: @@ -133,17 +136,12 @@ class SQSResponse(BaseResponse): return template.render(queue=queue) def send_message(self): - message = self.querystring.get("MessageBody")[0] - delay_seconds = self.querystring.get('DelaySeconds') + message = self._get_param('MessageBody') + delay_seconds = int(self._get_param('DelaySeconds', 0)) if len(message) > MAXIMUM_MESSAGE_LENGTH: return ERROR_TOO_LONG_RESPONSE, dict(status=400) - if delay_seconds: - delay_seconds = int(delay_seconds[0]) - else: - delay_seconds = 0 - try: message_attributes = parse_message_attributes(self.querystring) except MessageAttributesInvalid as e: @@ -470,3 +468,14 @@ ERROR_INEXISTENT_QUEUE = """ + + Sender + {{ code }} + {{ message }} + + + 6fde8d1e-52cd-4581-8cd9-c512f4c64223 +""" \ No newline at end of file diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 3eb8e2213..9c439eb68 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -8,7 +8,6 @@ from boto.exception import SQSError from boto.sqs.message import RawMessage, Message import base64 -import requests import sure # noqa import time @@ -18,6 +17,39 @@ import tests.backport_assert_raises # noqa from nose.tools import assert_raises +@mock_sqs +def test_create_fifo_queue_fail(): + sqs = boto3.client('sqs', region_name='us-east-1') + + try: + sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'FifoQueue': 'true', + } + ) + except botocore.exceptions.ClientError as err: + 
err.response['Error']['Code'].should.equal('InvalidParameterValue') + else: + raise RuntimeError('Should of raised InvalidParameterValue Exception') + +@mock_sqs +def test_create_fifo_queue(): + sqs = boto3.client('sqs', region_name='us-east-1') + resp = sqs.create_queue( + QueueName='test-queue.fifo', + Attributes={ + 'FifoQueue': 'true', + } + ) + queue_url = resp['QueueUrl'] + + response = sqs.get_queue_attributes(QueueUrl=queue_url) + response['Attributes'].should.contain('FifoQueue') + response['Attributes']['FifoQueue'].should.equal('true') + + + @mock_sqs def test_create_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') @@ -39,6 +71,7 @@ def test_get_inexistent_queue(): sqs.get_queue_by_name.when.called_with( QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) + @mock_sqs def test_message_send_without_attributes(): sqs = boto3.resource('sqs', region_name='us-east-1') @@ -56,6 +89,7 @@ def test_message_send_without_attributes(): messages = queue.receive_messages() messages.should.have.length_of(1) + @mock_sqs def test_message_send_with_attributes(): sqs = boto3.resource('sqs', region_name='us-east-1') @@ -229,6 +263,7 @@ def test_send_receive_message_without_attributes(): message1.shouldnt.have.key('MD5OfMessageAttributes') message2.shouldnt.have.key('MD5OfMessageAttributes') + @mock_sqs def test_send_receive_message_with_attributes(): sqs = boto3.resource('sqs', region_name='us-east-1') From 932cab7f762716ac5c897cc802d90b28be858a5e Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 20:24:14 +0100 Subject: [PATCH 264/412] Fixed typo --- moto/sqs/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 4ea5ef579..e8649ba24 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -284,7 +284,7 @@ class SQSBackend(BaseBackend): def create_queue(self, name, **kwargs): queue = self.queues.get(name) if queue is None: - queue = Queue(name, **kwargs, 
region=self.region_name) + queue = Queue(name, region=self.region_name, **kwargs) self.queues[name] = queue return queue From 219ed45f94946f47d7d44633ccae2bf819df0742 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 20:29:40 +0100 Subject: [PATCH 265/412] Flake8 --- moto/sqs/responses.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 8a027ba60..e0e493ad8 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -469,7 +469,6 @@ ERROR_INEXISTENT_QUEUE = """ Sender @@ -478,4 +477,4 @@ ERROR_TEMPLATE = """""" From 81e615f5c1bd3cc4ae1baae11bcefa6a88575c84 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 20:37:32 +0100 Subject: [PATCH 266/412] Fixed conflicting arguments --- moto/sqs/models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index e8649ba24..e9d889453 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -284,6 +284,10 @@ class SQSBackend(BaseBackend): def create_queue(self, name, **kwargs): queue = self.queues.get(name) if queue is None: + try: + kwargs.pop('region') + except KeyError: + pass queue = Queue(name, region=self.region_name, **kwargs) self.queues[name] = queue return queue From 7167e09ca5f22c6901292ed8a445c300f60612ca Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Fri, 22 Sep 2017 13:35:00 -0700 Subject: [PATCH 267/412] Fixed bug in S3 put_bucket_tagging. Closes #1181. 
--- moto/s3/responses.py | 9 +++++++-- tests/test_s3/test_s3.py | 13 +++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index d340d16e4..b04cb9496 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -780,8 +780,13 @@ class ResponseObject(_TemplateEnvironmentMixin): tags = [] # Optional if no tags are being sent: if parsed_xml['Tagging'].get('TagSet'): - for tag in parsed_xml['Tagging']['TagSet']['Tag']: - tags.append(FakeTag(tag['Key'], tag['Value'])) + # If there is only 1 tag, then it's not a list: + if not isinstance(parsed_xml['Tagging']['TagSet']['Tag'], list): + tags.append(FakeTag(parsed_xml['Tagging']['TagSet']['Tag']['Key'], + parsed_xml['Tagging']['TagSet']['Tag']['Value'])) + else: + for tag in parsed_xml['Tagging']['TagSet']['Tag']: + tags.append(FakeTag(tag['Key'], tag['Value'])) tag_set = FakeTagSet(tags) tagging = FakeTagging(tag_set) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index cb40edb33..e4cb499b9 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1441,6 +1441,19 @@ def test_boto3_put_bucket_tagging(): bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) + # With 1 tag: + resp = s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + } + ] + }) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # With multiple tags: resp = s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ "TagSet": [ From edd10aaa1915e51201b5daba31a4f3c895e3fa99 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 23:36:26 +0100 Subject: [PATCH 268/412] Started Work on X-Ray Could help #1006 --- README.md | 2 + moto/__init__.py | 1 + moto/xray/__init__.py | 6 + moto/xray/exceptions.py | 39 ++++++ moto/xray/models.py | 208 +++++++++++++++++++++++++++++ moto/xray/responses.py | 133 ++++++++++++++++++ moto/xray/urls.py | 10 ++ tests/test_xray/test_xray_boto3.py 
| 84 ++++++++++++ 8 files changed, 483 insertions(+) create mode 100644 moto/xray/__init__.py create mode 100644 moto/xray/exceptions.py create mode 100644 moto/xray/models.py create mode 100644 moto/xray/responses.py create mode 100644 moto/xray/urls.py create mode 100644 tests/test_xray/test_xray_boto3.py diff --git a/README.md b/README.md index cca50a16e..39dc49fea 100644 --- a/README.md +++ b/README.md @@ -120,6 +120,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | SWF | @mock_swf | basic endpoints done | |------------------------------------------------------------------------------| +| X-Ray | @mock_xray | core endpoints done | +|------------------------------------------------------------------------------| ``` ### Another Example diff --git a/moto/__init__.py b/moto/__init__.py index 728d8db71..871aab881 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -36,6 +36,7 @@ from .sts import mock_sts, mock_sts_deprecated # flake8: noqa from .ssm import mock_ssm # flake8: noqa from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa from .swf import mock_swf, mock_swf_deprecated # flake8: noqa +from .xray import mock_xray # flake8: noqa try: diff --git a/moto/xray/__init__.py b/moto/xray/__init__.py new file mode 100644 index 000000000..7b32ca0b0 --- /dev/null +++ b/moto/xray/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import xray_backends +from ..core.models import base_decorator + +xray_backend = xray_backends['us-east-1'] +mock_xray = base_decorator(xray_backends) diff --git a/moto/xray/exceptions.py b/moto/xray/exceptions.py new file mode 100644 index 000000000..24f700178 --- /dev/null +++ b/moto/xray/exceptions.py @@ -0,0 +1,39 @@ +import json + + +class AWSError(Exception): + CODE = None + STATUS = 400 + + def __init__(self, message, code=None, status=None): + self.message = 
message + self.code = code if code is not None else self.CODE + self.status = status if status is not None else self.STATUS + + def response(self): + return json.dumps({'__type': self.code, 'message': self.message}), dict(status=self.status) + + +class InvalidRequestException(AWSError): + CODE = 'InvalidRequestException' + + +class BadSegmentException(Exception): + def __init__(self, seg_id=None, code=None, message=None): + self.id = seg_id + self.code = code + self.message = message + + def __repr__(self): + return ''.format('-'.join([self.id, self.code, self.message])) + + def to_dict(self): + result = {} + if self.id is not None: + result['Id'] = self.id + if self.code is not None: + result['ErrorCode'] = self.code + if self.message is not None: + result['Message'] = self.message + + return result diff --git a/moto/xray/models.py b/moto/xray/models.py new file mode 100644 index 000000000..f22edeb9f --- /dev/null +++ b/moto/xray/models.py @@ -0,0 +1,208 @@ +from __future__ import unicode_literals + +import bisect +import datetime +from collections import defaultdict +import json +from moto.core import BaseBackend, BaseModel +from moto.ec2 import ec2_backends +from .exceptions import BadSegmentException, AWSError + + +class TelemetryRecords(BaseModel): + def __init__(self, instance_id, hostname, resource_arn, records): + self.instance_id = instance_id + self.hostname = hostname + self.resource_arn = resource_arn + self.records = records + + @classmethod + def from_json(cls, json): + instance_id = json.get('EC2InstanceId', None) + hostname = json.get('Hostname') + resource_arn = json.get('ResourceARN') + telemetry_records = json['TelemetryRecords'] + + return cls(instance_id, hostname, resource_arn, telemetry_records) + + +# https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html +class TraceSegment(BaseModel): + def __init__(self, name, segment_id, trace_id, start_time, end_time=None, in_progress=False, service=None, user=None, + 
origin=None, parent_id=None, http=None, aws=None, metadata=None, annotations=None, subsegments=None, **kwargs): + self.name = name + self.id = segment_id + self.trace_id = trace_id + self._trace_version = None + self._original_request_start_time = None + self._trace_identifier = None + self.start_time = start_time + self._start_date = None + self.end_time = end_time + self._end_date = None + self.in_progress = in_progress + self.service = service + self.user = user + self.origin = origin + self.parent_id = parent_id + self.http = http + self.aws = aws + self.metadata = metadata + self.annotations = annotations + self.subsegments = subsegments + self.misc = kwargs + + def __lt__(self, other): + return self.start_date < other.start_date + + @property + def trace_version(self): + if self._trace_version is None: + self._trace_version = int(self.trace_id.split('-', 1)[0]) + return self._trace_version + + @property + def request_start_date(self): + if self._original_request_start_time is None: + start_time = int(self.trace_id.split('-')[1], 16) + self._original_request_start_time = datetime.datetime.fromtimestamp(start_time) + return self._original_request_start_time + + @property + def start_date(self): + if self._start_date is None: + self._start_date = datetime.datetime.fromtimestamp(self.start_time) + return self._start_date + + @property + def end_date(self): + if self._end_date is None: + self._end_date = datetime.datetime.fromtimestamp(self.end_time) + return self._end_date + + @classmethod + def from_dict(cls, data): + # Check manditory args + if 'id' not in data: + raise BadSegmentException(code='MissingParam', message='Missing segment ID') + seg_id = data['id'] + data['segment_id'] = seg_id # Just adding this key for future convenience + + for arg in ('name', 'trace_id', 'start_time'): + if arg not in data: + raise BadSegmentException(seg_id=seg_id, code='MissingParam', message='Missing segment ID') + + if 'end_time' not in data and 'in_progress' not in data: + 
raise BadSegmentException(seg_id=seg_id, code='MissingParam', message='Missing end_time or in_progress') + if 'end_time' not in data and data['in_progress'] == 'false': + raise BadSegmentException(seg_id=seg_id, code='MissingParam', message='Missing end_time') + + return cls(**data) + + +class SegmentCollection(object): + def __init__(self): + self._segments = defaultdict(self._new_trace_item) + + @staticmethod + def _new_trace_item(): + return { + 'start_date': datetime.datetime(1970, 1, 1), + 'end_date': datetime.datetime(1970, 1, 1), + 'finished': False, + 'segments': [] + } + + def put_segment(self, segment): + # insert into a sorted list + bisect.insort_left(self._segments[segment.trace_id]['segments'], segment) + + # Get the last segment (takes into account incorrect ordering) + # and if its the last one, mark trace as complete + if self._segments[segment.trace_id]['segments'][-1].end_time is not None: + self._segments[segment.trace_id]['finished'] = True + + start_time = self._segments[segment.trace_id]['segments'][0].start_date + end_time = self._segments[segment.trace_id]['segments'][-1].end_date + self._segments[segment.trace_id]['start_date'] = start_time + self._segments[segment.trace_id]['end_date'] = end_time + + # Todo consolidate trace segments into a trace. 
+ # not enough working knowledge of xray to do this + + def summary(self, start_time, end_time, filter_expression=None, sampling=False): + # This beast https://docs.aws.amazon.com/xray/latest/api/API_GetTraceSummaries.html#API_GetTraceSummaries_ResponseSyntax + if filter_expression is not None: + raise AWSError('Not implemented yet - moto', code='InternalFailure', status=500) + + summaries = [] + + for tid, trace in self._segments.items(): + if trace['finished'] and start_time < trace['start_date'] and trace['end_date'] < end_time: + duration = int((trace['end_date'] - trace['start_date']).total_seconds()) + # this stuff is mostly guesses, refer to TODO above + has_error = any(['error' in seg.misc for seg in trace['segments']]) + has_fault = any(['fault' in seg.misc for seg in trace['segments']]) + has_throttle = any(['throttle' in seg.misc for seg in trace['segments']]) + + # Apparently all of these options are optional + summary_part = { + 'Annotations': {}, # Not implemented yet + 'Duration': duration, + 'HasError': has_error, + 'HasFault': has_fault, + 'HasThrottle': has_throttle, + 'Http': {}, # Not implemented yet + 'Id': tid, + 'IsParital': False, # needs lots more work to work on partials + 'ResponseTime': 1, # definitely 1ms resposnetime + 'ServiceIds': [], # Not implemented yet + 'Users': {} # Not implemented yet + } + summaries.append(summary_part) + + result = { + "ApproximateTime": int((datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds()), + "TracesProcessedCount": len(summaries), + "TraceSummaries": summaries + } + + return result + + +class XRayBackend(BaseBackend): + + def __init__(self): + self._telemetry_records = [] + self._segment_collection = SegmentCollection() + + def add_telemetry_records(self, json): + self._telemetry_records.append( + TelemetryRecords.from_json(json) + ) + + def process_segment(self, doc): + try: + data = json.loads(doc) + except ValueError: + raise BadSegmentException(code='JSONFormatError', 
message='Bad JSON data') + + try: + # Get Segment Object + segment = TraceSegment.from_dict(data) + except ValueError: + raise BadSegmentException(code='JSONFormatError', message='Bad JSON data') + + try: + # Store Segment Object + self._segment_collection.put_segment(segment) + except Exception as err: + raise BadSegmentException(seg_id=segment.id, code='InternalFailure', message=str(err)) + + def get_trace_summary(self, start_time, end_time, filter_expression, summaries): + return self._segment_collection.summary(start_time, end_time, filter_expression, summaries) + + +xray_backends = {} +for region, ec2_backend in ec2_backends.items(): + xray_backends[region] = XRayBackend() diff --git a/moto/xray/responses.py b/moto/xray/responses.py new file mode 100644 index 000000000..3c69e105c --- /dev/null +++ b/moto/xray/responses.py @@ -0,0 +1,133 @@ +from __future__ import unicode_literals +from urllib.parse import urlsplit +import json +import six +import datetime + +from moto.core.responses import BaseResponse +from moto.core.utils import camelcase_to_underscores, method_names_from_class +from werkzeug.exceptions import HTTPException + +from .models import xray_backends +from .exceptions import AWSError, BadSegmentException + + +class XRayResponse(BaseResponse): + + def _error(self, code, message): + return json.dumps({'__type': code, 'message': message}), dict(status=400) + + @property + def xray_backend(self): + return xray_backends[self.region] + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + def _get_param(self, param, default=None): + return self.request_params.get(param, default) + + def call_action(self): + # Amazon is just calling urls like /TelemetryRecords etc... 
+ action = urlsplit(self.uri).path.lstrip('/') + action = camelcase_to_underscores(action) + headers = self.response_headers + method_names = method_names_from_class(self.__class__) + if action in method_names: + method = getattr(self, action) + try: + response = method() + except HTTPException as http_error: + response = http_error.description, dict(status=http_error.code) + if isinstance(response, six.string_types): + return 200, headers, response + else: + body, new_headers = response + status = new_headers.get('status', 200) + headers.update(new_headers) + # Cast status to string + if "status" in headers: + headers['status'] = str(headers['status']) + return status, headers, body + + raise NotImplementedError( + "The {0} action has not been implemented".format(action)) + + # PutTelemetryRecords + def telemetry_records(self): + try: + self.xray_backend.add_telemetry_records(self.request_params) + except AWSError as err: + return err.response() + + return '' + + # PutTraceSegments + def trace_segments(self): + docs = self._get_param('TraceSegmentDocuments') + + if docs is None: + msg = 'Parameter TraceSegmentDocuments is missing' + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) + + # Raises an exception that contains info about a bad segment, + # the object also has a to_dict() method + bad_segments = [] + for doc in docs: + try: + self.xray_backend.process_segment(doc) + except BadSegmentException as bad_seg: + bad_segments.append(bad_seg) + except Exception as err: + return json.dumps({'__type': 'InternalFailure', 'message': str(err)}), dict(status=500) + + result = {'UnprocessedTraceSegments': [x.to_dict() for x in bad_segments]} + return json.dumps(result) + + # GetTraceSummaries + def trace_summaries(self): + start_time = self._get_param('StartTime') + end_time = self._get_param('EndTime') + if start_time is None: + msg = 'Parameter StartTime is missing' + return json.dumps({'__type': 'MissingParameter', 'message': msg}), 
dict(status=400) + if end_time is None: + msg = 'Parameter EndTime is missing' + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) + + filter_expression = self._get_param('FilterExpression') + sampling = self._get_param('Sampling', 'false') == 'true' + + try: + start_time = datetime.datetime.fromtimestamp(int(start_time)) + end_time = datetime.datetime.fromtimestamp(int(end_time)) + except ValueError: + msg = 'start_time and end_time are not integers' + return json.dumps({'__type': 'InvalidParameterValue', 'message': msg}), dict(status=400) + except Exception as err: + return json.dumps({'__type': 'InternalFailure', 'message': str(err)}), dict(status=500) + + try: + result = self.xray_backend.get_trace_summary(start_time, end_time, filter_expression, sampling) + except AWSError as err: + return err.response() + except Exception as err: + return json.dumps({'__type': 'InternalFailure', 'message': str(err)}), dict(status=500) + + return json.dumps(result) + + # BatchGetTraces + def traces(self): + raise NotImplementedError() + + # GetServiceGraph + def service_graph(self): + raise NotImplementedError() + + # GetTraceGraph + def trace_graph(self): + raise NotImplementedError() diff --git a/moto/xray/urls.py b/moto/xray/urls.py new file mode 100644 index 000000000..c224e8d38 --- /dev/null +++ b/moto/xray/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import XRayResponse + +url_bases = [ + "https?://xray.(.+).amazonaws.com", +] + +url_paths = { + '{0}/.+$': XRayResponse.dispatch, +} diff --git a/tests/test_xray/test_xray_boto3.py b/tests/test_xray/test_xray_boto3.py new file mode 100644 index 000000000..9da55ad1e --- /dev/null +++ b/tests/test_xray/test_xray_boto3.py @@ -0,0 +1,84 @@ +from __future__ import unicode_literals + +import boto3 +import json +import botocore.exceptions +import sure # noqa + +from moto import mock_xray + +import datetime + + +@mock_xray +def test_put_telemetry(): + client = 
boto3.client('xray', region_name='us-east-1') + + client.put_telemetry_records( + TelemetryRecords=[ + { + 'Timestamp': datetime.datetime(2015, 1, 1), + 'SegmentsReceivedCount': 123, + 'SegmentsSentCount': 123, + 'SegmentsSpilloverCount': 123, + 'SegmentsRejectedCount': 123, + 'BackendConnectionErrors': { + 'TimeoutCount': 123, + 'ConnectionRefusedCount': 123, + 'HTTPCode4XXCount': 123, + 'HTTPCode5XXCount': 123, + 'UnknownHostCount': 123, + 'OtherCount': 123 + } + }, + ], + EC2InstanceId='string', + Hostname='string', + ResourceARN='string' + ) + + +@mock_xray +def test_put_trace_segments(): + client = boto3.client('xray', region_name='us-east-1') + + client.put_trace_segments( + TraceSegmentDocuments=[ + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0a', + 'start_time': 1.478293361271E9, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'end_time': 1.478293361449E9 + }) + ] + ) + + +@mock_xray +def test_trace_summary(): + client = boto3.client('xray', region_name='us-east-1') + + client.put_trace_segments( + TraceSegmentDocuments=[ + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0a', + 'start_time': 1.478293361271E9, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'in_progress': True + }), + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0b', + 'start_time': 1478293365, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'end_time': 1478293385 + }) + ] + ) + + client.get_trace_summaries( + StartTime=datetime.datetime(2014, 1, 1), + EndTime=datetime.datetime(2017, 1, 1) + ) From 17d9701d19a3713a5ac553543366ff1d85ef4146 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 22 Sep 2017 23:50:01 +0100 Subject: [PATCH 269/412] Updated R53 ID's to match what AWS do now --- moto/route53/models.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/moto/route53/models.py b/moto/route53/models.py index b823cb915..d12f4ee7a 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py 
@@ -2,11 +2,20 @@ from __future__ import unicode_literals from collections import defaultdict +import string +import random import uuid from jinja2 import Template from moto.core import BaseBackend, BaseModel -from moto.core.utils import get_random_hex + + +ROUTE53_ID_CHOICE = string.ascii_uppercase + string.digits + + +def create_route53_zone_id(): + # New ID's look like this Z1RWWTK7Y8UDDQ + return ''.join([random.choice(ROUTE53_ID_CHOICE) for _ in range(0, 15)]) class HealthCheck(BaseModel): @@ -247,7 +256,7 @@ class Route53Backend(BaseBackend): self.resource_tags = defaultdict(dict) def create_hosted_zone(self, name, private_zone, comment=None): - new_id = get_random_hex() + new_id = create_route53_zone_id() new_zone = FakeZone( name, new_id, private_zone=private_zone, comment=comment) self.zones[new_id] = new_zone From 983ae57868d9c0acf211e93e16a3e3034d278351 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sat, 23 Sep 2017 00:05:25 +0100 Subject: [PATCH 270/412] Changed urllib to six equivalent --- moto/xray/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/xray/responses.py b/moto/xray/responses.py index 3c69e105c..89705fb5b 100644 --- a/moto/xray/responses.py +++ b/moto/xray/responses.py @@ -1,5 +1,4 @@ from __future__ import unicode_literals -from urllib.parse import urlsplit import json import six import datetime @@ -7,6 +6,7 @@ import datetime from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, method_names_from_class from werkzeug.exceptions import HTTPException +from six.moves.urllib.parse import urlsplit from .models import xray_backends from .exceptions import AWSError, BadSegmentException From 7ea4d8c3e617232f78211da4e772da180ce32a4f Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sat, 23 Sep 2017 00:08:31 +0100 Subject: [PATCH 271/412] Fixed missing backend declaration --- moto/backends.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/backends.py 
b/moto/backends.py index b452b45fd..743b15801 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -31,6 +31,7 @@ from moto.sns import sns_backends from moto.sqs import sqs_backends from moto.ssm import ssm_backends from moto.sts import sts_backends +from moto.xray import xray_backends BACKENDS = { 'apigateway': apigateway_backends, @@ -65,6 +66,7 @@ BACKENDS = { 'sts': sts_backends, 'route53': route53_backends, 'lambda': lambda_backends, + 'xray': xray_backends } From e230407074f832da17ca04d7e4ba7dcb5d5f0a77 Mon Sep 17 00:00:00 2001 From: Ron Rothman Date: Sat, 23 Sep 2017 01:41:10 -0400 Subject: [PATCH 272/412] ssm get_parameter to honot WithDecryption --- moto/ssm/responses.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index ca0339693..3b75ada09 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -56,13 +56,8 @@ class SimpleSystemManagerResponse(BaseResponse): return json.dumps(error), dict(status=400) response = { - 'Parameter': { - 'Name': name, - 'Type': result.type, - 'Value': result.value - } + 'Parameter': result.response_object(with_decryption) } - return json.dumps(response) def get_parameters(self): From 316a638d9e7ab8b4dca6f9d1279fe5451556bb1a Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Sat, 23 Sep 2017 17:03:42 +0900 Subject: [PATCH 273/412] add description of scaffold.py --- scripts/scaffold.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/scripts/scaffold.py b/scripts/scaffold.py index c38544abf..94bdad1fc 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -1,4 +1,14 @@ #!/usr/bin/env python +"""This script generates template codes and response body for specified boto3's operation and apply to appropriate files. +You only have to select service and operation that you want to add. +This script looks at the botocore's definition file of specified service and operation, and auto-generates codes and reponses. 
+Basically, this script supports almost all services, as long as its protocol is `query`, `json` or `rest-json`. +Event if aws adds new services, this script will work as long as the protocol is known. + +TODO: + - This scripts don't generates functions in `responses.py` for `rest-json`, because I don't know the rule of it. want someone fix this. + - In some services's operations, this scripts might crash. Make new issue on github then. +""" import os import re import inspect From 0a4c2301c755243c329bcd055609f6c4b8676b85 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Sat, 23 Sep 2017 17:14:04 +0900 Subject: [PATCH 274/412] fix bug that existing template breaks --- scripts/scaffold.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/scaffold.py b/scripts/scaffold.py index 94bdad1fc..bbc8de208 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -319,8 +319,9 @@ def insert_code_to_class(path, base_class, new_code): lines = lines[:end_line_no] + func_lines + lines[end_line_no:] + body = '\n'.join(lines) + '\n' with open(path, 'w') as f: - f.write('\n'.join(lines)) + f.write(body) def insert_query_codes(service, operation): From 0bd3899cb311107bec2f997907e8f013dfefe145 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sat, 23 Sep 2017 11:02:25 +0100 Subject: [PATCH 275/412] Finished X-Ray + fixed routing bug --- moto/core/responses.py | 9 ++-- moto/server.py | 7 ++- moto/xray/models.py | 71 ++++++++++++++++++++------ moto/xray/responses.py | 81 ++++++++++++++++++------------ moto/xray/urls.py | 7 ++- tests/test_xray/test_xray_boto3.py | 55 ++++++++++++++++++++ 6 files changed, 178 insertions(+), 52 deletions(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index a97f66f6c..781a0b284 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -178,8 +178,7 @@ class BaseResponse(_TemplateEnvironmentMixin): self.setup_class(request, full_url, headers) return self.call_action() - def call_action(self): - 
headers = self.response_headers + def _get_action(self): action = self.querystring.get('Action', [""])[0] if not action: # Some services use a header for the action # Headers are case-insensitive. Probably a better way to do this. @@ -188,7 +187,11 @@ class BaseResponse(_TemplateEnvironmentMixin): if match: action = match.split(".")[-1] - action = camelcase_to_underscores(action) + return action + + def call_action(self): + headers = self.response_headers + action = camelcase_to_underscores(self._get_action()) method_names = method_names_from_class(self.__class__) if action in method_names: method = getattr(self, action) diff --git a/moto/server.py b/moto/server.py index 8d0103cc2..966cb1614 100644 --- a/moto/server.py +++ b/moto/server.py @@ -139,10 +139,13 @@ def create_backend_app(service): else: endpoint = None - if endpoint in backend_app.view_functions: + original_endpoint = endpoint + index = 2 + while endpoint in backend_app.view_functions: # HACK: Sometimes we map the same view to multiple url_paths. Flask # requries us to have different names. 
- endpoint += "2" + endpoint = original_endpoint + str(index) + index += 1 backend_app.add_url_rule( url_path, diff --git a/moto/xray/models.py b/moto/xray/models.py index f22edeb9f..b2d418232 100644 --- a/moto/xray/models.py +++ b/moto/xray/models.py @@ -28,7 +28,7 @@ class TelemetryRecords(BaseModel): # https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html class TraceSegment(BaseModel): - def __init__(self, name, segment_id, trace_id, start_time, end_time=None, in_progress=False, service=None, user=None, + def __init__(self, name, segment_id, trace_id, start_time, raw, end_time=None, in_progress=False, service=None, user=None, origin=None, parent_id=None, http=None, aws=None, metadata=None, annotations=None, subsegments=None, **kwargs): self.name = name self.id = segment_id @@ -52,6 +52,9 @@ class TraceSegment(BaseModel): self.subsegments = subsegments self.misc = kwargs + # Raw json string + self.raw = raw + def __lt__(self, other): return self.start_date < other.start_date @@ -81,7 +84,7 @@ class TraceSegment(BaseModel): return self._end_date @classmethod - def from_dict(cls, data): + def from_dict(cls, data, raw): # Check manditory args if 'id' not in data: raise BadSegmentException(code='MissingParam', message='Missing segment ID') @@ -97,12 +100,12 @@ class TraceSegment(BaseModel): if 'end_time' not in data and data['in_progress'] == 'false': raise BadSegmentException(seg_id=seg_id, code='MissingParam', message='Missing end_time') - return cls(**data) + return cls(raw=raw, **data) class SegmentCollection(object): def __init__(self): - self._segments = defaultdict(self._new_trace_item) + self._traces = defaultdict(self._new_trace_item) @staticmethod def _new_trace_item(): @@ -110,23 +113,24 @@ class SegmentCollection(object): 'start_date': datetime.datetime(1970, 1, 1), 'end_date': datetime.datetime(1970, 1, 1), 'finished': False, + 'trace_id': None, 'segments': [] } def put_segment(self, segment): # insert into a sorted list - 
bisect.insort_left(self._segments[segment.trace_id]['segments'], segment) + bisect.insort_left(self._traces[segment.trace_id]['segments'], segment) # Get the last segment (takes into account incorrect ordering) # and if its the last one, mark trace as complete - if self._segments[segment.trace_id]['segments'][-1].end_time is not None: - self._segments[segment.trace_id]['finished'] = True - - start_time = self._segments[segment.trace_id]['segments'][0].start_date - end_time = self._segments[segment.trace_id]['segments'][-1].end_date - self._segments[segment.trace_id]['start_date'] = start_time - self._segments[segment.trace_id]['end_date'] = end_time + if self._traces[segment.trace_id]['segments'][-1].end_time is not None: + self._traces[segment.trace_id]['finished'] = True + start_time = self._traces[segment.trace_id]['segments'][0].start_date + end_time = self._traces[segment.trace_id]['segments'][-1].end_date + self._traces[segment.trace_id]['start_date'] = start_time + self._traces[segment.trace_id]['end_date'] = end_time + self._traces[segment.trace_id]['trace_id'] = segment.trace_id # Todo consolidate trace segments into a trace. 
# not enough working knowledge of xray to do this @@ -137,7 +141,7 @@ class SegmentCollection(object): summaries = [] - for tid, trace in self._segments.items(): + for tid, trace in self._traces.items(): if trace['finished'] and start_time < trace['start_date'] and trace['end_date'] < end_time: duration = int((trace['end_date'] - trace['start_date']).total_seconds()) # this stuff is mostly guesses, refer to TODO above @@ -169,6 +173,20 @@ class SegmentCollection(object): return result + def get_trace_ids(self, trace_ids): + traces = [] + unprocessed = [] + + # Its a default dict + existing_trace_ids = list(self._traces.keys()) + for trace_id in trace_ids: + if trace_id in existing_trace_ids: + traces.append(self._traces[trace_id]) + else: + unprocessed.append(trace_id) + + return traces, unprocessed + class XRayBackend(BaseBackend): @@ -189,7 +207,7 @@ class XRayBackend(BaseBackend): try: # Get Segment Object - segment = TraceSegment.from_dict(data) + segment = TraceSegment.from_dict(data, raw=doc) except ValueError: raise BadSegmentException(code='JSONFormatError', message='Bad JSON data') @@ -202,6 +220,31 @@ class XRayBackend(BaseBackend): def get_trace_summary(self, start_time, end_time, filter_expression, summaries): return self._segment_collection.summary(start_time, end_time, filter_expression, summaries) + def get_trace_ids(self, trace_ids, next_token): + traces, unprocessed_ids = self._segment_collection.get_trace_ids(trace_ids) + + result = { + 'Traces': [], + 'UnprocessedTraceIds': unprocessed_ids + + } + + for trace in traces: + segments = [] + for segment in trace['segments']: + segments.append({ + 'Id': segment.id, + 'Document': segment.raw + }) + + result['Traces'].append({ + 'Duration': int((trace['end_date'] - trace['start_date']).total_seconds()), + 'Id': trace['trace_id'], + 'Segments': segments + }) + + return result + xray_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/xray/responses.py b/moto/xray/responses.py 
index 89705fb5b..328a266bf 100644 --- a/moto/xray/responses.py +++ b/moto/xray/responses.py @@ -1,11 +1,8 @@ from __future__ import unicode_literals import json -import six import datetime from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores, method_names_from_class -from werkzeug.exceptions import HTTPException from six.moves.urllib.parse import urlsplit from .models import xray_backends @@ -31,31 +28,11 @@ class XRayResponse(BaseResponse): def _get_param(self, param, default=None): return self.request_params.get(param, default) - def call_action(self): + def _get_action(self): # Amazon is just calling urls like /TelemetryRecords etc... - action = urlsplit(self.uri).path.lstrip('/') - action = camelcase_to_underscores(action) - headers = self.response_headers - method_names = method_names_from_class(self.__class__) - if action in method_names: - method = getattr(self, action) - try: - response = method() - except HTTPException as http_error: - response = http_error.description, dict(status=http_error.code) - if isinstance(response, six.string_types): - return 200, headers, response - else: - body, new_headers = response - status = new_headers.get('status', 200) - headers.update(new_headers) - # Cast status to string - if "status" in headers: - headers['status'] = str(headers['status']) - return status, headers, body - - raise NotImplementedError( - "The {0} action has not been implemented".format(action)) + # This uses the value after / as the camalcase action, which then + # gets converted in call_action to find the following methods + return urlsplit(self.uri).path.lstrip('/') # PutTelemetryRecords def telemetry_records(self): @@ -122,12 +99,52 @@ class XRayResponse(BaseResponse): # BatchGetTraces def traces(self): - raise NotImplementedError() + trace_ids = self._get_param('TraceIds') + next_token = self._get_param('NextToken') # not implemented yet - # GetServiceGraph + if trace_ids is None: + msg = 'Parameter 
TraceIds is missing' + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) + + try: + result = self.xray_backend.get_trace_ids(trace_ids, next_token) + except AWSError as err: + return err.response() + except Exception as err: + return json.dumps({'__type': 'InternalFailure', 'message': str(err)}), dict(status=500) + + return json.dumps(result) + + # GetServiceGraph - just a dummy response for now def service_graph(self): - raise NotImplementedError() + start_time = self._get_param('StartTime') + end_time = self._get_param('EndTime') + # next_token = self._get_param('NextToken') # not implemented yet - # GetTraceGraph + if start_time is None: + msg = 'Parameter StartTime is missing' + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) + if end_time is None: + msg = 'Parameter EndTime is missing' + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) + + result = { + 'StartTime': start_time, + 'EndTime': end_time, + 'Services': [] + } + return json.dumps(result) + + # GetTraceGraph - just a dummy response for now def trace_graph(self): - raise NotImplementedError() + trace_ids = self._get_param('TraceIds') + # next_token = self._get_param('NextToken') # not implemented yet + + if trace_ids is None: + msg = 'Parameter TraceIds is missing' + return json.dumps({'__type': 'MissingParameter', 'message': msg}), dict(status=400) + + result = { + 'Services': [] + } + return json.dumps(result) diff --git a/moto/xray/urls.py b/moto/xray/urls.py index c224e8d38..b0f13a980 100644 --- a/moto/xray/urls.py +++ b/moto/xray/urls.py @@ -6,5 +6,10 @@ url_bases = [ ] url_paths = { - '{0}/.+$': XRayResponse.dispatch, + '{0}/TelemetryRecords$': XRayResponse.dispatch, + '{0}/TraceSegments$': XRayResponse.dispatch, + '{0}/Traces$': XRayResponse.dispatch, + '{0}/ServiceGraph$': XRayResponse.dispatch, + '{0}/TraceGraph$': XRayResponse.dispatch, + '{0}/TraceSummaries$': XRayResponse.dispatch, } 
diff --git a/tests/test_xray/test_xray_boto3.py b/tests/test_xray/test_xray_boto3.py index 9da55ad1e..5ad8f8bc7 100644 --- a/tests/test_xray/test_xray_boto3.py +++ b/tests/test_xray/test_xray_boto3.py @@ -82,3 +82,58 @@ def test_trace_summary(): StartTime=datetime.datetime(2014, 1, 1), EndTime=datetime.datetime(2017, 1, 1) ) + + +@mock_xray +def test_batch_get_trace(): + client = boto3.client('xray', region_name='us-east-1') + + client.put_trace_segments( + TraceSegmentDocuments=[ + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0a', + 'start_time': 1.478293361271E9, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'in_progress': True + }), + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0b', + 'start_time': 1478293365, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'end_time': 1478293385 + }) + ] + ) + + resp = client.batch_get_traces( + TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] + ) + len(resp['UnprocessedTraceIds']).should.equal(1) + len(resp['Traces']).should.equal(1) + + +# Following are not implemented, just testing it returns what boto expects +@mock_xray +def test_batch_get_service_graph(): + client = boto3.client('xray', region_name='us-east-1') + + client.get_service_graph( + StartTime=datetime.datetime(2014, 1, 1), + EndTime=datetime.datetime(2017, 1, 1) + ) + + +@mock_xray +def test_batch_get_trace_graph(): + client = boto3.client('xray', region_name='us-east-1') + + client.batch_get_traces( + TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] + ) + + + + + From 84bd16d2a25dd049042133457761a88830129f95 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Mon, 25 Sep 2017 11:39:09 +1300 Subject: [PATCH 276/412] get_item update_item return capacity consumed correctly --- moto/dynamodb2/responses.py | 15 ++++- tests/test_dynamodb2/test_dynamodb.py | 97 ++++++++++++++++++++++++++- 2 files changed, 108 insertions(+), 4 deletions(-) diff 
--git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 37b73160e..437850713 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -194,7 +194,10 @@ class DynamoHandler(BaseResponse): if result: item_dict = result.to_json() - item_dict['ConsumedCapacityUnits'] = 1 + item_dict['ConsumedCapacity'] = { + 'TableName': name, + 'CapacityUnits': 1 + } return dynamo_json_dump(item_dict) else: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' @@ -238,7 +241,10 @@ class DynamoHandler(BaseResponse): return self.error(er, 'Validation Exception') if item: item_dict = item.describe_attrs(attributes=None) - item_dict['ConsumedCapacityUnits'] = 0.5 + item_dict['ConsumedCapacity'] = { + 'TableName': name, + 'CapacityUnits': 0.5 + } return dynamo_json_dump(item_dict) else: # Item not found @@ -523,7 +529,10 @@ class DynamoHandler(BaseResponse): return self.error(er, 'Validation Exception') item_dict = item.to_json() - item_dict['ConsumedCapacityUnits'] = 0.5 + item_dict['ConsumedCapacity'] = { + 'TableName': name, + 'CapacityUnits': 0.5 + } if not existing_item: item_dict['Attributes'] = {} diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 2d58740f5..08cd9b56c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -405,11 +405,11 @@ def test_basic_projection_expressions_with_attr_expression_names(): results = table.query( KeyConditionExpression=Key('forum_name').eq( 'the-key'), - ProjectionExpression='#rl, #rt, subject', ExpressionAttributeNames={ '#rl': 'body', '#rt': 'attachment' }, + ProjectionExpression='#rl, #rt, subject' ) assert 'body' in results['Items'][0] @@ -418,3 +418,98 @@ def test_basic_projection_expressions_with_attr_expression_names(): assert results['Items'][0]['subject'] == '123' assert 'attachment' in results['Items'][0] assert results['Items'][0]['attachment'] == 'something' + +@mock_dynamodb2 +def 
test_put_item_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + response = table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + }) + + assert 'ConsumedCapacity' in response + +@mock_dynamodb2 +def test_update_item_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + }) + + response = table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='set body=:tb', + ExpressionAttributeValues={ + ':tb': 'a new message' + }) + + assert 'ConsumedCapacity' in response + assert 'CapacityUnits' in response['ConsumedCapacity'] + assert 'TableName' in response['ConsumedCapacity'] From 6cf74742f84582b756a2d0e0a05b94027c8a118f Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Mon, 25 Sep 2017 11:44:10 +1300 Subject: 
[PATCH 277/412] add test for get_item return consumed capacity --- tests/test_dynamodb2/test_dynamodb.py | 51 ++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 08cd9b56c..35c14f396 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -405,11 +405,11 @@ def test_basic_projection_expressions_with_attr_expression_names(): results = table.query( KeyConditionExpression=Key('forum_name').eq( 'the-key'), + ProjectionExpression='#rl, #rt, subject', ExpressionAttributeNames={ '#rl': 'body', '#rt': 'attachment' }, - ProjectionExpression='#rl, #rt, subject' ) assert 'body' in results['Items'][0] @@ -513,3 +513,52 @@ def test_update_item_returns_consumed_capacity(): assert 'ConsumedCapacity' in response assert 'CapacityUnits' in response['ConsumedCapacity'] assert 'TableName' in response['ConsumedCapacity'] + +@mock_dynamodb2 +def test_get_item_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + }) + + response = table.get_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + assert 'ConsumedCapacity' in response + assert 'CapacityUnits' in response['ConsumedCapacity'] + assert 'TableName' in response['ConsumedCapacity'] From 7599fd5dc00d54c80d29bcfab368ca5b40a78b05 Mon Sep 17 00:00:00 2001 From: Loukas Leontopoulos Date: Mon, 25 Sep 2017 14:31:29 +0300 Subject: [PATCH 278/412] Add test_case for layer per stack Add a new test case for creating a layer with the same name under a different stack. 
--- tests/test_opsworks/test_layers.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index 31fdeae8c..4bfdd5a67 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -24,6 +24,22 @@ def test_create_layer_response(): Name="TestLayer", Shortname="TestLayerShortName" ) + + response.should.contain("LayerId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName" + ) response.should.contain("LayerId") From 4393a4f76c0f4616c17b1e8e8da161ac3b26985f Mon Sep 17 00:00:00 2001 From: Loukas Leontopoulos Date: Mon, 25 Sep 2017 14:49:08 +0300 Subject: [PATCH 279/412] Change checks for handling same layer name in different stack --- moto/opsworks/models.py | 4 ++-- tests/test_opsworks/test_layers.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index 3adfd3323..fe8c882a7 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -422,11 +422,11 @@ class OpsWorksBackend(BaseBackend): stackid = kwargs['stack_id'] if stackid not in self.stacks: raise ResourceNotFoundException(stackid) - if name in [l.name for l in self.layers.values()]: + if name in [l.name for l in self.stacks[stackid].layers]: raise ValidationException( 'There is already a layer named "{0}" ' 'for this stack'.format(name)) - if shortname in [l.shortname for l in self.layers.values()]: + if shortname in [l.shortname for l in self.stacks[stackid].layers]: raise ValidationException( 'There is already a layer with shortname "{0}" ' 'for this stack'.format(shortname)) diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py
index 4bfdd5a67..03224feb0 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -24,16 +24,16 @@ def test_create_layer_response(): Name="TestLayer", Shortname="TestLayerShortName" ) - + response.should.contain("LayerId") - + second_stack_id = client.create_stack( Name="test_stack_2", Region="us-east-1", ServiceRoleArn="service_arn", DefaultInstanceProfileArn="profile_arn" )['StackId'] - + response = client.create_layer( StackId=second_stack_id, Type="custom", From 17de39aa8a95f85a1e60de2ed4ed2c6e760a5790 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:11:11 -0700 Subject: [PATCH 280/412] accepting stdin via make task --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7cec13b87..f02fa9c87 100644 --- a/Makefile +++ b/Makefile @@ -32,4 +32,4 @@ publish: upload_pypi_artifact build_dockerhub_image tag_github_release scaffold: @pip install -r requirements-dev.txt > /dev/null - @python scripts/scaffold.py + exec python scripts/scaffold.py From 21460df55d975a8b3fadb7349970f556b53aab09 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:40:41 -0700 Subject: [PATCH 281/412] adding CHANGELOG for 1.1.0 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e0ec033f3..da6614c20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ Moto Changelog Latest ------ +1.1.0 +----- + + * Add ELBv2 + * IAM user policies + * RDS snapshots + * IAM policy versions 1.0.1 ----- From 05f94c121ffa61570af90a3aa551c39fcfe2faef Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:40:54 -0700 Subject: [PATCH 282/412] adding CHANGELOG for 1.1.1 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index da6614c20..00b2ebedd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,12 @@ Moto Changelog Latest ------ +1.1.1 +----- + + * EC2 group-id 
filter + * EC2 list support for filters + 1.1.0 ----- From f832c39259dc72bf0353924c0410e57573b06244 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:41:06 -0700 Subject: [PATCH 283/412] adding CHANGELOG for 1.1.2 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 00b2ebedd..405ea1218 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ Moto Changelog Latest ------ +1.1.2 +----- + + * IAM account aliases + * SNS subscription attributes + * bugfixes in Dynamo, CFN, and EC2 + 1.1.1 ----- From 59b62c9bc752b505c873317622b16fce2847e2a7 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:41:17 -0700 Subject: [PATCH 284/412] adding CHANGELOG for 1.1.3 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 405ea1218..485858e06 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,11 @@ Moto Changelog Latest ------ +1.1.3 +----- + + * EC2 vpc_id in responses + 1.1.2 ----- From 0f75402d500afd69010fc8eb2123ab9cbc0b6566 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:41:24 -0700 Subject: [PATCH 285/412] adding CHANGELOG for 1.1.5 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 485858e06..398e8ce8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ Moto Changelog Latest ------ +1.1.5 +----- + + * Dynamo allow ADD update_item of a string set + * Handle max-keys in list-objects + * bugfixes in pagination + 1.1.3 ----- From cfda2aa123f4b9a1593ab842d7437edddbb8029d Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:41:29 -0700 Subject: [PATCH 286/412] adding CHANGELOG for 1.1.6 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 398e8ce8e..02938d27b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,12 @@ Moto Changelog Latest ------ +1.1.6 +----- + + * Dynamo 
ADD and DELETE operations in update expressions + * Lambda tag support + 1.1.5 ----- From c38fdaf820d7c5fc52411826c6167241c91df2b0 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:41:35 -0700 Subject: [PATCH 287/412] adding CHANGELOG for 1.1.7 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 02938d27b..eca004069 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,12 @@ Moto Changelog Latest ------ +1.1.7 +----- + + * Lambda invoke_async + * EC2 keypair filtering + 1.1.6 ----- From 5001cfe1357ce620c9d32d1defd0e1865d07a64f Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:41:43 -0700 Subject: [PATCH 288/412] adding CHANGELOG for 1.1.8 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eca004069..74c3c0d2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,12 @@ Moto Changelog Latest ------ +1.1.8 +----- + + * Lambda get_function for function created with zipfile + * scripts/implementation_coverage.py + 1.1.7 ----- From 7749d778a5a00adf2d031bb6f75e5cb908071262 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:42:00 -0700 Subject: [PATCH 289/412] adding CHANGELOG for 1.1.9 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74c3c0d2f..71dcfbb4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,11 @@ Moto Changelog Latest ------ +1.1.9 +----- + + * EC2 root device mapping + 1.1.8 ----- From f4cad690422890029cb7c55d28e893fc16cff0dd Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:42:07 -0700 Subject: [PATCH 290/412] adding CHANGELOG for 1.1.10 --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71dcfbb4b..bdf548720 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,14 @@ Moto Changelog Latest ------ +1.1.10 +----- + + * EC2 vpc address filtering + * EC2 
elastic ip dissociation + * ELBv2 target group tagging + * fixed complexity of accepting new filter implementations + 1.1.9 ----- From 353e63e4ed4dc71f886ab10fbf458bf3fd84efd8 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:42:13 -0700 Subject: [PATCH 291/412] adding CHANGELOG for 1.1.11 --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bdf548720..3fd9428bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,14 @@ Moto Changelog Latest ------ +1.1.11 +----- + + * S3 authentication + * SSM get_parameter + * ELBv2 target group tagging + * EC2 Security group filters + 1.1.10 ----- From 3bdbda110badb3602539dcca855d018bc7133aa5 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:42:17 -0700 Subject: [PATCH 292/412] adding CHANGELOG for 1.1.12 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3fd9428bb..94669a9eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ Moto Changelog Latest ------ +1.1.12 +----- + + * implemented all AWS managed policies in source + * fixing Dynamodb CapacityUnits format + * S3 ACL implementation + 1.1.11 ----- From 3194c06e506edd5744a153fbbec9da056432e2fb Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:42:22 -0700 Subject: [PATCH 293/412] adding CHANGELOG for 1.1.13 --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94669a9eb..7d64aa9c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,14 @@ Moto Changelog Latest ------ +1.1.13 +----- + + * Created alpine-based Dockerfile (dockerhub: motoserver/moto) + * SNS.SetSMSAttributes & SNS.GetSMSAttributes + Filtering + * S3 ACL implementation + * pushing to Dockerhub on `make publish` + 1.1.12 ----- From 8355024db8da56febde790938c43ff80d38249ec Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 25 Sep 2017 14:42:56 -0700 Subject: [PATCH 294/412] 
bumping to version 1.1.14 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a0f2ee098..329724489 100755 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ extras_require = { setup( name='moto', - version='1.1.13', + version='1.1.14', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 745263857ffd6ed68e6152f79cffafec7941f572 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Mon, 25 Sep 2017 23:28:18 +0100 Subject: [PATCH 295/412] Fix parsing non V4 Authorization headers --- moto/core/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index 781a0b284..e85054802 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -167,7 +167,7 @@ class BaseResponse(_TemplateEnvironmentMixin): match = re.search(self.region_regex, full_url) if match: region = match.group(1) - elif 'Authorization' in request.headers: + elif 'Authorization' in request.headers and 'AWS4' in request.headers['Authorization']: region = request.headers['Authorization'].split(",")[ 0].split("/")[2] else: From 56c65bc67cf9cc73c7417f786e3585affac8079b Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 26 Sep 2017 00:21:07 +0100 Subject: [PATCH 296/412] Added in publish by phone number --- moto/sns/models.py | 6 +++ moto/sns/responses.py | 32 +++++++++++++++- moto/sns/utils.py | 7 ++++ tests/test_sns/test_publishing_boto3.py | 44 ++++++++++++++++++++++ tests/test_sns/test_subscriptions_boto3.py | 33 ++++++++++++++++ 5 files changed, 121 insertions(+), 1 deletion(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 9feed0198..36336aaac 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -221,6 +221,12 @@ class SNSBackend(BaseBackend): except KeyError: raise SNSNotFoundError("Topic with arn {0} not found".format(arn)) + def get_topic_from_phone_number(self, number): + for subscription in 
self.subscriptions.values(): + if subscription.protocol == 'sms' and subscription.endpoint == number: + return subscription.topic.arn + raise SNSNotFoundError('Could not find valid subscription') + def set_topic_attribute(self, topic_arn, attribute_name, attribute_value): topic = self.get_topic(topic_arn) setattr(topic, attribute_name, attribute_value) diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 92092dc42..85764aa58 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -6,6 +6,8 @@ from collections import defaultdict from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from .models import sns_backends +from .exceptions import SNSNotFoundError +from .utils import is_e164 class SNSResponse(BaseResponse): @@ -136,6 +138,13 @@ class SNSResponse(BaseResponse): topic_arn = self._get_param('TopicArn') endpoint = self._get_param('Endpoint') protocol = self._get_param('Protocol') + + if protocol == 'sms' and not is_e164(endpoint): + return self._error( + 'InvalidParameter', + 'Phone number does not meet the E164 format' + ), dict(status=400) + subscription = self.backend.subscribe(topic_arn, endpoint, protocol) if self.request_json: @@ -229,7 +238,28 @@ class SNSResponse(BaseResponse): def publish(self): target_arn = self._get_param('TargetArn') topic_arn = self._get_param('TopicArn') - arn = target_arn if target_arn else topic_arn + phone_number = self._get_param('PhoneNumber') + if phone_number is not None: + # Check phone is correct syntax (e164) + if not is_e164(phone_number): + return self._error( + 'InvalidParameter', + 'Phone number does not meet the E164 format' + ), dict(status=400) + + # Look up topic arn by phone number + try: + arn = self.backend.get_topic_from_phone_number(phone_number) + except SNSNotFoundError: + return self._error( + 'ParameterValueInvalid', + 'Could not find topic associated with phone number' + ), dict(status=400) + elif target_arn is not None: + arn = target_arn 
+ else: + arn = topic_arn + message = self._get_param('Message') message_id = self.backend.publish(arn, message) diff --git a/moto/sns/utils.py b/moto/sns/utils.py index 864c3af6b..7793b0f6d 100644 --- a/moto/sns/utils.py +++ b/moto/sns/utils.py @@ -1,6 +1,9 @@ from __future__ import unicode_literals +import re import uuid +E164_REGEX = re.compile(r'^\+?[1-9]\d{1,14}$') + def make_arn_for_topic(account_id, name, region_name): return "arn:aws:sns:{0}:{1}:{2}".format(region_name, account_id, name) @@ -9,3 +12,7 @@ def make_arn_for_topic(account_id, name, region_name): def make_arn_for_subscription(topic_arn): subscription_id = uuid.uuid4() return "{0}:{1}".format(topic_arn, subscription_id) + + +def is_e164(number): + return E164_REGEX.match(number) is not None diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index a53744d63..6228f212f 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -10,6 +10,7 @@ from freezegun import freeze_time import sure # noqa from moto.packages.responses import responses +from botocore.exceptions import ClientError from moto import mock_sns, mock_sqs from freezegun import freeze_time @@ -43,6 +44,49 @@ def test_publish_to_sqs(): acquired_message.should.equal(expected) +@mock_sns +def test_publish_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='+15551234567' + ) + + result = client.publish(PhoneNumber="+15551234567", Message="my message") + result.should.contain('MessageId') + + +@mock_sns +def test_publish_bad_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + client.subscribe( + TopicArn=arn, + Protocol='sms', + 
Endpoint='+15551234567' + ) + + try: + # Test invalid number + client.publish(PhoneNumber="NAA+15551234567", Message="my message") + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameter') + + try: + # Test not found number + client.publish(PhoneNumber="+44001234567", Message="my message") + except ClientError as err: + err.response['Error']['Code'].should.equal('ParameterValueInvalid') + + @mock_sqs @mock_sns def test_publish_to_sqs_dump_json(): diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index e600d6422..4446febfc 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -11,6 +11,39 @@ from moto import mock_sns from moto.sns.models import DEFAULT_PAGE_SIZE +@mock_sns +def test_subscribe_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + resp = client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='+15551234567' + ) + resp.should.contain('SubscriptionArn') + + +@mock_sns +def test_subscribe_bad_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + try: + # Test invalid number + client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='NAA+15551234567' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameter') + + @mock_sns def test_creating_subscription(): conn = boto3.client('sns', region_name='us-east-1') From fcacecbef0214790760a8efaeb7d3c3dad65388f Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 26 Sep 2017 16:46:18 +0100 Subject: [PATCH 297/412] Nearly finished Polly implementation --- README.md | 2 + moto/__init__.py | 3 +- moto/backends.py | 4 +- moto/polly/__init__.py | 6 + moto/polly/models.py | 114 
++++++++++++++++ moto/polly/resources.py | 61 +++++++++ moto/polly/responses.py | 188 +++++++++++++++++++++++++++ moto/polly/urls.py | 13 ++ moto/polly/utils.py | 5 + scripts/template/lib/responses.py.j2 | 2 +- tests/test_polly/test_polly.py | 109 ++++++++++++++++ tests/test_polly/test_server.py | 16 +++ 12 files changed, 520 insertions(+), 3 deletions(-) create mode 100644 moto/polly/__init__.py create mode 100644 moto/polly/models.py create mode 100644 moto/polly/resources.py create mode 100644 moto/polly/responses.py create mode 100644 moto/polly/urls.py create mode 100644 moto/polly/utils.py create mode 100644 tests/test_polly/test_polly.py create mode 100644 tests/test_polly/test_server.py diff --git a/README.md b/README.md index 1e4dd4176..3d8b61258 100644 --- a/README.md +++ b/README.md @@ -100,6 +100,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | KMS | @mock_kms | basic endpoints done | |------------------------------------------------------------------------------| +| Polly | @mock_polly | all endpoints done | +|------------------------------------------------------------------------------| | RDS | @mock_rds | core endpoints done | |------------------------------------------------------------------------------| | RDS2 | @mock_rds2 | core endpoints done | diff --git a/moto/__init__.py b/moto/__init__.py index a832def53..b408f6678 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -22,10 +22,11 @@ from .elbv2 import mock_elbv2 # flake8: noqa from .emr import mock_emr, mock_emr_deprecated # flake8: noqa from .events import mock_events # flake8: noqa from .glacier import mock_glacier, mock_glacier_deprecated # flake8: noqa -from .opsworks import mock_opsworks, mock_opsworks_deprecated # flake8: noqa from .iam import mock_iam, mock_iam_deprecated # flake8: noqa from .kinesis import mock_kinesis, mock_kinesis_deprecated # flake8: noqa 
from .kms import mock_kms, mock_kms_deprecated # flake8: noqa +from .opsworks import mock_opsworks, mock_opsworks_deprecated # flake8: noqa +from .polly import mock_polly # flake8: noqa from .rds import mock_rds, mock_rds_deprecated # flake8: noqa from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa diff --git a/moto/backends.py b/moto/backends.py index da9d1821d..26a60002e 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -23,6 +23,7 @@ from moto.instance_metadata import instance_metadata_backends from moto.kinesis import kinesis_backends from moto.kms import kms_backends from moto.opsworks import opsworks_backends +from moto.polly import polly_backends from moto.rds2 import rds2_backends from moto.redshift import redshift_backends from moto.route53 import route53_backends @@ -54,9 +55,10 @@ BACKENDS = { 'iam': iam_backends, 'moto_api': moto_api_backends, 'instance_metadata': instance_metadata_backends, - 'opsworks': opsworks_backends, 'kinesis': kinesis_backends, 'kms': kms_backends, + 'opsworks': opsworks_backends, + 'polly': polly_backends, 'redshift': redshift_backends, 'rds': rds2_backends, 's3': s3_backends, diff --git a/moto/polly/__init__.py b/moto/polly/__init__.py new file mode 100644 index 000000000..9c2281126 --- /dev/null +++ b/moto/polly/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import polly_backends +from ..core.models import base_decorator + +polly_backend = polly_backends['us-east-1'] +mock_polly = base_decorator(polly_backends) diff --git a/moto/polly/models.py b/moto/polly/models.py new file mode 100644 index 000000000..e7b7117dc --- /dev/null +++ b/moto/polly/models.py @@ -0,0 +1,114 @@ +from __future__ import unicode_literals +from xml.etree import ElementTree as ET +import datetime + +import boto3 +from moto.core import BaseBackend, BaseModel + +from .resources import VOICE_DATA +from .utils import 
make_arn_for_lexicon + +DEFAULT_ACCOUNT_ID = 123456789012 + + +class Lexicon(BaseModel): + def __init__(self, name, content, region_name): + self.name = name + self.content = content + self.size = 0 + self.alphabet = None + self.last_modified = None + self.language_code = None + self.lexemes_count = 0 + self.arn = make_arn_for_lexicon(DEFAULT_ACCOUNT_ID, name, region_name) + + self.update() + + def update(self, content=None): + if content is not None: + self.content = content + + # Probably a very naive approach, but it'll do for now. + try: + root = ET.fromstring(self.content) + self.size = len(self.content) + self.last_modified = int((datetime.datetime.now() - + datetime.datetime(1970, 1, 1)).total_seconds()) + self.lexemes_count = len(root.findall('.')) + + for key, value in root.attrib.items(): + if key.endswith('alphabet'): + self.alphabet = value + elif key.endswith('lang'): + self.language_code = value + + except Exception as err: + raise ValueError('Failure parsing XML: {0}'.format(err)) + + def to_dict(self): + return { + 'Attributes': { + 'Alphabet': self.alphabet, + 'LanguageCode': self.language_code, + 'LastModified': self.last_modified, + 'LexemesCount': self.lexemes_count, + 'LexiconArn': self.arn, + 'Size': self.size + } + } + + def __repr__(self): + return ''.format(self.name) + + +class PollyBackend(BaseBackend): + def __init__(self, region_name=None): + super(PollyBackend, self).__init__() + self.region_name = region_name + + self._lexicons = {} + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def describe_voices(self, language_code, next_token): + if language_code is None: + return VOICE_DATA + + return [item for item in VOICE_DATA if item['LanguageCode'] == language_code] + + def delete_lexicon(self, name): + # implement here + del self._lexicons[name] + + def get_lexicon(self, name): + # Raises KeyError + return self._lexicons[name] + + def list_lexicons(self, next_token): + + result = 
[] + + for name, lexicon in self._lexicons.items(): + lexicon_dict = lexicon.to_dict() + lexicon_dict['Name'] = name + + result.append(lexicon_dict) + + return result + + def put_lexicon(self, name, content): + # If lexicon content is bad, it will raise ValueError + if name in self._lexicons: + # Regenerated all the stats from the XML + # but keeps the ARN + self._lexicons.update(content) + else: + lexicon = Lexicon(name, content, region_name=self.region_name) + self._lexicons[name] = lexicon + + +available_regions = boto3.session.Session().get_available_regions("polly") +polly_backends = {region: PollyBackend(region_name=region) for region in available_regions} diff --git a/moto/polly/resources.py b/moto/polly/resources.py new file mode 100644 index 000000000..77971a90d --- /dev/null +++ b/moto/polly/resources.py @@ -0,0 +1,61 @@ +VOICE_DATA = [ + {'Id': 'Joanna', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Gender': 'Female', 'Name': 'Joanna'}, + {'Id': 'Mizuki', 'LanguageCode': 'ja-JP', 'LanguageName': 'Japanese', 'Gender': 'Female', 'Name': 'Mizuki'}, + {'Id': 'Filiz', 'LanguageCode': 'tr-TR', 'LanguageName': 'Turkish', 'Gender': 'Female', 'Name': 'Filiz'}, + {'Id': 'Astrid', 'LanguageCode': 'sv-SE', 'LanguageName': 'Swedish', 'Gender': 'Female', 'Name': 'Astrid'}, + {'Id': 'Tatyana', 'LanguageCode': 'ru-RU', 'LanguageName': 'Russian', 'Gender': 'Female', 'Name': 'Tatyana'}, + {'Id': 'Maxim', 'LanguageCode': 'ru-RU', 'LanguageName': 'Russian', 'Gender': 'Male', 'Name': 'Maxim'}, + {'Id': 'Carmen', 'LanguageCode': 'ro-RO', 'LanguageName': 'Romanian', 'Gender': 'Female', 'Name': 'Carmen'}, + {'Id': 'Ines', 'LanguageCode': 'pt-PT', 'LanguageName': 'Portuguese', 'Gender': 'Female', 'Name': 'Inês'}, + {'Id': 'Cristiano', 'LanguageCode': 'pt-PT', 'LanguageName': 'Portuguese', 'Gender': 'Male', 'Name': 'Cristiano'}, + {'Id': 'Vitoria', 'LanguageCode': 'pt-BR', 'LanguageName': 'Brazilian Portuguese', 'Gender': 'Female', 'Name': 'Vitória'}, + {'Id': 
'Ricardo', 'LanguageCode': 'pt-BR', 'LanguageName': 'Brazilian Portuguese', 'Gender': 'Male', 'Name': 'Ricardo'}, + {'Id': 'Maja', 'LanguageCode': 'pl-PL', 'LanguageName': 'Polish', 'Gender': 'Female', 'Name': 'Maja'}, + {'Id': 'Jan', 'LanguageCode': 'pl-PL', 'LanguageName': 'Polish', 'Gender': 'Male', 'Name': 'Jan'}, + {'Id': 'Ewa', 'LanguageCode': 'pl-PL', 'LanguageName': 'Polish', 'Gender': 'Female', 'Name': 'Ewa'}, + {'Id': 'Ruben', 'LanguageCode': 'nl-NL', 'LanguageName': 'Dutch', 'Gender': 'Male', 'Name': 'Ruben'}, + {'Id': 'Lotte', 'LanguageCode': 'nl-NL', 'LanguageName': 'Dutch', 'Gender': 'Female', 'Name': 'Lotte'}, + {'Id': 'Liv', 'LanguageCode': 'nb-NO', 'LanguageName': 'Norwegian', 'Gender': 'Female', 'Name': 'Liv'}, + {'Id': 'Giorgio', 'LanguageCode': 'it-IT', 'LanguageName': 'Italian', 'Gender': 'Male', 'Name': 'Giorgio'}, + {'Id': 'Carla', 'LanguageCode': 'it-IT', 'LanguageName': 'Italian', 'Gender': 'Female', 'Name': 'Carla'}, + {'Id': 'Karl', 'LanguageCode': 'is-IS', 'LanguageName': 'Icelandic', 'Gender': 'Male', 'Name': 'Karl'}, + {'Id': 'Dora', 'LanguageCode': 'is-IS', 'LanguageName': 'Icelandic', 'Gender': 'Female', 'Name': 'Dóra'}, + {'Id': 'Mathieu', 'LanguageCode': 'fr-FR', 'LanguageName': 'French', 'Gender': 'Male', 'Name': 'Mathieu'}, + {'Id': 'Celine', 'LanguageCode': 'fr-FR', 'LanguageName': 'French', 'Gender': 'Female', 'Name': 'Céline'}, + {'Id': 'Chantal', 'LanguageCode': 'fr-CA', 'LanguageName': 'Canadian French', 'Gender': 'Female', 'Name': 'Chantal'}, + {'Id': 'Penelope', 'LanguageCode': 'es-US', 'LanguageName': 'US Spanish', 'Gender': 'Female', 'Name': 'Penélope'}, + {'Id': 'Miguel', 'LanguageCode': 'es-US', 'LanguageName': 'US Spanish', 'Gender': 'Male', 'Name': 'Miguel'}, + {'Id': 'Enrique', 'LanguageCode': 'es-ES', 'LanguageName': 'Castilian Spanish', 'Gender': 'Male', 'Name': 'Enrique'}, + {'Id': 'Conchita', 'LanguageCode': 'es-ES', 'LanguageName': 'Castilian Spanish', 'Gender': 'Female', 'Name': 'Conchita'}, + {'Id': 
'Geraint', 'LanguageCode': 'en-GB-WLS', 'LanguageName': 'Welsh English', 'Gender': 'Male', 'Name': 'Geraint'}, + {'Id': 'Salli', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Gender': 'Female', 'Name': 'Salli'}, + {'Id': 'Kimberly', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Gender': 'Female', 'Name': 'Kimberly'}, + {'Id': 'Kendra', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Gender': 'Female', 'Name': 'Kendra'}, + {'Id': 'Justin', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Gender': 'Male', 'Name': 'Justin'}, + {'Id': 'Joey', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Gender': 'Male', 'Name': 'Joey'}, + {'Id': 'Ivy', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Gender': 'Female', 'Name': 'Ivy'}, + {'Id': 'Raveena', 'LanguageCode': 'en-IN', 'LanguageName': 'Indian English', 'Gender': 'Female', 'Name': 'Raveena'}, + {'Id': 'Emma', 'LanguageCode': 'en-GB', 'LanguageName': 'British English', 'Gender': 'Female', 'Name': 'Emma'}, + {'Id': 'Brian', 'LanguageCode': 'en-GB', 'LanguageName': 'British English', 'Gender': 'Male', 'Name': 'Brian'}, + {'Id': 'Amy', 'LanguageCode': 'en-GB', 'LanguageName': 'British English', 'Gender': 'Female', 'Name': 'Amy'}, + {'Id': 'Russell', 'LanguageCode': 'en-AU', 'LanguageName': 'Australian English', 'Gender': 'Male', 'Name': 'Russell'}, + {'Id': 'Nicole', 'LanguageCode': 'en-AU', 'LanguageName': 'Australian English', 'Gender': 'Female', 'Name': 'Nicole'}, + {'Id': 'Vicki', 'LanguageCode': 'de-DE', 'LanguageName': 'German', 'Gender': 'Female', 'Name': 'Vicki'}, + {'Id': 'Marlene', 'LanguageCode': 'de-DE', 'LanguageName': 'German', 'Gender': 'Female', 'Name': 'Marlene'}, + {'Id': 'Hans', 'LanguageCode': 'de-DE', 'LanguageName': 'German', 'Gender': 'Male', 'Name': 'Hans'}, + {'Id': 'Naja', 'LanguageCode': 'da-DK', 'LanguageName': 'Danish', 'Gender': 'Female', 'Name': 'Naja'}, + {'Id': 'Mads', 'LanguageCode': 'da-DK', 'LanguageName': 'Danish', 'Gender': 'Male', 'Name': 
'Mads'}, + {'Id': 'Gwyneth', 'LanguageCode': 'cy-GB', 'LanguageName': 'Welsh', 'Gender': 'Female', 'Name': 'Gwyneth'}, + {'Id': 'Jacek', 'LanguageCode': 'pl-PL', 'LanguageName': 'Polish', 'Gender': 'Male', 'Name': 'Jacek'} +] + +# {...} is also shorthand set syntax +LANGUAGE_CODES = {'cy-GB', 'da-DK', 'de-DE', 'en-AU', 'en-GB', 'en-GB-WLS', 'en-IN', 'en-US', 'es-ES', 'es-US', + 'fr-CA', 'fr-FR', 'is-IS', 'it-IT', 'ja-JP', 'nb-NO', 'nl-NL', 'pl-PL', 'pt-BR', 'pt-PT', 'ro-RO', + 'ru-RU', 'sv-SE', 'tr-TR'} + +VOICE_IDS = {'Geraint', 'Gwyneth', 'Mads', 'Naja', 'Hans', 'Marlene', 'Nicole', 'Russell', 'Amy', 'Brian', 'Emma', + 'Raveena', 'Ivy', 'Joanna', 'Joey', 'Justin', 'Kendra', 'Kimberly', 'Salli', 'Conchita', 'Enrique', + 'Miguel', 'Penelope', 'Chantal', 'Celine', 'Mathieu', 'Dora', 'Karl', 'Carla', 'Giorgio', 'Mizuki', + 'Liv', 'Lotte', 'Ruben', 'Ewa', 'Jacek', 'Jan', 'Maja', 'Ricardo', 'Vitoria', 'Cristiano', 'Ines', + 'Carmen', 'Maxim', 'Tatyana', 'Astrid', 'Filiz'} diff --git a/moto/polly/responses.py b/moto/polly/responses.py new file mode 100644 index 000000000..a8be1bbae --- /dev/null +++ b/moto/polly/responses.py @@ -0,0 +1,188 @@ +from __future__ import unicode_literals + +import json +import re + +from six.moves.urllib.parse import urlsplit + +from moto.core.responses import BaseResponse +from .models import polly_backends +from .resources import LANGUAGE_CODES, VOICE_IDS + +LEXICON_NAME_REGEX = re.compile(r'^[0-9A-Za-z]{1,20}$') + + +class PollyResponse(BaseResponse): + @property + def polly_backend(self): + return polly_backends[self.region] + + @property + def json(self): + if not hasattr(self, '_json'): + self._json = json.loads(self.body) + return self._json + + def _error(self, code, message): + return json.dumps({'__type': code, 'message': message}), dict(status=400) + + def _get_action(self): + # Amazon is now naming things /v1/api_name + url_parts = urlsplit(self.uri).path.lstrip('/').split('/') + # [0] = 'v1' + + return url_parts[1] + + # 
DescribeVoices + def voices(self): + language_code = self._get_param('LanguageCode') + next_token = self._get_param('NextToken') + + if language_code is not None and language_code not in LANGUAGE_CODES: + msg = "1 validation error detected: Value '{0}' at 'languageCode' failed to satisfy constraint: " \ + "Member must satisfy enum value set: [{1}]".format(language_code, ', '.join(LANGUAGE_CODES)) + return msg, dict(status=400) + + voices = self.polly_backend.describe_voices(language_code, next_token) + + return json.dumps({'Voices': voices}) + + def lexicons(self): + # Dish out requests based on methods + + # anything after the /v1/lexicons/ + args = urlsplit(self.uri).path.lstrip('/').split('/')[2:] + + if self.method == 'GET': + if len(args) == 0: + return self._get_lexicons_list() + else: + return self._get_lexicon(*args) + elif self.method == 'PUT': + return self._put_lexicons(*args) + elif self.method == 'DELETE': + return self._delete_lexicon(*args) + + return self._error('InvalidAction', 'Bad route') + + # PutLexicon + def _put_lexicons(self, lexicon_name): + if LEXICON_NAME_REGEX.match(lexicon_name) is None: + return self._error('InvalidParameterValue', 'Lexicon name must match [0-9A-Za-z]{1,20}') + + if 'Content' not in self.json: + return self._error('MissingParameter', 'Content is missing from the body') + + self.polly_backend.put_lexicon(lexicon_name, self.json['Content']) + + return '' + + # ListLexicons + def _get_lexicons_list(self): + next_token = self._get_param('NextToken') + + result = { + 'Lexicons': self.polly_backend.list_lexicons(next_token) + } + + return json.dumps(result) + + # GetLexicon + def _get_lexicon(self, lexicon_name): + try: + lexicon = self.polly_backend.get_lexicon(lexicon_name) + except KeyError: + return self._error('LexiconNotFoundException', 'Lexicon not found') + + result = { + 'Lexicon': { + 'Name': lexicon_name, + 'Content': lexicon.content + }, + 'LexiconAttributes': lexicon.to_dict()['Attributes'] + } + + return 
json.dumps(result) + + # DeleteLexicon + def _delete_lexicon(self, lexicon_name): + try: + self.polly_backend.delete_lexicon(lexicon_name) + except KeyError: + return self._error('LexiconNotFoundException', 'Lexicon not found') + + return '' + + # SynthesizeSpeech + def speech(self): + # Sanity check params + args = { + 'lexicon_names': None, + 'sample_rate': 22050, + 'speech_marks': None, + 'text': None, + 'text_type': 'text' + } + + if 'LexiconNames' in self.json: + for lex in self.json['LexiconNames']: + try: + self.polly_backend.get_lexicon(lex) + except KeyError: + return self._error('LexiconNotFoundException', 'Lexicon not found') + + args['lexicon_names'] = self.json['LexiconNames'] + + if 'OutputFormat' not in self.json: + return self._error('LexiconNotFoundException', 'Lexicon not found') + if self.json['OutputFormat'] not in ('json', 'mp3', 'ogg_vorbis', 'pcm'): + return self._error('LexiconNotFoundException', 'Lexicon not found') + args['output_format'] = self.json['OutputFormat'] + + if 'SampleRate' in self.json: + sample_rate = int(self.json['SampleRate']) + if sample_rate not in (8000, 16000, 22050): + return self._error('InvalidSampleRateException', 'The specified sample rate is not valid.') + args['sample_rate'] = sample_rate + + if 'SpeechMarkTypes' in self.json: + for value in self.json['SpeechMarkTypes']: + if value not in ('sentance', 'ssml', 'viseme', 'word'): + return self._error('LexiconNotFoundException', 'Lexicon not found') + args['speech_marks'] = self.json['SpeechMarkTypes'] + + if 'Text' not in self.json: + return self._error('LexiconNotFoundException', 'Lexicon not found') + args['text'] = self.json['Text'] + + if 'TextType' in self.json: + if self.json['TextType'] not in ('ssml', 'text'): + return self._error('LexiconNotFoundException', 'Lexicon not found') + args['text_type'] = self.json['TextType'] + + if 'VoiceId' not in self.json: + return self._error('LexiconNotFoundException', 'Lexicon not found') + if self.json['VoiceId'] not 
in VOICE_IDS: + return self._error('LexiconNotFoundException', 'Lexicon not found') + args['voice_id'] = self.json['VoiceId'] + + # More validation + if len(args['text']) > 3000: + return self._error('TextLengthExceededException', 'Text too long') + + if args['speech_marks'] is not None and args['output_format'] != 'json': + return self._error('MarksNotSupportedForFormatException', 'OutputFormat must be json') + if args['speech_marks'] is not None and args['text_type'] == 'text': + return self._error('SsmlMarksNotSupportedForTextTypeException', 'TextType must be ssml') + + content_type = 'audio/json' + if args['output_format'] == 'mp3': + content_type = 'audio/mpeg' + elif args['output_format'] == 'ogg_vorbis': + content_type = 'audio/ogg' + elif args['output_format'] == 'pcm': + content_type = 'audio/pcm' + + headers = {'Content-Type': content_type} + + return '\x00\x00\x00\x00\x00\x00\x00\x00', headers diff --git a/moto/polly/urls.py b/moto/polly/urls.py new file mode 100644 index 000000000..212c6822a --- /dev/null +++ b/moto/polly/urls.py @@ -0,0 +1,13 @@ +from __future__ import unicode_literals +from .responses import PollyResponse + +url_bases = [ + "https?://polly.(.+).amazonaws.com", +] + +url_paths = { + '{0}/v1/voices': PollyResponse.dispatch, + '{0}/v1/lexicons/.+': PollyResponse.dispatch, + '{0}/v1/lexicons': PollyResponse.dispatch, + '{0}/v1/speech': PollyResponse.dispatch, +} diff --git a/moto/polly/utils.py b/moto/polly/utils.py new file mode 100644 index 000000000..253b19e13 --- /dev/null +++ b/moto/polly/utils.py @@ -0,0 +1,5 @@ +from __future__ import unicode_literals + + +def make_arn_for_lexicon(account_id, name, region_name): + return "arn:aws:polly:{0}:{1}:lexicon/{2}".format(region_name, account_id, name) diff --git a/scripts/template/lib/responses.py.j2 b/scripts/template/lib/responses.py.j2 index b27da5b9f..85827e651 100644 --- a/scripts/template/lib/responses.py.j2 +++ b/scripts/template/lib/responses.py.j2 @@ -11,5 +11,5 @@ class {{ 
service_class }}Response(BaseResponse): # add methods from here -# add teampltes from here +# add templates from here diff --git a/tests/test_polly/test_polly.py b/tests/test_polly/test_polly.py new file mode 100644 index 000000000..2af8ed42a --- /dev/null +++ b/tests/test_polly/test_polly.py @@ -0,0 +1,109 @@ +from __future__ import unicode_literals + +from botocore.exceptions import ClientError +import boto3 +import sure # noqa +from moto import mock_polly + +# Polly only available in a few regions +DEFAULT_REGION = 'eu-west-1' + +LEXICON_XML = """ + + + W3C + World Wide Web Consortium + +""" + + +@mock_polly +def test_describe_voices(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + + resp = client.describe_voices() + len(resp['Voices']).should.be.greater_than(1) + + resp = client.describe_voices(LanguageCode='en-GB') + len(resp['Voices']).should.equal(3) + + try: + client.describe_voices(LanguageCode='SOME_LANGUAGE') + except ClientError as err: + err.response['Error']['Code'].should.equal('400') + else: + raise RuntimeError('Should of raised an exception') + + +@mock_polly +def test_put_list_lexicon(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + + # Return nothing + client.put_lexicon( + Name='test', + Content=LEXICON_XML + ) + + resp = client.list_lexicons() + len(resp['Lexicons']).should.equal(1) + + +@mock_polly +def test_put_get_lexicon(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + + # Return nothing + client.put_lexicon( + Name='test', + Content=LEXICON_XML + ) + + resp = client.get_lexicon(Name='test') + resp.should.contain('Lexicon') + resp.should.contain('LexiconAttributes') + + +@mock_polly +def test_put_lexicon_bad_name(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + + try: + client.put_lexicon( + Name='test-invalid', + Content=LEXICON_XML + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + else: + raise RuntimeError('Should 
of raised an exception') + + +@mock_polly +def test_synthesize_speech(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + + # Return nothing + client.put_lexicon( + Name='test', + Content=LEXICON_XML + ) + + a = client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='text', + VoiceId='Astrid' + ) + # TODO check content type + print() + +# Todo expand synthesize speech tests for bad config \ No newline at end of file diff --git a/tests/test_polly/test_server.py b/tests/test_polly/test_server.py new file mode 100644 index 000000000..9dc3593d9 --- /dev/null +++ b/tests/test_polly/test_server.py @@ -0,0 +1,16 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_polly + +''' +Test the different server responses +''' + +@mock_polly +def test_polly_list(): + backend = server.create_backend_app("polly") + test_client = backend.test_client() + # do test \ No newline at end of file From 2db5f0a2471d5f571d0927455de898d7f3283150 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 26 Sep 2017 17:33:19 +0100 Subject: [PATCH 298/412] Added some instruction text --- scripts/scaffold.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/scaffold.py b/scripts/scaffold.py index bbc8de208..5373be40d 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -383,5 +383,7 @@ def main(): else: print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow') + click.echo('You will still need to make "{0}/urls.py", add the backend into "backends.py" and add the mock into "__init__.py"'.format(service)) + if __name__ == '__main__': main() From bba6d23eaeab974ff1788eb6291b9e44785e5f05 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 26 Sep 2017 17:37:26 +0100 Subject: [PATCH 299/412] Started on batch --- moto/batch/__init__.py | 6 ++++++ moto/batch/exceptions.py | 3 +++ 
moto/batch/models.py | 23 +++++++++++++++++++++++ moto/batch/responses.py | 14 ++++++++++++++ tests/test_batch/test_batch.py | 11 +++++++++++ tests/test_batch/test_server.py | 16 ++++++++++++++++ 6 files changed, 73 insertions(+) create mode 100644 moto/batch/__init__.py create mode 100644 moto/batch/exceptions.py create mode 100644 moto/batch/models.py create mode 100644 moto/batch/responses.py create mode 100644 tests/test_batch/test_batch.py create mode 100644 tests/test_batch/test_server.py diff --git a/moto/batch/__init__.py b/moto/batch/__init__.py new file mode 100644 index 000000000..6002b6fc7 --- /dev/null +++ b/moto/batch/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import batch_backends +from ..core.models import base_decorator + +batch_backend = batch_backends['us-east-1'] +mock_batch = base_decorator(batch_backends) diff --git a/moto/batch/exceptions.py b/moto/batch/exceptions.py new file mode 100644 index 000000000..e598ee7af --- /dev/null +++ b/moto/batch/exceptions.py @@ -0,0 +1,3 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + diff --git a/moto/batch/models.py b/moto/batch/models.py new file mode 100644 index 000000000..a54b30c32 --- /dev/null +++ b/moto/batch/models.py @@ -0,0 +1,23 @@ +from __future__ import unicode_literals +import boto3 +from moto.core import BaseBackend, BaseModel + + +class BatchBackend(BaseBackend): + def __init__(self, region_name=None): + super(BatchBackend, self).__init__() + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_compute_environment(self, compute_environment_name, type, state, compute_resources, service_role): + # implement here + return compute_environment_name, compute_environment_arn + # add methods from here + + +available_regions = boto3.session.Session().get_available_regions("batch") +batch_backends = {region: BatchBackend for 
region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py new file mode 100644 index 000000000..d91af8a77 --- /dev/null +++ b/moto/batch/responses.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import batch_backends + + +class BatchResponse(BaseResponse): + @property + def batch_backend(self): + return batch_backends[self.region] + + # add methods from here + + +# add teampltes from here diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py new file mode 100644 index 000000000..eafd32eae --- /dev/null +++ b/tests/test_batch/test_batch.py @@ -0,0 +1,11 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_batch + + +@mock_batch +def test_list(): + # do test + pass \ No newline at end of file diff --git a/tests/test_batch/test_server.py b/tests/test_batch/test_server.py new file mode 100644 index 000000000..7c0d2b3a1 --- /dev/null +++ b/tests/test_batch/test_server.py @@ -0,0 +1,16 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_batch + +''' +Test the different server responses +''' + +@mock_batch +def test_batch_list(): + backend = server.create_backend_app("batch") + test_client = backend.test_client() + # do test \ No newline at end of file From 993e74dc773948f9e89cb7b9ad5a6af146a304b3 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 26 Sep 2017 19:42:37 +0100 Subject: [PATCH 300/412] Added tests --- moto/polly/responses.py | 14 +-- tests/test_polly/test_polly.py | 186 +++++++++++++++++++++++++++++++-- 2 files changed, 183 insertions(+), 17 deletions(-) diff --git a/moto/polly/responses.py b/moto/polly/responses.py index a8be1bbae..810264424 100644 --- a/moto/polly/responses.py +++ b/moto/polly/responses.py @@ -134,9 +134,9 @@ class PollyResponse(BaseResponse): args['lexicon_names'] = self.json['LexiconNames'] 
if 'OutputFormat' not in self.json: - return self._error('LexiconNotFoundException', 'Lexicon not found') + return self._error('MissingParameter', 'Missing parameter OutputFormat') if self.json['OutputFormat'] not in ('json', 'mp3', 'ogg_vorbis', 'pcm'): - return self._error('LexiconNotFoundException', 'Lexicon not found') + return self._error('InvalidParameterValue', 'Not one of json, mp3, ogg_vorbis, pcm') args['output_format'] = self.json['OutputFormat'] if 'SampleRate' in self.json: @@ -148,22 +148,22 @@ class PollyResponse(BaseResponse): if 'SpeechMarkTypes' in self.json: for value in self.json['SpeechMarkTypes']: if value not in ('sentance', 'ssml', 'viseme', 'word'): - return self._error('LexiconNotFoundException', 'Lexicon not found') + return self._error('InvalidParameterValue', 'Not one of sentance, ssml, viseme, word') args['speech_marks'] = self.json['SpeechMarkTypes'] if 'Text' not in self.json: - return self._error('LexiconNotFoundException', 'Lexicon not found') + return self._error('MissingParameter', 'Missing parameter Text') args['text'] = self.json['Text'] if 'TextType' in self.json: if self.json['TextType'] not in ('ssml', 'text'): - return self._error('LexiconNotFoundException', 'Lexicon not found') + return self._error('InvalidParameterValue', 'Not one of ssml, text') args['text_type'] = self.json['TextType'] if 'VoiceId' not in self.json: - return self._error('LexiconNotFoundException', 'Lexicon not found') + return self._error('MissingParameter', 'Missing parameter VoiceId') if self.json['VoiceId'] not in VOICE_IDS: - return self._error('LexiconNotFoundException', 'Lexicon not found') + return self._error('InvalidParameterValue', 'Not one of {0}'.format(', '.join(VOICE_IDS))) args['voice_id'] = self.json['VoiceId'] # More validation diff --git a/tests/test_polly/test_polly.py b/tests/test_polly/test_polly.py index 2af8ed42a..c5c864835 100644 --- a/tests/test_polly/test_polly.py +++ b/tests/test_polly/test_polly.py @@ -3,6 +3,7 @@ from 
__future__ import unicode_literals from botocore.exceptions import ClientError import boto3 import sure # noqa +from nose.tools import assert_raises from moto import mock_polly # Polly only available in a few regions @@ -95,15 +96,180 @@ def test_synthesize_speech(): Content=LEXICON_XML ) - a = client.synthesize_speech( - LexiconNames=['test'], - OutputFormat='pcm', - SampleRate='16000', - Text='test1234', - TextType='text', - VoiceId='Astrid' + tests = ( + ('pcm', 'audio/pcm'), + ('mp3', 'audio/mpeg'), + ('ogg_vorbis', 'audio/ogg'), ) - # TODO check content type - print() + for output_format, content_type in tests: + resp = client.synthesize_speech( + LexiconNames=['test'], + OutputFormat=output_format, + SampleRate='16000', + Text='test1234', + TextType='text', + VoiceId='Astrid' + ) + resp['ContentType'].should.equal(content_type) -# Todo expand synthesize speech tests for bad config \ No newline at end of file + +@mock_polly +def test_synthesize_speech_bad_lexicon(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test2'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='text', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('LexiconNotFoundException') + else: + raise RuntimeError('Should of raised LexiconNotFoundException') + + +@mock_polly +def test_synthesize_speech_bad_output_format(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='invalid', + SampleRate='16000', + Text='test1234', + TextType='text', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def 
test_synthesize_speech_bad_sample_rate(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='18000', + Text='test1234', + TextType='text', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidSampleRateException') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_bad_text_type(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='invalid', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_bad_voice_id(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='text', + VoiceId='Luke' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_text_too_long(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234'*376, # = 3008 characters + TextType='text', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('TextLengthExceededException') + else: + raise RuntimeError('Should of raised ') + + 
+@mock_polly +def test_synthesize_speech_bad_speech_marks1(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='text', + SpeechMarkTypes=['word'], + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_bad_speech_marks2(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='ssml', + SpeechMarkTypes=['word'], + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException') + else: + raise RuntimeError('Should of raised ') From e61b9cc682e413da2644211736361774e7dfbe50 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 26 Sep 2017 19:47:26 +0100 Subject: [PATCH 301/412] Added utf8 comment to top of resources.py --- moto/polly/resources.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/polly/resources.py b/moto/polly/resources.py index 77971a90d..f4ad69a98 100644 --- a/moto/polly/resources.py +++ b/moto/polly/resources.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + VOICE_DATA = [ {'Id': 'Joanna', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Gender': 'Female', 'Name': 'Joanna'}, {'Id': 'Mizuki', 'LanguageCode': 'ja-JP', 'LanguageName': 'Japanese', 'Gender': 'Female', 'Name': 'Mizuki'}, From f9c8836d54038fe85e703d0f241ab3d0206215e7 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 26 Sep 2017 19:55:44 +0100 Subject: [PATCH 302/412] . 
--- moto/__init__.py | 1 + moto/backends.py | 2 ++ moto/batch/urls.py | 10 ++++++++++ moto/batch/utils.py | 6 ++++++ 4 files changed, 19 insertions(+) create mode 100644 moto/batch/urls.py create mode 100644 moto/batch/utils.py diff --git a/moto/__init__.py b/moto/__init__.py index a832def53..cce157914 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -9,6 +9,7 @@ from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8: noqa from .awslambda import mock_lambda, mock_lambda_deprecated # flake8: noqa +from .batch import mock_batch # flake8: noqa from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated # flake8: noqa from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa diff --git a/moto/backends.py b/moto/backends.py index da9d1821d..2725088d9 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -4,6 +4,7 @@ from moto.acm import acm_backends from moto.apigateway import apigateway_backends from moto.autoscaling import autoscaling_backends from moto.awslambda import lambda_backends +from moto.batch import batch_backends from moto.cloudformation import cloudformation_backends from moto.cloudwatch import cloudwatch_backends from moto.core import moto_api_backends @@ -38,6 +39,7 @@ BACKENDS = { 'acm': acm_backends, 'apigateway': apigateway_backends, 'autoscaling': autoscaling_backends, + 'batch': batch_backends, 'cloudformation': cloudformation_backends, 'cloudwatch': cloudwatch_backends, 'datapipeline': datapipeline_backends, diff --git a/moto/batch/urls.py b/moto/batch/urls.py new file mode 100644 index 000000000..27cd9fc51 --- /dev/null +++ b/moto/batch/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import BatchResponse + +url_bases = [ + 
"https?://batch.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': BatchResponse.dispatch, +} diff --git a/moto/batch/utils.py b/moto/batch/utils.py new file mode 100644 index 000000000..33e474d61 --- /dev/null +++ b/moto/batch/utils.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +import uuid + + +def make_arn_for_topic(account_id, name, region_name): + return "arn:aws:sns:{0}:{1}:{2}".format(region_name, account_id, name) From dfb712848d554c15b2ba2b767b9cf5a13a3d6f4e Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 26 Sep 2017 20:43:58 +0100 Subject: [PATCH 303/412] Fixed issue with server routing --- moto/polly/urls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/polly/urls.py b/moto/polly/urls.py index 212c6822a..bd4057a0b 100644 --- a/moto/polly/urls.py +++ b/moto/polly/urls.py @@ -7,7 +7,7 @@ url_bases = [ url_paths = { '{0}/v1/voices': PollyResponse.dispatch, - '{0}/v1/lexicons/.+': PollyResponse.dispatch, + '{0}/v1/lexicons/(?P[^/]+)': PollyResponse.dispatch, '{0}/v1/lexicons': PollyResponse.dispatch, '{0}/v1/speech': PollyResponse.dispatch, } From 56e4300ad4d3c729093b858e7f9d8b4008b3d4f3 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 26 Sep 2017 22:22:59 +0100 Subject: [PATCH 304/412] Added preliminary CreateComputeEnvironment --- moto/batch/exceptions.py | 31 +++++++- moto/batch/models.py | 141 +++++++++++++++++++++++++++++++-- moto/batch/responses.py | 48 ++++++++++- moto/batch/urls.py | 2 +- moto/batch/utils.py | 5 +- moto/iam/models.py | 6 ++ tests/test_batch/test_batch.py | 85 +++++++++++++++++++- 7 files changed, 302 insertions(+), 16 deletions(-) diff --git a/moto/batch/exceptions.py b/moto/batch/exceptions.py index e598ee7af..cd6031a95 100644 --- a/moto/batch/exceptions.py +++ b/moto/batch/exceptions.py @@ -1,3 +1,32 @@ from __future__ import unicode_literals -from moto.core.exceptions import RESTError +import json + +class AWSError(Exception): + CODE = None + STATUS = 400 + + def __init__(self, 
message, code=None, status=None): + self.message = message + self.code = code if code is not None else self.CODE + self.status = status if status is not None else self.STATUS + + def response(self): + return json.dumps({'__type': self.code, 'message': self.message}), dict(status=self.status) + + +class InvalidRequestException(AWSError): + CODE = 'InvalidRequestException' + + +class InvalidParameterValueException(AWSError): + CODE = 'InvalidParameterValue' + + +class ValidationError(AWSError): + CODE = 'ValidationError' + + +class InternalFailure(AWSError): + CODE = 'InternalFailure' + STATUS = 500 diff --git a/moto/batch/models.py b/moto/batch/models.py index a54b30c32..c7def48d1 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -1,6 +1,28 @@ from __future__ import unicode_literals import boto3 +import re from moto.core import BaseBackend, BaseModel +from moto.iam import iam_backends +from moto.ec2 import ec2_backends + +from .exceptions import InvalidParameterValueException, InternalFailure +from .utils import make_arn_for_compute_env +from moto.ec2.exceptions import InvalidSubnetIdError +from moto.iam.exceptions import IAMNotFoundException + + +DEFAULT_ACCOUNT_ID = 123456789012 +COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile(r'^[A-Za-z0-9_]{1,128}$') + + +class ComputeEnvironment(BaseModel): + def __init__(self, compute_environment_name, _type, state, compute_resources, service_role, region_name): + self.compute_environment_name = compute_environment_name + self.type = _type + self.state = state + self.compute_resources = compute_resources + self.service_role = service_role + self.arn = make_arn_for_compute_env(DEFAULT_ACCOUNT_ID, compute_environment_name, region_name) class BatchBackend(BaseBackend): @@ -8,16 +30,125 @@ class BatchBackend(BaseBackend): super(BatchBackend, self).__init__() self.region_name = region_name + self._compute_environments = {} + + @property + def iam_backend(self): + """ + :return: IAM Backend + :rtype: 
moto.iam.models.IAMBackend + """ + return iam_backends['global'] + + @property + def ec2_backend(self): + """ + :return: EC2 Backend + :rtype: moto.ec2.models.EC2Backend + """ + return ec2_backends[self.region_name] + def reset(self): region_name = self.region_name self.__dict__ = {} self.__init__(region_name) - def create_compute_environment(self, compute_environment_name, type, state, compute_resources, service_role): - # implement here - return compute_environment_name, compute_environment_arn - # add methods from here + def get_compute_environment(self, arn): + return self._compute_environments.get(arn) + + def get_compute_environment_by_name(self, name): + for comp_env in self._compute_environments.values(): + if comp_env.name == name: + return comp_env + return None + + def create_compute_environment(self, compute_environment_name, _type, state, compute_resources, service_role): + # Validate + if COMPUTE_ENVIRONMENT_NAME_REGEX.match(compute_environment_name) is None: + raise InvalidParameterValueException('Compute environment name does not match ^[A-Za-z0-9_]{1,128}$') + + if self.get_compute_environment_by_name(compute_environment_name) is not None: + raise InvalidParameterValueException('A compute environment already exists with the name {0}'.format(compute_environment_name)) + + # Look for IAM role + try: + self.iam_backend.get_role_by_arn(service_role) + except IAMNotFoundException: + raise InvalidParameterValueException('Could not find IAM role {0}'.format(service_role)) + + if _type not in ('MANAGED', 'UNMANAGED'): + raise InvalidParameterValueException('type {0} must be one of MANAGED | UNMANAGED'.format(service_role)) + + if state is not None and state not in ('ENABLED', 'DISABLED'): + raise InvalidParameterValueException('state {0} must be one of ENABLED | DISABLED'.format(state)) + + if compute_resources is None and _type == 'MANAGED': + raise InvalidParameterValueException('computeResources must be specified when creating a MANAGED 
environment'.format(state)) + elif compute_resources is not None: + self._validate_compute_resources(compute_resources) + + # By here, all values except SPOT ones have been validated + new_comp_env = ComputeEnvironment( + compute_environment_name, _type, state, + compute_resources, service_role, + region_name=self.region_name + ) + self._compute_environments[new_comp_env.arn] = new_comp_env + + # TODO scale out if MANAGED and we have compute instance types + + return compute_environment_name, new_comp_env.arn + + def _validate_compute_resources(self, cr): + if 'instanceRole' not in cr: + raise InvalidParameterValueException('computeResources must contain instanceRole') + elif self.iam_backend.get_role_by_arn(cr['instanceRole']) is None: + raise InvalidParameterValueException('could not find instanceRole {0}'.format(cr['instanceRole'])) + + # TODO move the not in checks to a loop, or create a json schema validator class + if 'maxvCpus' not in cr: + raise InvalidParameterValueException('computeResources must contain maxVCpus') + if 'minvCpus' not in cr: + raise InvalidParameterValueException('computeResources must contain minVCpus') + if cr['maxvCpus'] < 0: + raise InvalidParameterValueException('maxVCpus must be positive') + if cr['minvCpus'] < 0: + raise InvalidParameterValueException('minVCpus must be positive') + if cr['maxvCpus'] < cr['minvCpus']: + raise InvalidParameterValueException('maxVCpus must be greater than minvCpus') + + # TODO check instance types when that logic exists + if 'instanceTypes' not in cr: + raise InvalidParameterValueException('computeResources must contain instanceTypes') + if len(cr['instanceTypes']) == 0: + raise InvalidParameterValueException('At least 1 instance type must be provided') + + if 'securityGroupIds' not in cr: + raise InvalidParameterValueException('computeResources must contain securityGroupIds') + for sec_id in cr['securityGroupIds']: + if self.ec2_backend.get_security_group_from_id(sec_id) is None: + raise 
InvalidParameterValueException('security group {0} does not exist'.format(sec_id)) + if len(cr['securityGroupIds']) == 0: + raise InvalidParameterValueException('At least 1 security group must be provided') + + if 'subnets' not in cr: + raise InvalidParameterValueException('computeResources must contain subnets') + for subnet_id in cr['subnets']: + try: + self.ec2_backend.get_subnet(subnet_id) + except InvalidSubnetIdError: + raise InvalidParameterValueException('subnet {0} does not exist'.format(subnet_id)) + if len(cr['subnets']) == 0: + raise InvalidParameterValueException('At least 1 subnet must be provided') + + if 'type' not in cr: + raise InvalidParameterValueException('computeResources must contain type') + if cr['type'] not in ('EC2', 'SPOT'): + raise InvalidParameterValueException('computeResources.type must be either EC2 | SPOT') + + if cr['type'] == 'SPOT': + raise InternalFailure('SPOT NOT SUPPORTED YET') available_regions = boto3.session.Session().get_available_regions("batch") -batch_backends = {region: BatchBackend for region in available_regions} +batch_backends = {region: BatchBackend(region_name=region) for region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py index d91af8a77..0368906f0 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -1,14 +1,58 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse from .models import batch_backends +from six.moves.urllib.parse import urlsplit + +from .exceptions import AWSError + +import json class BatchResponse(BaseResponse): + def _error(self, code, message): + return json.dumps({'__type': code, 'message': message}), dict(status=400) + @property def batch_backend(self): return batch_backends[self.region] - # add methods from here + @property + def json(self): + if not hasattr(self, '_json'): + self._json = json.loads(self.body) + return self._json + def _get_param(self, param_name, if_none=None): + val = 
self.json.get(param_name) + if val is not None: + return val + return if_none -# add teampltes from here + def _get_action(self): + # Return element after the /v1/* + return urlsplit(self.uri).path.lstrip('/').split('/')[1] + + # CreateComputeEnvironment + def createcomputeenvironment(self): + compute_env_name = self._get_param('computeEnvironmentName') + compute_resource = self._get_param('computeResources') + service_role = self._get_param('serviceRole') + state = self._get_param('state') + _type = self._get_param('type') + + try: + name, arn = self.batch_backend.create_compute_environment( + compute_environment_name=compute_env_name, + _type=_type, state=state, + compute_resources=compute_resource, + service_role=service_role + ) + except AWSError as err: + return err.response() + + result = { + 'computeEnvironmentArn': arn, + 'computeEnvironmentName': name + } + + return json.dumps(result) diff --git a/moto/batch/urls.py b/moto/batch/urls.py index 27cd9fc51..93f8a2f23 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -6,5 +6,5 @@ url_bases = [ ] url_paths = { - '{0}/$': BatchResponse.dispatch, + '{0}/v1/createcomputeenvironment': BatchResponse.dispatch, } diff --git a/moto/batch/utils.py b/moto/batch/utils.py index 33e474d61..d323a9bf7 100644 --- a/moto/batch/utils.py +++ b/moto/batch/utils.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals -import uuid -def make_arn_for_topic(account_id, name, region_name): - return "arn:aws:sns:{0}:{1}:{2}".format(region_name, account_id, name) +def make_arn_for_compute_env(account_id, name, region_name): + return "arn:aws:batch:{0}:{1}:compute-environment/{2}".format(region_name, account_id, name) diff --git a/moto/iam/models.py b/moto/iam/models.py index a7e584284..34efb1a22 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -534,6 +534,12 @@ class IAMBackend(BaseBackend): return role raise IAMNotFoundException("Role {0} not found".format(role_name)) + def get_role_by_arn(self, arn): + for role in 
self.get_roles(): + if role.arn == arn: + return role + raise IAMNotFoundException("Role {0} not found".format(arn)) + def delete_role(self, role_name): for role in self.get_roles(): if role.name == role_name: diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index eafd32eae..3aae48e1e 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -2,10 +2,87 @@ from __future__ import unicode_literals import boto3 import sure # noqa -from moto import mock_batch +from moto import mock_batch, mock_iam, mock_ec2 +DEFAULT_REGION = 'eu-central-1' + + +def _get_clients(): + return boto3.client('ec2', region_name=DEFAULT_REGION), \ + boto3.client('iam', region_name=DEFAULT_REGION), \ + boto3.client('batch', region_name=DEFAULT_REGION) + + +def _setup(ec2_client, iam_client): + """ + Do prerequisite setup + :return: VPC ID, Subnet ID, Security group ID, IAM Role ARN + :rtype: tuple + """ + resp = ec2_client.create_vpc(CidrBlock='172.30.0.0/24') + vpc_id = resp['Vpc']['VpcId'] + resp = ec2_client.create_subnet( + AvailabilityZone='eu-central-1a', + CidrBlock='172.30.0.0/25', + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + resp = ec2_client.create_security_group( + Description='test_sg_desc', + GroupName='test_sg', + VpcId=vpc_id + ) + sg_id = resp['GroupId'] + + resp = iam_client.create_role( + RoleName='TestRole', + AssumeRolePolicyDocument='some_policy' + ) + iam_arn = resp['Role']['Arn'] + + return vpc_id, subnet_id, sg_id, iam_arn + + +# Yes, yes it talks to all the things +@mock_ec2 +@mock_iam @mock_batch -def test_list(): - # do test - pass \ No newline at end of file +def test_create_compute_environment(): + ec2_client, iam_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='MANAGED', + state='ENABLED', + 
computeResources={ + 'type': 'EC2', + 'minvCpus': 123, + 'maxvCpus': 123, + 'desiredvCpus': 123, + 'instanceTypes': [ + 'some_instance_type', + ], + 'imageId': 'some_image_id', + 'subnets': [ + subnet_id, + ], + 'securityGroupIds': [ + sg_id, + ], + 'ec2KeyPair': 'string', + 'instanceRole': iam_arn, + 'tags': { + 'string': 'string' + }, + 'bidPercentage': 123, + 'spotIamFleetRole': 'string' + }, + serviceRole=iam_arn + ) + resp.should.contain('computeEnvironmentArn') + resp['computeEnvironmentName'].should.equal(compute_name) + +# TODO create 1000s of tests to test complex option combinations of create environment From cecbbb70e1287f8e588484cdd5051a1fd570752d Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 27 Sep 2017 09:54:46 +0100 Subject: [PATCH 305/412] Added server testcase --- tests/test_polly/test_server.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_polly/test_server.py b/tests/test_polly/test_server.py index 9dc3593d9..3ae7f2254 100644 --- a/tests/test_polly/test_server.py +++ b/tests/test_polly/test_server.py @@ -9,8 +9,11 @@ from moto import mock_polly Test the different server responses ''' + @mock_polly def test_polly_list(): backend = server.create_backend_app("polly") test_client = backend.test_client() - # do test \ No newline at end of file + + res = test_client.get('/v1/lexicons') + res.status_code.should.equal(200) From 5b01071bd4030ffc0a6a052762c707a0cbb51024 Mon Sep 17 00:00:00 2001 From: Nuwan Goonasekera Date: Wed, 27 Sep 2017 20:26:45 +0530 Subject: [PATCH 306/412] Fix for regression in get_console_output() --- moto/ec2/responses/general.py | 7 ++++++- tests/test_ec2/test_general.py | 15 +++++++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/moto/ec2/responses/general.py b/moto/ec2/responses/general.py index 9add43d3e..262d9f8ea 100644 --- a/moto/ec2/responses/general.py +++ b/moto/ec2/responses/general.py @@ -5,7 +5,12 @@ from moto.core.responses import BaseResponse class 
General(BaseResponse): def get_console_output(self): - instance_id = self._get_multi_param('InstanceId')[0] + instance_id = self._get_param('InstanceId') + if not instance_id: + # For compatibility with boto. + # See: https://github.com/spulec/moto/pull/1152#issuecomment-332487599 + instance_id = self._get_multi_param('InstanceId')[0] + instance = self.ec2_backend.get_instance(instance_id) template = self.response_template(GET_CONSOLE_OUTPUT_RESULT) return template.render(instance=instance) diff --git a/tests/test_ec2/test_general.py b/tests/test_ec2/test_general.py index 1dc77df82..4c319d30d 100644 --- a/tests/test_ec2/test_general.py +++ b/tests/test_ec2/test_general.py @@ -4,10 +4,11 @@ import tests.backport_assert_raises from nose.tools import assert_raises import boto +import boto3 from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated @@ -15,7 +16,6 @@ def test_console_output(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') instance_id = reservation.instances[0].id - output = conn.get_console_output(instance_id) output.output.should_not.equal(None) @@ -29,3 +29,14 @@ def test_console_output_without_instance(): cm.exception.code.should.equal('InvalidInstanceID.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_console_output_boto3(): + conn = boto3.resource('ec2', 'us-east-1') + instances = conn.create_instances(ImageId='ami-1234abcd', + MinCount=1, + MaxCount=1) + + output = instances[0].console_output() + output.get('Output').should_not.equal(None) From 6feaced0bfc4354a4ec604d4da705159cffc2f7b Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 27 Sep 2017 17:27:36 +0100 Subject: [PATCH 307/412] Script to get instance info --- moto/ec2/resources/instance_types.json | 1 + requirements-dev.txt | 1 + scripts/get_instance_info.py | 
150 +++++++++++++++++++++++++ 3 files changed, 152 insertions(+) create mode 100644 moto/ec2/resources/instance_types.json create mode 100755 scripts/get_instance_info.py diff --git a/moto/ec2/resources/instance_types.json b/moto/ec2/resources/instance_types.json new file mode 100644 index 000000000..2fa2e4e93 --- /dev/null +++ b/moto/ec2/resources/instance_types.json @@ -0,0 +1 @@ +{"m1.xlarge": {"ecu_per_vcpu": 2.0, "network_perf": 9.0, "intel_avx": "", "name": "M1 General Purpose Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.xlarge", "computeunits": 8.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "i3.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3800.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.4xlarge", "computeunits": 53.0, "ebs_throughput": 400.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "i2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "", "name": "I2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 800.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", 
"enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "hs1.8xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "High Storage Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 48000.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hs1.8xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 117.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.micro": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Micro", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.micro", "computeunits": 0.1, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.4xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 24000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.4xlarge", "computeunits": 56.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "m2.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "", "name": "M2 High Memory Extra Large", 
"architecture": "64-bit", "linux_virtualization": "PV", "storage": 420.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.xlarge", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 17.1, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "p2.xlarge": {"ecu_per_vcpu": 3.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "General Purpose GPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.xlarge", "computeunits": 12.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 61.0, "ebs_max_bandwidth": 750.0, "gpus": 1, "ipv6_support": true}, "i2.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 9.0, "intel_avx": "", "name": "I2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3200.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.4xlarge", "computeunits": 53.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t1.micro": {"ecu_per_vcpu": 0.0, "network_perf": 0.0, "intel_avx": "", "name": "T1 Micro", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "t1.micro", "computeunits": 0.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 4, "physical_processor": "", "fpga": 0, 
"intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.613, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "d2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "D2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.xlarge", "computeunits": 14.0, "ebs_throughput": 93.75, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "r3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.8xlarge": {"ecu_per_vcpu": 3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "I3 High I/O Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 7600.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.8xlarge", "computeunits": 99.0, "ebs_throughput": 850.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "c3.2xlarge": {"ecu_per_vcpu": 3.5, 
"network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "g2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.medium": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Medium", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.medium", "computeunits": 0.4, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 18, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 4.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.xlarge", "computeunits": 13.0, 
"ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "x1.16xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 13.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1920.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1.16xlarge", "computeunits": 174.5, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "p2.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "General Purpose GPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.8xlarge", "computeunits": 94.0, "ebs_throughput": 625.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 488.0, "ebs_max_bandwidth": 5000.0, "gpus": 8, "ipv6_support": true}, "f1.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "F1 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3760.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 400, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 8, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 
75000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r4.8xlarge": {"ecu_per_vcpu": 3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "R4 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.8xlarge", "computeunits": 99.0, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 37500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "g3.4xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 11.0, "intel_avx": "Yes", "name": "G3 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.4xlarge", "computeunits": 47.0, "ebs_throughput": 437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 20000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 1, "ipv6_support": true}, "cg1.4xlarge": {"ecu_per_vcpu": 2.09375, "network_perf": 12.0, "intel_avx": "", "name": "Cluster GPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cg1.4xlarge", "computeunits": 33.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 22.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.large": {"ecu_per_vcpu": 4.0, "network_perf": 7.0, "intel_avx": "Yes", "name": "C4 High-CPU Large", 
"architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.large", "computeunits": 8.0, "ebs_throughput": 62.5, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 3.75, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "m4.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "M4 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 256.0, "ebs_max_bandwidth": 10000.0, "gpus": 0, "ipv6_support": true}, "r4.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.4xlarge", "computeunits": 53.0, "ebs_throughput": 437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 18750.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "r4.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.2xlarge", "computeunits": 
27.0, "ebs_throughput": 218.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1750.0, "gpus": 0, "ipv6_support": true}, "c3.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "C3 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "i3.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 475.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.large", "computeunits": 7.0, "ebs_throughput": 50.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 425.0, "gpus": 0, "ipv6_support": true}, "r4.xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.xlarge", "computeunits": 13.5, "ebs_throughput": 109.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, 
"ebs_max_bandwidth": 875.0, "gpus": 0, "ipv6_support": true}, "m2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "", "name": "M2 High Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 850.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.2xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 120, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 34.2, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "m3.medium": {"ecu_per_vcpu": 3.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Medium", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 4.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.medium", "computeunits": 3.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "r3.4xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.4xlarge", "computeunits": 52.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.small": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Small", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 
0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.small", "computeunits": 0.2, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 8, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 2.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "R3 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i3.16xlarge": {"ecu_per_vcpu": 3.125, "network_perf": 17.0, "intel_avx": "Yes", "name": "I3 High I/O 16xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 15200.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.16xlarge", "computeunits": 200.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "c3.large": {"ecu_per_vcpu": 3.5, "network_perf": 6.0, "intel_avx": "Yes", "name": "C3 High-CPU Large", "architecture": "32/64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.large", "computeunits": 7.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 
0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i2.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 7.0, "intel_avx": "", "name": "I2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1600.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.2xlarge", "computeunits": 27.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 950.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.xlarge", "computeunits": 13.0, "ebs_throughput": 100.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 850.0, "gpus": 0, "ipv6_support": true}, "i2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 13.0, "intel_avx": "", "name": "I2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6400.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r4.16xlarge": {"ecu_per_vcpu": 3.046875, "network_perf": 17.0, "intel_avx": "Yes", "name": "R4 High-Memory 16xlarge", 
"architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.16xlarge", "computeunits": 195.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 75000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "g3.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "G3 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.8xlarge", "computeunits": 94.0, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 2, "ipv6_support": true}, "c3.4xlarge": {"ecu_per_vcpu": 3.4375, "network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.4xlarge", "computeunits": 55.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "r4.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.large", "computeunits": 
7.0, "ebs_throughput": 54.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 437.0, "gpus": 0, "ipv6_support": true}, "f1.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "F1 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 470.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.2xlarge", "computeunits": 26.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 1, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 122.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "m4.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "m3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 120, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 8000.0, 
"vcpus": 8.0, "memory": 30.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "c3.8xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 12.0, "intel_avx": "Yes", "name": "C3 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.8xlarge", "computeunits": 108.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "cr1.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "High Memory Cluster Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cr1.8xlarge", "computeunits": 88.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "cc2.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "Cluster Compute Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3360.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cc2.8xlarge", "computeunits": 88.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m1.large": {"ecu_per_vcpu": 2.0, "network_perf": 7.0, "intel_avx": "", "name": "M1 General Purpose Large", "architecture": "64-bit", 
"linux_virtualization": "PV", "storage": 840.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.large", "computeunits": 4.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 30, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "r3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", "name": "R3 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "g3.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "G3 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 4, "ipv6_support": true}, "m1.medium": {"ecu_per_vcpu": 2.0, "network_perf": 6.0, "intel_avx": "", "name": "M1 General Purpose Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 410.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.medium", "computeunits": 2.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "", 
"fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "i3.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 1900.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.2xlarge", "computeunits": 27.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "t2.xlarge": {"ecu_per_vcpu": 0.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.xlarge", "computeunits": 0.9, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "g2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 60.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "c1.medium": {"ecu_per_vcpu": 2.5, "network_perf": 6.0, "intel_avx": "", "name": "C1 
High-CPU Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 350.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.medium", "computeunits": 5.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 1.7, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.large": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.large", "computeunits": 0.6, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 36, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.2xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 12000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "c4.8xlarge": {"ecu_per_vcpu": 3.66666666667, "network_perf": 13.0, "intel_avx": "Yes", "name": "C4 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.8xlarge", "computeunits": 132.0, "ebs_throughput": 500.0, "vpc_only": true, 
"max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 60.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "c4.2xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.2xlarge", "computeunits": 31.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "x1e.32xlarge": {"ecu_per_vcpu": 2.65625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1E 32xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3840.0, "placement_group_support": false, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1e.32xlarge", "computeunits": 340.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 3904.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": false}, "m4.10xlarge": {"ecu_per_vcpu": 3.1125, "network_perf": 13.0, "intel_avx": "Yes", "name": "M4 Deca Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.10xlarge", "computeunits": 124.5, "ebs_throughput": 500.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 40.0, "memory": 160.0, 
"ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "t2.2xlarge": {"ecu_per_vcpu": 0.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.2xlarge", "computeunits": 1.35, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.4xlarge": {"ecu_per_vcpu": 3.34375, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.4xlarge", "computeunits": 53.5, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 64.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.nano": {"ecu_per_vcpu": 0.0, "network_perf": 2.0, "intel_avx": "Yes", "name": "T2 Nano", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.nano", "computeunits": 0.05, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.8xlarge": {"ecu_per_vcpu": 3.22222222222, "network_perf": 13.0, "intel_avx": "Yes", "name": "D2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 48000.0, 
"placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.8xlarge", "computeunits": 116.0, "ebs_throughput": 500.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 244.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "m3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m2.4xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "", "name": "M2 High Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.4xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 68.4, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "m1.small": {"ecu_per_vcpu": 1.0, "network_perf": 2.0, "intel_avx": "", "name": "M1 General Purpose Small", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.small", "computeunits": 1.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 8, "physical_processor": "", "fpga": 0, "intel_turbo": "", 
"enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.7, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c1.xlarge": {"ecu_per_vcpu": 2.5, "network_perf": 9.0, "intel_avx": "", "name": "C1 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.xlarge", "computeunits": 20.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 7.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "x1.32xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 32xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3840.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1.32xlarge", "computeunits": 349.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 1952.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r3.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 12.0, "intel_avx": "Yes", "name": "R3 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.large": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", 
"name": "M4 Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.large", "computeunits": 6.5, "ebs_throughput": 56.25, "vpc_only": true, "max_ips": 20, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3600.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 450.0, "gpus": 0, "ipv6_support": true}, "p2.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "General Purpose GPU 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 732.0, "ebs_max_bandwidth": 10000.0, "gpus": 16, "ipv6_support": true}, "hi1.4xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "HI1. 
High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 2048.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hi1.4xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.4xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.4xlarge", "computeunits": 62.0, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "c4.xlarge": {"ecu_per_vcpu": 4.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.xlarge", "computeunits": 16.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "m3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.xlarge", "computeunits": 
13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}} \ No newline at end of file diff --git a/requirements-dev.txt b/requirements-dev.txt index 6d84d7a86..13e4e2f20 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -13,3 +13,4 @@ prompt-toolkit==1.0.14 click==6.7 inflection==0.3.1 lxml==4.0.0 +beautifulsoup4==4.6.0 diff --git a/scripts/get_instance_info.py b/scripts/get_instance_info.py new file mode 100755 index 000000000..f883c0cae --- /dev/null +++ b/scripts/get_instance_info.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +import json +import os +import subprocess +import requests +from bs4 import BeautifulSoup + + +class Instance(object): + def __init__(self, instance): + self.instance = instance + + def _get_td(self, td): + return self.instance.find('td', attrs={'class': td}) + + def _get_sort(self, td): + return float(self.instance.find('td', attrs={'class': td}).find('span')['sort']) + + @property + def name(self): + return self._get_td('name').text.strip() + + @property + def apiname(self): + return self._get_td('apiname').text.strip() + + @property + def memory(self): + return self._get_sort('memory') + + @property + def computeunits(self): + return self._get_sort('computeunits') + + @property + def vcpus(self): + return self._get_sort('vcpus') + + @property + def gpus(self): + return int(self._get_td('gpus').text.strip()) + + @property + def fpga(self): + return int(self._get_td('fpga').text.strip()) + + @property + def ecu_per_vcpu(self): + return self._get_sort('ecu-per-vcpu') + + @property + def physical_processor(self): + return self._get_td('physical_processor').text.strip() + + @property + def clock_speed_ghz(self): + return self._get_td('clock_speed_ghz').text.strip() + + @property + def intel_avx(self): + 
return self._get_td('intel_avx').text.strip() + + @property + def intel_avx2(self): + return self._get_td('intel_avx2').text.strip() + + @property + def intel_turbo(self): + return self._get_td('intel_turbo').text.strip() + + @property + def storage(self): + return self._get_sort('storage') + + @property + def architecture(self): + return self._get_td('architecture').text.strip() + + @property + def network_perf(self): # 2 == low + return self._get_sort('networkperf') + + @property + def ebs_max_bandwidth(self): + return self._get_sort('ebs-max-bandwidth') + + @property + def ebs_throughput(self): + return self._get_sort('ebs-throughput') + + @property + def ebs_iops(self): + return self._get_sort('ebs-iops') + + @property + def max_ips(self): + return int(self._get_td('maxips').text.strip()) + + @property + def enhanced_networking(self): + return self._get_td('enhanced-networking').text.strip() != 'No' + + @property + def vpc_only(self): + return self._get_td('vpc-only').text.strip() != 'No' + + @property + def ipv6_support(self): + return self._get_td('ipv6-support').text.strip() != 'No' + + @property + def placement_group_support(self): + return self._get_td('placement-group-support').text.strip() != 'No' + + @property + def linux_virtualization(self): + return self._get_td('linux-virtualization').text.strip() + + def to_dict(self): + result = {} + + for attr in [x for x in self.__class__.__dict__.keys() if not x.startswith('_') and x != 'to_dict']: + result[attr] = getattr(self, attr) + + return self.apiname, result + + +def main(): + print("Getting HTML from http://www.ec2instances.info") + page_request = requests.get('http://www.ec2instances.info') + soup = BeautifulSoup(page_request.text, 'html.parser') + data_table = soup.find(id='data') + + print("Finding data in table") + instances = data_table.find('tbody').find_all('tr') + + print("Parsing data") + result = {} + for instance in instances: + instance_id, instance_data = Instance(instance).to_dict() + 
result[instance_id] = instance_data + + root_dir = subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).decode().strip() + dest = os.path.join(root_dir, 'moto/ec2/resources/instance_types.json') + print("Writing data to {0}".format(dest)) + with open(dest, 'w') as open_file: + json.dump(result, open_file) + +if __name__ == '__main__': + main() From c888d65d6dfb27e3e44e86a7b94d834bb193dd9e Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 27 Sep 2017 17:29:58 +0100 Subject: [PATCH 308/412] Added constant containing instance types --- moto/ec2/models.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index e7e8a1dd8..07e218106 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import copy import itertools +import json +import os import re import six @@ -109,6 +111,9 @@ from .utils import ( is_tag_filter, ) +RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources') +INSTANCE_TYPES = json.load(open(os.path.join(RESOURCES_DIR, 'instance_types.json'), 'r')) + def utc_date_and_time(): return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z') From dd32e02dfa690c62ad4c5dd678802e1dd949c0eb Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 11:54:35 -0700 Subject: [PATCH 309/412] bumping to version 1.1.15 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 329724489..4464d01f2 100755 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ extras_require = { setup( name='moto', - version='1.1.14', + version='1.1.15', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From ae110dc808ba4167a050fbbab77f74ac2605d1da Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 11:59:42 -0700 Subject: [PATCH 310/412] changelog-1.1.14 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 7d64aa9c7..1dafbc2ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ Moto Changelog Latest ------ +1.1.14 +----- + + * ACM implementation + * Added `make scaffold` + * X-Ray implementation + 1.1.13 ----- From 4b5d7808a463d205c97687e17f05fb48a2bca9cb Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 12:00:46 -0700 Subject: [PATCH 311/412] changelog-1.1.15 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1dafbc2ae..a20e186ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ Moto Changelog Latest ------ +1.1.15 +----- + + * Polly implementation + * Added EC2 instance info + * SNS publish by phone number + 1.1.14 ----- From f2b2d9ea65cf7d56e81577179aaa80f6c4d2066f Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 12:40:08 -0700 Subject: [PATCH 312/412] push dockerhub image on release --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f02fa9c87..3a98ed7f1 100644 --- a/Makefile +++ b/Makefile @@ -21,8 +21,9 @@ aws_managed_policies: upload_pypi_artifact: python setup.py sdist bdist_wheel upload -build_dockerhub_image: +push_dockerhub_image: docker build -t motoserver/moto . 
+ docker push motoserver/moto tag_github_release: git tag `python setup.py --version` From 12d3cc7558a582e78e8fc0c31101cbc0f5ba579a Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 12:53:36 -0700 Subject: [PATCH 313/412] bumping to version 1.1.16 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4464d01f2..f5344a0c0 100755 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ extras_require = { setup( name='moto', - version='1.1.15', + version='1.1.16', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From c09a693324b366c657632c5c679df0b86554de6a Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 12:54:07 -0700 Subject: [PATCH 314/412] changelog 1.1.16 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a20e186ec..40edb4204 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,11 @@ Moto Changelog Latest ------ +1.1.16 +----- + + * Fixing regression from 1.1.15 + 1.1.15 ----- From ed20e6e12c3d5ca664994e2e5641dca51cb4e78d Mon Sep 17 00:00:00 2001 From: Kevin Frommelt Date: Wed, 27 Sep 2017 15:50:39 -0500 Subject: [PATCH 315/412] Include moto/ec2/resources/instance_types.json in package --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index c21ea9947..7e219f463 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,3 +1,4 @@ include README.md LICENSE AUTHORS.md include requirements.txt requirements-dev.txt tox.ini +include moto/ec2/resources/instance_types.json recursive-include tests * From 4c1d0c3f5f573e74f2cfaa886267054097a73cc2 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 13:50:48 -0700 Subject: [PATCH 316/412] Store JSON in Python for instance types This allows it to be bundled in the distributed app more simply --- moto/ec2/models.py | 3 +- moto/ec2/resources/__init__.py | 0 
moto/ec2/resources/instance_types.json | 1 - moto/ec2/resources/instance_types.py | 2164 ++++++++++++++++++++++++ scripts/get_instance_info.py | 14 +- 5 files changed, 2178 insertions(+), 4 deletions(-) create mode 100644 moto/ec2/resources/__init__.py delete mode 100644 moto/ec2/resources/instance_types.json create mode 100644 moto/ec2/resources/instance_types.py diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 07e218106..e687ea1a0 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -67,6 +67,7 @@ from .exceptions import ( MotoNotImplementedError, FilterNotImplementedError ) +from .resources.instance_types import instance_types_data from .utils import ( EC2_RESOURCE_TO_PREFIX, EC2_PREFIX_TO_RESOURCE, @@ -112,7 +113,7 @@ from .utils import ( ) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources') -INSTANCE_TYPES = json.load(open(os.path.join(RESOURCES_DIR, 'instance_types.json'), 'r')) +INSTANCE_TYPES = json.loads(instance_types_data) def utc_date_and_time(): diff --git a/moto/ec2/resources/__init__.py b/moto/ec2/resources/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/ec2/resources/instance_types.json b/moto/ec2/resources/instance_types.json deleted file mode 100644 index 2fa2e4e93..000000000 --- a/moto/ec2/resources/instance_types.json +++ /dev/null @@ -1 +0,0 @@ -{"m1.xlarge": {"ecu_per_vcpu": 2.0, "network_perf": 9.0, "intel_avx": "", "name": "M1 General Purpose Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.xlarge", "computeunits": 8.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "i3.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": 
"Yes", "name": "I3 High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3800.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.4xlarge", "computeunits": 53.0, "ebs_throughput": 400.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "i2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "", "name": "I2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 800.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "hs1.8xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "High Storage Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 48000.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hs1.8xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 117.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.micro": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Micro", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.micro", "computeunits": 0.1, "ebs_throughput": 0.0, "vpc_only": 
true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.4xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 24000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.4xlarge", "computeunits": 56.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "m2.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "", "name": "M2 High Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 420.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.xlarge", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 17.1, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "p2.xlarge": {"ecu_per_vcpu": 3.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "General Purpose GPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.xlarge", "computeunits": 12.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 61.0, "ebs_max_bandwidth": 750.0, "gpus": 1, "ipv6_support": true}, 
"i2.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 9.0, "intel_avx": "", "name": "I2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3200.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.4xlarge", "computeunits": 53.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t1.micro": {"ecu_per_vcpu": 0.0, "network_perf": 0.0, "intel_avx": "", "name": "T1 Micro", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "t1.micro", "computeunits": 0.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 4, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.613, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "d2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "D2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.xlarge", "computeunits": 14.0, "ebs_throughput": 93.75, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "r3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.2xlarge", 
"computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.8xlarge": {"ecu_per_vcpu": 3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "I3 High I/O Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 7600.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.8xlarge", "computeunits": 99.0, "ebs_throughput": 850.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "c3.2xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "g2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 
32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.medium": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Medium", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.medium", "computeunits": 0.4, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 18, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 4.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.xlarge", "computeunits": 13.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "x1.16xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 13.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1920.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1.16xlarge", "computeunits": 174.5, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "p2.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "General Purpose GPU Eight Extra Large", "architecture": "64-bit", 
"linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.8xlarge", "computeunits": 94.0, "ebs_throughput": 625.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 488.0, "ebs_max_bandwidth": 5000.0, "gpus": 8, "ipv6_support": true}, "f1.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "F1 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3760.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 400, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 8, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 75000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r4.8xlarge": {"ecu_per_vcpu": 3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "R4 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.8xlarge", "computeunits": 99.0, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 37500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "g3.4xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 11.0, "intel_avx": "Yes", "name": "G3 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.4xlarge", "computeunits": 47.0, "ebs_throughput": 
437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 20000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 1, "ipv6_support": true}, "cg1.4xlarge": {"ecu_per_vcpu": 2.09375, "network_perf": 12.0, "intel_avx": "", "name": "Cluster GPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cg1.4xlarge", "computeunits": 33.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 22.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.large": {"ecu_per_vcpu": 4.0, "network_perf": 7.0, "intel_avx": "Yes", "name": "C4 High-CPU Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.large", "computeunits": 8.0, "ebs_throughput": 62.5, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 3.75, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "m4.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "M4 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 256.0, "ebs_max_bandwidth": 10000.0, "gpus": 0, 
"ipv6_support": true}, "r4.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.4xlarge", "computeunits": 53.0, "ebs_throughput": 437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 18750.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "r4.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.2xlarge", "computeunits": 27.0, "ebs_throughput": 218.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1750.0, "gpus": 0, "ipv6_support": true}, "c3.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "C3 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "i3.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Large", "architecture": "64-bit", "linux_virtualization": "Unknown", 
"storage": 475.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.large", "computeunits": 7.0, "ebs_throughput": 50.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 425.0, "gpus": 0, "ipv6_support": true}, "r4.xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.xlarge", "computeunits": 13.5, "ebs_throughput": 109.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 875.0, "gpus": 0, "ipv6_support": true}, "m2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "", "name": "M2 High Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 850.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.2xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 120, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 34.2, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "m3.medium": {"ecu_per_vcpu": 3.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Medium", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 4.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.medium", "computeunits": 3.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "Intel Xeon 
E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "r3.4xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.4xlarge", "computeunits": 52.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.small": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Small", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.small", "computeunits": 0.2, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 8, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 2.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "R3 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i3.16xlarge": {"ecu_per_vcpu": 3.125, "network_perf": 17.0, 
"intel_avx": "Yes", "name": "I3 High I/O 16xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 15200.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.16xlarge", "computeunits": 200.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "c3.large": {"ecu_per_vcpu": 3.5, "network_perf": 6.0, "intel_avx": "Yes", "name": "C3 High-CPU Large", "architecture": "32/64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.large", "computeunits": 7.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i2.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 7.0, "intel_avx": "", "name": "I2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1600.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.2xlarge", "computeunits": 27.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 950.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.xlarge", 
"computeunits": 13.0, "ebs_throughput": 100.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 850.0, "gpus": 0, "ipv6_support": true}, "i2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 13.0, "intel_avx": "", "name": "I2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6400.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r4.16xlarge": {"ecu_per_vcpu": 3.046875, "network_perf": 17.0, "intel_avx": "Yes", "name": "R4 High-Memory 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.16xlarge", "computeunits": 195.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 75000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "g3.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "G3 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.8xlarge", "computeunits": 94.0, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 32.0, "memory": 
244.0, "ebs_max_bandwidth": 7000.0, "gpus": 2, "ipv6_support": true}, "c3.4xlarge": {"ecu_per_vcpu": 3.4375, "network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.4xlarge", "computeunits": 55.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "r4.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.large", "computeunits": 7.0, "ebs_throughput": 54.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 437.0, "gpus": 0, "ipv6_support": true}, "f1.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "F1 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 470.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.2xlarge", "computeunits": 26.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 1, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 122.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "m4.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Double Extra Large", "architecture": "64-bit", 
"linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "m3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 120, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 30.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "c3.8xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 12.0, "intel_avx": "Yes", "name": "C3 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.8xlarge", "computeunits": 108.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "cr1.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "High Memory Cluster Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cr1.8xlarge", "computeunits": 88.0, 
"ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "cc2.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "Cluster Compute Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3360.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cc2.8xlarge", "computeunits": 88.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m1.large": {"ecu_per_vcpu": 2.0, "network_perf": 7.0, "intel_avx": "", "name": "M1 General Purpose Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 840.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.large", "computeunits": 4.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 30, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "r3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", "name": "R3 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "g3.16xlarge": 
{"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "G3 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 4, "ipv6_support": true}, "m1.medium": {"ecu_per_vcpu": 2.0, "network_perf": 6.0, "intel_avx": "", "name": "M1 General Purpose Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 410.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.medium", "computeunits": 2.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "i3.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 1900.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.2xlarge", "computeunits": 27.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "t2.xlarge": {"ecu_per_vcpu": 0.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", 
"apiname": "t2.xlarge", "computeunits": 0.9, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "g2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 60.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "c1.medium": {"ecu_per_vcpu": 2.5, "network_perf": 6.0, "intel_avx": "", "name": "C1 High-CPU Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 350.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.medium", "computeunits": 5.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 1.7, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.large": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.large", "computeunits": 0.6, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 36, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, 
"d2.2xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 12000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "c4.8xlarge": {"ecu_per_vcpu": 3.66666666667, "network_perf": 13.0, "intel_avx": "Yes", "name": "C4 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.8xlarge", "computeunits": 132.0, "ebs_throughput": 500.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 60.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "c4.2xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.2xlarge", "computeunits": 31.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "x1e.32xlarge": {"ecu_per_vcpu": 2.65625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1E 32xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3840.0, 
"placement_group_support": false, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1e.32xlarge", "computeunits": 340.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 3904.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": false}, "m4.10xlarge": {"ecu_per_vcpu": 3.1125, "network_perf": 13.0, "intel_avx": "Yes", "name": "M4 Deca Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.10xlarge", "computeunits": 124.5, "ebs_throughput": 500.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 40.0, "memory": 160.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "t2.2xlarge": {"ecu_per_vcpu": 0.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.2xlarge", "computeunits": 1.35, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.4xlarge": {"ecu_per_vcpu": 3.34375, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.4xlarge", "computeunits": 53.5, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": 
"Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 64.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.nano": {"ecu_per_vcpu": 0.0, "network_perf": 2.0, "intel_avx": "Yes", "name": "T2 Nano", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.nano", "computeunits": 0.05, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.8xlarge": {"ecu_per_vcpu": 3.22222222222, "network_perf": 13.0, "intel_avx": "Yes", "name": "D2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 48000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.8xlarge", "computeunits": 116.0, "ebs_throughput": 500.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 244.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "m3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m2.4xlarge": {"ecu_per_vcpu": 3.25, 
"network_perf": 9.0, "intel_avx": "", "name": "M2 High Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.4xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 68.4, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "m1.small": {"ecu_per_vcpu": 1.0, "network_perf": 2.0, "intel_avx": "", "name": "M1 General Purpose Small", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.small", "computeunits": 1.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 8, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.7, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c1.xlarge": {"ecu_per_vcpu": 2.5, "network_perf": 9.0, "intel_avx": "", "name": "C1 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.xlarge", "computeunits": 20.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 7.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "x1.32xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 32xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3840.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1.32xlarge", "computeunits": 349.0, 
"ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 1952.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r3.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 12.0, "intel_avx": "Yes", "name": "R3 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.large": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", "name": "M4 Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.large", "computeunits": 6.5, "ebs_throughput": 56.25, "vpc_only": true, "max_ips": 20, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3600.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 450.0, "gpus": 0, "ipv6_support": true}, "p2.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "General Purpose GPU 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 
732.0, "ebs_max_bandwidth": 10000.0, "gpus": 16, "ipv6_support": true}, "hi1.4xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "HI1. High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 2048.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hi1.4xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.4xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.4xlarge", "computeunits": 62.0, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "c4.xlarge": {"ecu_per_vcpu": 4.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.xlarge", "computeunits": 16.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "m3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Extra Large", "architecture": "64-bit", 
"linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}} \ No newline at end of file diff --git a/moto/ec2/resources/instance_types.py b/moto/ec2/resources/instance_types.py new file mode 100644 index 000000000..9568b535c --- /dev/null +++ b/moto/ec2/resources/instance_types.py @@ -0,0 +1,2164 @@ +# Imported via `scripts/get_instance_info.py` +instance_types_data = """ +{ + "c1.medium": { + "apiname": "c1.medium", + "architecture": "32/64-bit", + "clock_speed_ghz": "", + "computeunits": 5.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 2.5, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 12, + "memory": 1.7, + "name": "C1 High-CPU Medium", + "network_perf": 6.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 350.0, + "vcpus": 2.0, + "vpc_only": false + }, + "c1.xlarge": { + "apiname": "c1.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 20.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 2.5, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 60, + "memory": 7.0, + "name": "C1 High-CPU Extra Large", + "network_perf": 9.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 1680.0, + "vcpus": 8.0, + "vpc_only": false + }, 
+ "c3.2xlarge": { + "apiname": "c3.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 28.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 3.5, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM, PV", + "max_ips": 60, + "memory": 15.0, + "name": "C3 High-CPU Double Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2680 v2", + "placement_group_support": true, + "storage": 160.0, + "vcpus": 8.0, + "vpc_only": false + }, + "c3.4xlarge": { + "apiname": "c3.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 55.0, + "ebs_iops": 16000.0, + "ebs_max_bandwidth": 2000.0, + "ebs_throughput": 250.0, + "ecu_per_vcpu": 3.4375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM, PV", + "max_ips": 240, + "memory": 30.0, + "name": "C3 High-CPU Quadruple Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2680 v2", + "placement_group_support": true, + "storage": 320.0, + "vcpus": 16.0, + "vpc_only": false + }, + "c3.8xlarge": { + "apiname": "c3.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 108.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 3.375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM, PV", + "max_ips": 240, + "memory": 60.0, + "name": "C3 High-CPU Eight Extra Large", + "network_perf": 12.0, + "physical_processor": "Intel Xeon E5-2680 v2", + "placement_group_support": true, + "storage": 640.0, + "vcpus": 32.0, + "vpc_only": false + }, + "c3.large": { 
+ "apiname": "c3.large", + "architecture": "32/64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 7.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 3.5, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM, PV", + "max_ips": 30, + "memory": 3.75, + "name": "C3 High-CPU Large", + "network_perf": 6.0, + "physical_processor": "Intel Xeon E5-2680 v2", + "placement_group_support": true, + "storage": 32.0, + "vcpus": 2.0, + "vpc_only": false + }, + "c3.xlarge": { + "apiname": "c3.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 14.0, + "ebs_iops": 4000.0, + "ebs_max_bandwidth": 500.0, + "ebs_throughput": 62.5, + "ecu_per_vcpu": 3.5, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM, PV", + "max_ips": 60, + "memory": 7.5, + "name": "C3 High-CPU Extra Large", + "network_perf": 7.0, + "physical_processor": "Intel Xeon E5-2680 v2", + "placement_group_support": true, + "storage": 80.0, + "vcpus": 4.0, + "vpc_only": false + }, + "c4.2xlarge": { + "apiname": "c4.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 31.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 3.875, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 15.0, + "name": "C4 High-CPU Double Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2666 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 8.0, + "vpc_only": true + }, + "c4.4xlarge": { + "apiname": "c4.4xlarge", + "architecture": "64-bit", + 
"clock_speed_ghz": "Yes", + "computeunits": 62.0, + "ebs_iops": 16000.0, + "ebs_max_bandwidth": 2000.0, + "ebs_throughput": 250.0, + "ecu_per_vcpu": 3.875, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 30.0, + "name": "C4 High-CPU Quadruple Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2666 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 16.0, + "vpc_only": true + }, + "c4.8xlarge": { + "apiname": "c4.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 132.0, + "ebs_iops": 32000.0, + "ebs_max_bandwidth": 4000.0, + "ebs_throughput": 500.0, + "ecu_per_vcpu": 3.66666666667, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 60.0, + "name": "C4 High-CPU Eight Extra Large", + "network_perf": 13.0, + "physical_processor": "Intel Xeon E5-2666 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 36.0, + "vpc_only": true + }, + "c4.large": { + "apiname": "c4.large", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 8.0, + "ebs_iops": 4000.0, + "ebs_max_bandwidth": 500.0, + "ebs_throughput": 62.5, + "ecu_per_vcpu": 4.0, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 30, + "memory": 3.75, + "name": "C4 High-CPU Large", + "network_perf": 7.0, + "physical_processor": "Intel Xeon E5-2666 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 2.0, + "vpc_only": true + }, + "c4.xlarge": { + "apiname": "c4.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + 
"computeunits": 16.0, + "ebs_iops": 6000.0, + "ebs_max_bandwidth": 750.0, + "ebs_throughput": 93.75, + "ecu_per_vcpu": 4.0, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 7.5, + "name": "C4 High-CPU Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2666 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 4.0, + "vpc_only": true + }, + "cc2.8xlarge": { + "apiname": "cc2.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 88.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 2.75, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 60.5, + "name": "Cluster Compute Eight Extra Large", + "network_perf": 12.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 3360.0, + "vcpus": 32.0, + "vpc_only": false + }, + "cg1.4xlarge": { + "apiname": "cg1.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 33.5, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 2.09375, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 22.5, + "name": "Cluster GPU Quadruple Extra Large", + "network_perf": 12.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 1680.0, + "vcpus": 16.0, + "vpc_only": false + }, + "cr1.8xlarge": { + "apiname": "cr1.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 88.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 
0.0, + "ecu_per_vcpu": 2.75, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "Unknown", + "max_ips": 240, + "memory": 244.0, + "name": "High Memory Cluster Eight Extra Large", + "network_perf": 12.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 240.0, + "vcpus": 32.0, + "vpc_only": false + }, + "d2.2xlarge": { + "apiname": "d2.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 28.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 3.5, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 61.0, + "name": "D2 Double Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2676 v3", + "placement_group_support": true, + "storage": 12000.0, + "vcpus": 8.0, + "vpc_only": false + }, + "d2.4xlarge": { + "apiname": "d2.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 56.0, + "ebs_iops": 16000.0, + "ebs_max_bandwidth": 2000.0, + "ebs_throughput": 250.0, + "ecu_per_vcpu": 3.5, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 122.0, + "name": "D2 Quadruple Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2676 v3", + "placement_group_support": true, + "storage": 24000.0, + "vcpus": 16.0, + "vpc_only": false + }, + "d2.8xlarge": { + "apiname": "d2.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 116.0, + "ebs_iops": 32000.0, + "ebs_max_bandwidth": 4000.0, + "ebs_throughput": 500.0, + "ecu_per_vcpu": 3.22222222222, + 
"enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 244.0, + "name": "D2 Eight Extra Large", + "network_perf": 13.0, + "physical_processor": "Intel Xeon E5-2676 v3", + "placement_group_support": true, + "storage": 48000.0, + "vcpus": 36.0, + "vpc_only": false + }, + "d2.xlarge": { + "apiname": "d2.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 14.0, + "ebs_iops": 6000.0, + "ebs_max_bandwidth": 750.0, + "ebs_throughput": 93.75, + "ecu_per_vcpu": 3.5, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 30.5, + "name": "D2 Extra Large", + "network_perf": 7.0, + "physical_processor": "Intel Xeon E5-2676 v3", + "placement_group_support": true, + "storage": 6000.0, + "vcpus": 4.0, + "vpc_only": false + }, + "f1.16xlarge": { + "apiname": "f1.16xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 188.0, + "ebs_iops": 75000.0, + "ebs_max_bandwidth": 14000.0, + "ebs_throughput": 1750.0, + "ecu_per_vcpu": 2.9375, + "enhanced_networking": true, + "fpga": 8, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 400, + "memory": 976.0, + "name": "F1 16xlarge", + "network_perf": 17.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 3760.0, + "vcpus": 64.0, + "vpc_only": true + }, + "f1.2xlarge": { + "apiname": "f1.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 26.0, + "ebs_iops": 12000.0, + "ebs_max_bandwidth": 1700.0, + "ebs_throughput": 200.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 1, + "gpus": 
0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 122.0, + "name": "F1 Double Extra Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 470.0, + "vcpus": 8.0, + "vpc_only": true + }, + "g2.2xlarge": { + "apiname": "g2.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 26.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "Unknown", + "max_ips": 60, + "memory": 15.0, + "name": "G2 Double Extra Large", + "network_perf": 16.0, + "physical_processor": "", + "placement_group_support": true, + "storage": 60.0, + "vcpus": 8.0, + "vpc_only": false + }, + "g2.8xlarge": { + "apiname": "g2.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 104.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "Unknown", + "max_ips": 240, + "memory": 60.0, + "name": "G2 Eight Extra Large", + "network_perf": 16.0, + "physical_processor": "", + "placement_group_support": true, + "storage": 240.0, + "vcpus": 32.0, + "vpc_only": false + }, + "g3.16xlarge": { + "apiname": "g3.16xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 188.0, + "ebs_iops": 80000.0, + "ebs_max_bandwidth": 14000.0, + "ebs_throughput": 1750.0, + "ecu_per_vcpu": 2.9375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 4, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + 
"linux_virtualization": "HVM", + "max_ips": 750, + "memory": 488.0, + "name": "G3 16xlarge", + "network_perf": 17.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 64.0, + "vpc_only": true + }, + "g3.4xlarge": { + "apiname": "g3.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 47.0, + "ebs_iops": 20000.0, + "ebs_max_bandwidth": 3500.0, + "ebs_throughput": 437.0, + "ecu_per_vcpu": 2.9375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 1, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 122.0, + "name": "G3 Quadruple Extra Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 16.0, + "vpc_only": true + }, + "g3.8xlarge": { + "apiname": "g3.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 94.0, + "ebs_iops": 40000.0, + "ebs_max_bandwidth": 7000.0, + "ebs_throughput": 875.0, + "ecu_per_vcpu": 2.9375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 2, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 244.0, + "name": "G3 Eight Extra Large", + "network_perf": 13.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 32.0, + "vpc_only": true + }, + "hi1.4xlarge": { + "apiname": "hi1.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 35.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 2.1875, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "HVM, PV", + "max_ips": 240, + 
"memory": 60.5, + "name": "HI1. High I/O Quadruple Extra Large", + "network_perf": 12.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 2048.0, + "vcpus": 16.0, + "vpc_only": false + }, + "hs1.8xlarge": { + "apiname": "hs1.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 35.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 2.1875, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "HVM, PV", + "max_ips": 240, + "memory": 117.0, + "name": "High Storage Eight Extra Large", + "network_perf": 12.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 48000.0, + "vcpus": 16.0, + "vpc_only": false + }, + "i2.2xlarge": { + "apiname": "i2.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 27.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 3.375, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 61.0, + "name": "I2 Double Extra Large", + "network_perf": 7.0, + "physical_processor": "", + "placement_group_support": true, + "storage": 1600.0, + "vcpus": 8.0, + "vpc_only": false + }, + "i2.4xlarge": { + "apiname": "i2.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 53.0, + "ebs_iops": 16000.0, + "ebs_max_bandwidth": 2000.0, + "ebs_throughput": 250.0, + "ecu_per_vcpu": 3.3125, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 122.0, + "name": "I2 Quadruple Extra Large", + "network_perf": 9.0, + "physical_processor": 
"", + "placement_group_support": true, + "storage": 3200.0, + "vcpus": 16.0, + "vpc_only": false + }, + "i2.8xlarge": { + "apiname": "i2.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 104.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 244.0, + "name": "I2 Eight Extra Large", + "network_perf": 13.0, + "physical_processor": "", + "placement_group_support": true, + "storage": 6400.0, + "vcpus": 32.0, + "vpc_only": false + }, + "i2.xlarge": { + "apiname": "i2.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 14.0, + "ebs_iops": 4000.0, + "ebs_max_bandwidth": 500.0, + "ebs_throughput": 62.5, + "ecu_per_vcpu": 3.5, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 30.5, + "name": "I2 Extra Large", + "network_perf": 7.0, + "physical_processor": "", + "placement_group_support": true, + "storage": 800.0, + "vcpus": 4.0, + "vpc_only": false + }, + "i3.16xlarge": { + "apiname": "i3.16xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 200.0, + "ebs_iops": 65000.0, + "ebs_max_bandwidth": 14000.0, + "ebs_throughput": 1750.0, + "ecu_per_vcpu": 3.125, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "Unknown", + "max_ips": 750, + "memory": 488.0, + "name": "I3 High I/O 16xlarge", + "network_perf": 17.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 15200.0, + "vcpus": 64.0, + "vpc_only": true + }, + 
"i3.2xlarge": { + "apiname": "i3.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 27.0, + "ebs_iops": 12000.0, + "ebs_max_bandwidth": 1700.0, + "ebs_throughput": 200.0, + "ecu_per_vcpu": 3.375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "Unknown", + "max_ips": 60, + "memory": 61.0, + "name": "I3 High I/O Double Extra Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 1900.0, + "vcpus": 8.0, + "vpc_only": true + }, + "i3.4xlarge": { + "apiname": "i3.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 53.0, + "ebs_iops": 16000.0, + "ebs_max_bandwidth": 3500.0, + "ebs_throughput": 400.0, + "ecu_per_vcpu": 3.3125, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "Unknown", + "max_ips": 240, + "memory": 122.0, + "name": "I3 High I/O Quadruple Extra Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 3800.0, + "vcpus": 16.0, + "vpc_only": true + }, + "i3.8xlarge": { + "apiname": "i3.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 99.0, + "ebs_iops": 32500.0, + "ebs_max_bandwidth": 7000.0, + "ebs_throughput": 850.0, + "ecu_per_vcpu": 3.09375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "Unknown", + "max_ips": 240, + "memory": 244.0, + "name": "I3 High I/O Eight Extra Large", + "network_perf": 13.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 7600.0, + "vcpus": 32.0, + "vpc_only": 
true + }, + "i3.large": { + "apiname": "i3.large", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 7.0, + "ebs_iops": 3000.0, + "ebs_max_bandwidth": 425.0, + "ebs_throughput": 50.0, + "ecu_per_vcpu": 3.5, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "Unknown", + "max_ips": 30, + "memory": 15.25, + "name": "I3 High I/O Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 475.0, + "vcpus": 2.0, + "vpc_only": true + }, + "i3.xlarge": { + "apiname": "i3.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 13.0, + "ebs_iops": 6000.0, + "ebs_max_bandwidth": 850.0, + "ebs_throughput": 100.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "Unknown", + "max_ips": 60, + "memory": 30.5, + "name": "I3 High I/O Extra Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 950.0, + "vcpus": 4.0, + "vpc_only": true + }, + "m1.large": { + "apiname": "m1.large", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 4.0, + "ebs_iops": 4000.0, + "ebs_max_bandwidth": 500.0, + "ebs_throughput": 62.5, + "ecu_per_vcpu": 2.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 30, + "memory": 7.5, + "name": "M1 General Purpose Large", + "network_perf": 7.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 840.0, + "vcpus": 2.0, + "vpc_only": false + }, + "m1.medium": { + "apiname": "m1.medium", + "architecture": "32/64-bit", + 
"clock_speed_ghz": "", + "computeunits": 2.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 2.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 12, + "memory": 3.75, + "name": "M1 General Purpose Medium", + "network_perf": 6.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 410.0, + "vcpus": 1.0, + "vpc_only": false + }, + "m1.small": { + "apiname": "m1.small", + "architecture": "32/64-bit", + "clock_speed_ghz": "", + "computeunits": 1.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 1.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 8, + "memory": 1.7, + "name": "M1 General Purpose Small", + "network_perf": 2.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 160.0, + "vcpus": 1.0, + "vpc_only": false + }, + "m1.xlarge": { + "apiname": "m1.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 8.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 2.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 60, + "memory": 15.0, + "name": "M1 General Purpose Extra Large", + "network_perf": 9.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 1680.0, + "vcpus": 4.0, + "vpc_only": false + }, + "m2.2xlarge": { + "apiname": "m2.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 13.0, + "ebs_iops": 4000.0, + "ebs_max_bandwidth": 500.0, + "ebs_throughput": 62.5, + "ecu_per_vcpu": 3.25, + 
"enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 120, + "memory": 34.2, + "name": "M2 High Memory Double Extra Large", + "network_perf": 7.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 850.0, + "vcpus": 4.0, + "vpc_only": false + }, + "m2.4xlarge": { + "apiname": "m2.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 26.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 240, + "memory": 68.4, + "name": "M2 High Memory Quadruple Extra Large", + "network_perf": 9.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 1680.0, + "vcpus": 8.0, + "vpc_only": false + }, + "m2.xlarge": { + "apiname": "m2.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "", + "computeunits": 6.5, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 60, + "memory": 17.1, + "name": "M2 High Memory Extra Large", + "network_perf": 6.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 420.0, + "vcpus": 2.0, + "vpc_only": false + }, + "m3.2xlarge": { + "apiname": "m3.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 26.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": 
"Yes", + "ipv6_support": false, + "linux_virtualization": "HVM, PV", + "max_ips": 120, + "memory": 30.0, + "name": "M3 General Purpose Double Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2670 v2", + "placement_group_support": false, + "storage": 160.0, + "vcpus": 8.0, + "vpc_only": false + }, + "m3.large": { + "apiname": "m3.large", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 6.5, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": false, + "linux_virtualization": "HVM, PV", + "max_ips": 30, + "memory": 7.5, + "name": "M3 General Purpose Large", + "network_perf": 6.0, + "physical_processor": "Intel Xeon E5-2670 v2", + "placement_group_support": false, + "storage": 32.0, + "vcpus": 2.0, + "vpc_only": false + }, + "m3.medium": { + "apiname": "m3.medium", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 3.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 3.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": false, + "linux_virtualization": "HVM, PV", + "max_ips": 12, + "memory": 3.75, + "name": "M3 General Purpose Medium", + "network_perf": 6.0, + "physical_processor": "Intel Xeon E5-2670 v2", + "placement_group_support": false, + "storage": 4.0, + "vcpus": 1.0, + "vpc_only": false + }, + "m3.xlarge": { + "apiname": "m3.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 13.0, + "ebs_iops": 4000.0, + "ebs_max_bandwidth": 500.0, + "ebs_throughput": 62.5, + "ecu_per_vcpu": 3.25, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": false, + 
"linux_virtualization": "HVM, PV", + "max_ips": 60, + "memory": 15.0, + "name": "M3 General Purpose Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2670 v2", + "placement_group_support": false, + "storage": 80.0, + "vcpus": 4.0, + "vpc_only": false + }, + "m4.10xlarge": { + "apiname": "m4.10xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 124.5, + "ebs_iops": 32000.0, + "ebs_max_bandwidth": 4000.0, + "ebs_throughput": 500.0, + "ecu_per_vcpu": 3.1125, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 160.0, + "name": "M4 Deca Extra Large", + "network_perf": 13.0, + "physical_processor": "Intel Xeon E5-2676 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 40.0, + "vpc_only": true + }, + "m4.16xlarge": { + "apiname": "m4.16xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 188.0, + "ebs_iops": 65000.0, + "ebs_max_bandwidth": 10000.0, + "ebs_throughput": 1250.0, + "ecu_per_vcpu": 2.9375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 256.0, + "name": "M4 16xlarge", + "network_perf": 17.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 64.0, + "vpc_only": true + }, + "m4.2xlarge": { + "apiname": "m4.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 26.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + 
"max_ips": 60, + "memory": 32.0, + "name": "M4 Double Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2676 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 8.0, + "vpc_only": true + }, + "m4.4xlarge": { + "apiname": "m4.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 53.5, + "ebs_iops": 16000.0, + "ebs_max_bandwidth": 2000.0, + "ebs_throughput": 250.0, + "ecu_per_vcpu": 3.34375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 64.0, + "name": "M4 Quadruple Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2676 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 16.0, + "vpc_only": true + }, + "m4.large": { + "apiname": "m4.large", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 6.5, + "ebs_iops": 3600.0, + "ebs_max_bandwidth": 450.0, + "ebs_throughput": 56.25, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 20, + "memory": 8.0, + "name": "M4 Large", + "network_perf": 7.0, + "physical_processor": "Intel Xeon E5-2676 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 2.0, + "vpc_only": true + }, + "m4.xlarge": { + "apiname": "m4.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 13.0, + "ebs_iops": 6000.0, + "ebs_max_bandwidth": 750.0, + "ebs_throughput": 93.75, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 16.0, + "name": "M4 Extra Large", + 
"network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2676 v3", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 4.0, + "vpc_only": true + }, + "p2.16xlarge": { + "apiname": "p2.16xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 188.0, + "ebs_iops": 65000.0, + "ebs_max_bandwidth": 10000.0, + "ebs_throughput": 1250.0, + "ecu_per_vcpu": 2.9375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 16, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 732.0, + "name": "General Purpose GPU 16xlarge", + "network_perf": 17.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 64.0, + "vpc_only": true + }, + "p2.8xlarge": { + "apiname": "p2.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 94.0, + "ebs_iops": 32500.0, + "ebs_max_bandwidth": 5000.0, + "ebs_throughput": 625.0, + "ecu_per_vcpu": 2.9375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 8, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 488.0, + "name": "General Purpose GPU Eight Extra Large", + "network_perf": 13.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 32.0, + "vpc_only": true + }, + "p2.xlarge": { + "apiname": "p2.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 12.0, + "ebs_iops": 6000.0, + "ebs_max_bandwidth": 750.0, + "ebs_throughput": 93.75, + "ecu_per_vcpu": 3.0, + "enhanced_networking": true, + "fpga": 0, + "gpus": 1, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 61.0, + "name": "General Purpose GPU Extra Large", + 
"network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 4.0, + "vpc_only": true + }, + "r3.2xlarge": { + "apiname": "r3.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 26.0, + "ebs_iops": 8000.0, + "ebs_max_bandwidth": 1000.0, + "ebs_throughput": 125.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 61.0, + "name": "R3 High-Memory Double Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2670 v2", + "placement_group_support": true, + "storage": 160.0, + "vcpus": 8.0, + "vpc_only": false + }, + "r3.4xlarge": { + "apiname": "r3.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 52.0, + "ebs_iops": 16000.0, + "ebs_max_bandwidth": 2000.0, + "ebs_throughput": 250.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 122.0, + "name": "R3 High-Memory Quadruple Extra Large", + "network_perf": 9.0, + "physical_processor": "Intel Xeon E5-2670 v2", + "placement_group_support": true, + "storage": 320.0, + "vcpus": 16.0, + "vpc_only": false + }, + "r3.8xlarge": { + "apiname": "r3.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 104.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 244.0, + "name": "R3 High-Memory Eight Extra Large", + "network_perf": 12.0, + 
"physical_processor": "Intel Xeon E5-2670 v2", + "placement_group_support": true, + "storage": 640.0, + "vcpus": 32.0, + "vpc_only": false + }, + "r3.large": { + "apiname": "r3.large", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 6.5, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 30, + "memory": 15.25, + "name": "R3 High-Memory Large", + "network_perf": 6.0, + "physical_processor": "Intel Xeon E5-2670 v2", + "placement_group_support": true, + "storage": 32.0, + "vcpus": 2.0, + "vpc_only": false + }, + "r3.xlarge": { + "apiname": "r3.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 13.0, + "ebs_iops": 4000.0, + "ebs_max_bandwidth": 500.0, + "ebs_throughput": 62.5, + "ecu_per_vcpu": 3.25, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 30.5, + "name": "R3 High-Memory Extra Large", + "network_perf": 7.0, + "physical_processor": "Intel Xeon E5-2670 v2", + "placement_group_support": true, + "storage": 80.0, + "vcpus": 4.0, + "vpc_only": false + }, + "r4.16xlarge": { + "apiname": "r4.16xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 195.0, + "ebs_iops": 75000.0, + "ebs_max_bandwidth": 14000.0, + "ebs_throughput": 1750.0, + "ecu_per_vcpu": 3.046875, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 750, + "memory": 488.0, + "name": "R4 High-Memory 16xlarge", + "network_perf": 17.0, + "physical_processor": "Intel Xeon E5-2686 v4", + 
"placement_group_support": true, + "storage": 0.0, + "vcpus": 64.0, + "vpc_only": true + }, + "r4.2xlarge": { + "apiname": "r4.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 27.0, + "ebs_iops": 12000.0, + "ebs_max_bandwidth": 1750.0, + "ebs_throughput": 218.0, + "ecu_per_vcpu": 3.375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 61.0, + "name": "R4 High-Memory Double Extra Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 8.0, + "vpc_only": true + }, + "r4.4xlarge": { + "apiname": "r4.4xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 53.0, + "ebs_iops": 18750.0, + "ebs_max_bandwidth": 3500.0, + "ebs_throughput": 437.0, + "ecu_per_vcpu": 3.3125, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 122.0, + "name": "R4 High-Memory Quadruple Extra Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 16.0, + "vpc_only": true + }, + "r4.8xlarge": { + "apiname": "r4.8xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 99.0, + "ebs_iops": 37500.0, + "ebs_max_bandwidth": 7000.0, + "ebs_throughput": 875.0, + "ecu_per_vcpu": 3.09375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 244.0, + "name": "R4 High-Memory Eight Extra Large", + "network_perf": 13.0, + "physical_processor": "Intel Xeon E5-2686 v4", + 
"placement_group_support": true, + "storage": 0.0, + "vcpus": 32.0, + "vpc_only": true + }, + "r4.large": { + "apiname": "r4.large", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 7.0, + "ebs_iops": 3000.0, + "ebs_max_bandwidth": 437.0, + "ebs_throughput": 54.0, + "ecu_per_vcpu": 3.5, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 30, + "memory": 15.25, + "name": "R4 High-Memory Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 2.0, + "vpc_only": true + }, + "r4.xlarge": { + "apiname": "r4.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 13.5, + "ebs_iops": 6000.0, + "ebs_max_bandwidth": 875.0, + "ebs_throughput": 109.0, + "ecu_per_vcpu": 3.375, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 60, + "memory": 30.5, + "name": "R4 High-Memory Extra Large", + "network_perf": 11.0, + "physical_processor": "Intel Xeon E5-2686 v4", + "placement_group_support": true, + "storage": 0.0, + "vcpus": 4.0, + "vpc_only": true + }, + "t1.micro": { + "apiname": "t1.micro", + "architecture": "32/64-bit", + "clock_speed_ghz": "", + "computeunits": 0.0, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 0.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "", + "intel_avx2": "", + "intel_turbo": "", + "ipv6_support": false, + "linux_virtualization": "PV", + "max_ips": 4, + "memory": 0.613, + "name": "T1 Micro", + "network_perf": 0.0, + "physical_processor": "", + "placement_group_support": false, + "storage": 0.0, + "vcpus": 1.0, + "vpc_only": false + }, + "t2.2xlarge": { + 
"apiname": "t2.2xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 1.35, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 0.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 45, + "memory": 32.0, + "name": "T2 Double Extra Large", + "network_perf": 6.0, + "physical_processor": "Intel Xeon family", + "placement_group_support": false, + "storage": 0.0, + "vcpus": 8.0, + "vpc_only": true + }, + "t2.large": { + "apiname": "t2.large", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 0.6, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 0.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 36, + "memory": 8.0, + "name": "T2 Large", + "network_perf": 4.0, + "physical_processor": "Intel Xeon family", + "placement_group_support": false, + "storage": 0.0, + "vcpus": 2.0, + "vpc_only": true + }, + "t2.medium": { + "apiname": "t2.medium", + "architecture": "32/64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 0.4, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 0.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 18, + "memory": 4.0, + "name": "T2 Medium", + "network_perf": 4.0, + "physical_processor": "Intel Xeon family", + "placement_group_support": false, + "storage": 0.0, + "vcpus": 2.0, + "vpc_only": true + }, + "t2.micro": { + "apiname": "t2.micro", + "architecture": "32/64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 0.1, + "ebs_iops": 0.0, + 
"ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 0.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 4, + "memory": 1.0, + "name": "T2 Micro", + "network_perf": 4.0, + "physical_processor": "Intel Xeon family", + "placement_group_support": false, + "storage": 0.0, + "vcpus": 1.0, + "vpc_only": true + }, + "t2.nano": { + "apiname": "t2.nano", + "architecture": "32/64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 0.05, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 0.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 4, + "memory": 0.5, + "name": "T2 Nano", + "network_perf": 2.0, + "physical_processor": "Intel Xeon family", + "placement_group_support": false, + "storage": 0.0, + "vcpus": 1.0, + "vpc_only": true + }, + "t2.small": { + "apiname": "t2.small", + "architecture": "32/64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 0.2, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 0.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 8, + "memory": 2.0, + "name": "T2 Small", + "network_perf": 4.0, + "physical_processor": "Intel Xeon family", + "placement_group_support": false, + "storage": 0.0, + "vcpus": 1.0, + "vpc_only": true + }, + "t2.xlarge": { + "apiname": "t2.xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 0.9, + "ebs_iops": 0.0, + "ebs_max_bandwidth": 0.0, + "ebs_throughput": 0.0, + "ecu_per_vcpu": 0.0, + "enhanced_networking": false, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", 
+ "intel_avx2": "", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 45, + "memory": 16.0, + "name": "T2 Extra Large", + "network_perf": 6.0, + "physical_processor": "Intel Xeon family", + "placement_group_support": false, + "storage": 0.0, + "vcpus": 4.0, + "vpc_only": true + }, + "x1.16xlarge": { + "apiname": "x1.16xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 174.5, + "ebs_iops": 40000.0, + "ebs_max_bandwidth": 7000.0, + "ebs_throughput": 875.0, + "ecu_per_vcpu": 2.7265625, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 976.0, + "name": "X1 Extra High-Memory 16xlarge", + "network_perf": 13.0, + "physical_processor": "Intel Xeon E7-8880 v3", + "placement_group_support": true, + "storage": 1920.0, + "vcpus": 64.0, + "vpc_only": true + }, + "x1.32xlarge": { + "apiname": "x1.32xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 349.0, + "ebs_iops": 80000.0, + "ebs_max_bandwidth": 14000.0, + "ebs_throughput": 1750.0, + "ecu_per_vcpu": 2.7265625, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + "intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": true, + "linux_virtualization": "HVM", + "max_ips": 240, + "memory": 1952.0, + "name": "X1 Extra High-Memory 32xlarge", + "network_perf": 17.0, + "physical_processor": "Intel Xeon E7-8880 v3", + "placement_group_support": true, + "storage": 3840.0, + "vcpus": 128.0, + "vpc_only": true + }, + "x1e.32xlarge": { + "apiname": "x1e.32xlarge", + "architecture": "64-bit", + "clock_speed_ghz": "Yes", + "computeunits": 340.0, + "ebs_iops": 80000.0, + "ebs_max_bandwidth": 14000.0, + "ebs_throughput": 1750.0, + "ecu_per_vcpu": 2.65625, + "enhanced_networking": true, + "fpga": 0, + "gpus": 0, + "intel_avx": "Yes", + 
"intel_avx2": "Yes", + "intel_turbo": "Yes", + "ipv6_support": false, + "linux_virtualization": "Unknown", + "max_ips": 240, + "memory": 3904.0, + "name": "X1E 32xlarge", + "network_perf": 17.0, + "physical_processor": "Intel Xeon E7-8880 v3", + "placement_group_support": false, + "storage": 3840.0, + "vcpus": 128.0, + "vpc_only": true + } +}""" diff --git a/scripts/get_instance_info.py b/scripts/get_instance_info.py index f883c0cae..bb8d28eb3 100755 --- a/scripts/get_instance_info.py +++ b/scripts/get_instance_info.py @@ -141,10 +141,20 @@ def main(): result[instance_id] = instance_data root_dir = subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).decode().strip() - dest = os.path.join(root_dir, 'moto/ec2/resources/instance_types.json') + dest = os.path.join(root_dir, 'moto/ec2/resources/instance_types.py') print("Writing data to {0}".format(dest)) with open(dest, 'w') as open_file: - json.dump(result, open_file) + triple_quote = '\"\"\"' + + open_file.write("# Imported via `scripts/get_instance_info.py`\n") + open_file.write('instance_types_data = {}\n'.format(triple_quote)) + json.dump(result, + open_file, + sort_keys=True, + indent=4, + separators=(',', ': ')) + open_file.write('{}\n'.format(triple_quote)) + if __name__ == '__main__': main() From 9a036c4fd673987f23617d31e178265adeac620a Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 13:51:00 -0700 Subject: [PATCH 317/412] Small typo in makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3a98ed7f1..a963c8293 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,7 @@ tag_github_release: git tag `python setup.py --version` git push origin `python setup.py --version` -publish: upload_pypi_artifact build_dockerhub_image tag_github_release +publish: upload_pypi_artifact push_dockerhub_image tag_github_release scaffold: @pip install -r requirements-dev.txt > /dev/null From 4768c28443f49a600131dfe786af6a4ed2915b98 Mon Sep 17 
00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 13:51:41 -0700 Subject: [PATCH 318/412] bumping to version 1.1.17 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f5344a0c0..1093af638 100755 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ extras_require = { setup( name='moto', - version='1.1.16', + version='1.1.17', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 53d8d48e88e9a5df2e3680dead1aeb0faffc57e9 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 13:57:40 -0700 Subject: [PATCH 319/412] Revert "Merge pull request #1209 from JackDanger/jack/store-as-python" This reverts commit 2a3fdf6b642d471391dff23ad06f9b1a74f5939d, reversing changes made to a06145d7816ca7778b39f792b4fe99439ed0cc41. --- moto/ec2/models.py | 3 +- moto/ec2/resources/__init__.py | 0 moto/ec2/resources/instance_types.json | 1 + moto/ec2/resources/instance_types.py | 2164 ------------------------ scripts/get_instance_info.py | 14 +- 5 files changed, 4 insertions(+), 2178 deletions(-) delete mode 100644 moto/ec2/resources/__init__.py create mode 100644 moto/ec2/resources/instance_types.json delete mode 100644 moto/ec2/resources/instance_types.py diff --git a/moto/ec2/models.py b/moto/ec2/models.py index e687ea1a0..07e218106 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -67,7 +67,6 @@ from .exceptions import ( MotoNotImplementedError, FilterNotImplementedError ) -from .resources.instance_types import instance_types_data from .utils import ( EC2_RESOURCE_TO_PREFIX, EC2_PREFIX_TO_RESOURCE, @@ -113,7 +112,7 @@ from .utils import ( ) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources') -INSTANCE_TYPES = json.loads(instance_types_data) +INSTANCE_TYPES = json.load(open(os.path.join(RESOURCES_DIR, 'instance_types.json'), 'r')) def utc_date_and_time(): diff --git a/moto/ec2/resources/__init__.py b/moto/ec2/resources/__init__.py 
deleted file mode 100644 index e69de29bb..000000000 diff --git a/moto/ec2/resources/instance_types.json b/moto/ec2/resources/instance_types.json new file mode 100644 index 000000000..2fa2e4e93 --- /dev/null +++ b/moto/ec2/resources/instance_types.json @@ -0,0 +1 @@ +{"m1.xlarge": {"ecu_per_vcpu": 2.0, "network_perf": 9.0, "intel_avx": "", "name": "M1 General Purpose Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.xlarge", "computeunits": 8.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "i3.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3800.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.4xlarge", "computeunits": 53.0, "ebs_throughput": 400.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "i2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "", "name": "I2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 800.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": 
true}, "hs1.8xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "High Storage Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 48000.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hs1.8xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 117.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.micro": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Micro", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.micro", "computeunits": 0.1, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.4xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 24000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.4xlarge", "computeunits": 56.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "m2.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "", "name": "M2 High Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 420.0, "placement_group_support": false, "intel_avx2": "", 
"clock_speed_ghz": "", "apiname": "m2.xlarge", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 17.1, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "p2.xlarge": {"ecu_per_vcpu": 3.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "General Purpose GPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.xlarge", "computeunits": 12.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 61.0, "ebs_max_bandwidth": 750.0, "gpus": 1, "ipv6_support": true}, "i2.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 9.0, "intel_avx": "", "name": "I2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3200.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.4xlarge", "computeunits": 53.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t1.micro": {"ecu_per_vcpu": 0.0, "network_perf": 0.0, "intel_avx": "", "name": "T1 Micro", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "t1.micro", "computeunits": 0.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 4, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.613, "ebs_max_bandwidth": 0.0, "gpus": 
0, "ipv6_support": false}, "d2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "D2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.xlarge", "computeunits": 14.0, "ebs_throughput": 93.75, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "r3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.8xlarge": {"ecu_per_vcpu": 3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "I3 High I/O Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 7600.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.8xlarge", "computeunits": 99.0, "ebs_throughput": 850.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "c3.2xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": 
"HVM, PV", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "g2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.medium": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Medium", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.medium", "computeunits": 0.4, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 18, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 4.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.xlarge", "computeunits": 13.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", 
"enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "x1.16xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 13.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1920.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1.16xlarge", "computeunits": 174.5, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "p2.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "General Purpose GPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.8xlarge", "computeunits": 94.0, "ebs_throughput": 625.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 488.0, "ebs_max_bandwidth": 5000.0, "gpus": 8, "ipv6_support": true}, "f1.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "F1 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3760.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 400, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 8, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 75000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r4.8xlarge": {"ecu_per_vcpu": 
3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "R4 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.8xlarge", "computeunits": 99.0, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 37500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "g3.4xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 11.0, "intel_avx": "Yes", "name": "G3 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.4xlarge", "computeunits": 47.0, "ebs_throughput": 437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 20000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 1, "ipv6_support": true}, "cg1.4xlarge": {"ecu_per_vcpu": 2.09375, "network_perf": 12.0, "intel_avx": "", "name": "Cluster GPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cg1.4xlarge", "computeunits": 33.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 22.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.large": {"ecu_per_vcpu": 4.0, "network_perf": 7.0, "intel_avx": "Yes", "name": "C4 High-CPU Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", 
"clock_speed_ghz": "Yes", "apiname": "c4.large", "computeunits": 8.0, "ebs_throughput": 62.5, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 3.75, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "m4.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "M4 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 256.0, "ebs_max_bandwidth": 10000.0, "gpus": 0, "ipv6_support": true}, "r4.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.4xlarge", "computeunits": 53.0, "ebs_throughput": 437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 18750.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "r4.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.2xlarge", "computeunits": 27.0, "ebs_throughput": 218.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, 
"intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1750.0, "gpus": 0, "ipv6_support": true}, "c3.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "C3 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "i3.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 475.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.large", "computeunits": 7.0, "ebs_throughput": 50.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 425.0, "gpus": 0, "ipv6_support": true}, "r4.xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.xlarge", "computeunits": 13.5, "ebs_throughput": 109.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 875.0, "gpus": 0, "ipv6_support": true}, "m2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, 
"intel_avx": "", "name": "M2 High Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 850.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.2xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 120, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 34.2, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "m3.medium": {"ecu_per_vcpu": 3.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Medium", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 4.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.medium", "computeunits": 3.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "r3.4xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.4xlarge", "computeunits": 52.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.small": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Small", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.small", 
"computeunits": 0.2, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 8, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 2.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "R3 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i3.16xlarge": {"ecu_per_vcpu": 3.125, "network_perf": 17.0, "intel_avx": "Yes", "name": "I3 High I/O 16xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 15200.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.16xlarge", "computeunits": 200.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "c3.large": {"ecu_per_vcpu": 3.5, "network_perf": 6.0, "intel_avx": "Yes", "name": "C3 High-CPU Large", "architecture": "32/64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.large", "computeunits": 7.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 3.75, 
"ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i2.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 7.0, "intel_avx": "", "name": "I2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1600.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.2xlarge", "computeunits": 27.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 950.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.xlarge", "computeunits": 13.0, "ebs_throughput": 100.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 850.0, "gpus": 0, "ipv6_support": true}, "i2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 13.0, "intel_avx": "", "name": "I2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6400.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r4.16xlarge": {"ecu_per_vcpu": 3.046875, "network_perf": 17.0, "intel_avx": "Yes", "name": "R4 High-Memory 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": 
true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.16xlarge", "computeunits": 195.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 75000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "g3.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "G3 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.8xlarge", "computeunits": 94.0, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 2, "ipv6_support": true}, "c3.4xlarge": {"ecu_per_vcpu": 3.4375, "network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.4xlarge", "computeunits": 55.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "r4.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.large", "computeunits": 7.0, "ebs_throughput": 54.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon 
E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 437.0, "gpus": 0, "ipv6_support": true}, "f1.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "F1 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 470.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.2xlarge", "computeunits": 26.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 1, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 122.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "m4.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "m3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 120, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 30.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, 
"c3.8xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 12.0, "intel_avx": "Yes", "name": "C3 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.8xlarge", "computeunits": 108.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "cr1.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "High Memory Cluster Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cr1.8xlarge", "computeunits": 88.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "cc2.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "Cluster Compute Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3360.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cc2.8xlarge", "computeunits": 88.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m1.large": {"ecu_per_vcpu": 2.0, "network_perf": 7.0, "intel_avx": "", "name": "M1 General Purpose Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 840.0, "placement_group_support": false, "intel_avx2": "", 
"clock_speed_ghz": "", "apiname": "m1.large", "computeunits": 4.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 30, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "r3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", "name": "R3 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "g3.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "G3 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 4, "ipv6_support": true}, "m1.medium": {"ecu_per_vcpu": 2.0, "network_perf": 6.0, "intel_avx": "", "name": "M1 General Purpose Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 410.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.medium", "computeunits": 2.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 
3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "i3.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 1900.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.2xlarge", "computeunits": 27.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "t2.xlarge": {"ecu_per_vcpu": 0.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.xlarge", "computeunits": 0.9, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "g2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 60.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "c1.medium": {"ecu_per_vcpu": 2.5, "network_perf": 6.0, "intel_avx": "", "name": "C1 High-CPU Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 350.0, 
"placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.medium", "computeunits": 5.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 1.7, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.large": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.large", "computeunits": 0.6, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 36, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.2xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 12000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "c4.8xlarge": {"ecu_per_vcpu": 3.66666666667, "network_perf": 13.0, "intel_avx": "Yes", "name": "C4 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.8xlarge", "computeunits": 132.0, "ebs_throughput": 500.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", 
"enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 60.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "c4.2xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.2xlarge", "computeunits": 31.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "x1e.32xlarge": {"ecu_per_vcpu": 2.65625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1E 32xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3840.0, "placement_group_support": false, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1e.32xlarge", "computeunits": 340.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 3904.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": false}, "m4.10xlarge": {"ecu_per_vcpu": 3.1125, "network_perf": 13.0, "intel_avx": "Yes", "name": "M4 Deca Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.10xlarge", "computeunits": 124.5, "ebs_throughput": 500.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 40.0, "memory": 160.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "t2.2xlarge": {"ecu_per_vcpu": 0.0, 
"network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.2xlarge", "computeunits": 1.35, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.4xlarge": {"ecu_per_vcpu": 3.34375, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.4xlarge", "computeunits": 53.5, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 64.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.nano": {"ecu_per_vcpu": 0.0, "network_perf": 2.0, "intel_avx": "Yes", "name": "T2 Nano", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.nano", "computeunits": 0.05, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.8xlarge": {"ecu_per_vcpu": 3.22222222222, "network_perf": 13.0, "intel_avx": "Yes", "name": "D2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 48000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": 
"d2.8xlarge", "computeunits": 116.0, "ebs_throughput": 500.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 244.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "m3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m2.4xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "", "name": "M2 High Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.4xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 68.4, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "m1.small": {"ecu_per_vcpu": 1.0, "network_perf": 2.0, "intel_avx": "", "name": "M1 General Purpose Small", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.small", "computeunits": 1.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 8, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.7, 
"ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c1.xlarge": {"ecu_per_vcpu": 2.5, "network_perf": 9.0, "intel_avx": "", "name": "C1 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.xlarge", "computeunits": 20.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 7.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "x1.32xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 32xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3840.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1.32xlarge", "computeunits": 349.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 1952.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r3.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 12.0, "intel_avx": "Yes", "name": "R3 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.large": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", "name": "M4 Large", "architecture": "64-bit", "linux_virtualization": "HVM", 
"storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.large", "computeunits": 6.5, "ebs_throughput": 56.25, "vpc_only": true, "max_ips": 20, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3600.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 450.0, "gpus": 0, "ipv6_support": true}, "p2.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "General Purpose GPU 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 732.0, "ebs_max_bandwidth": 10000.0, "gpus": 16, "ipv6_support": true}, "hi1.4xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "HI1. 
High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 2048.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hi1.4xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.4xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.4xlarge", "computeunits": 62.0, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "c4.xlarge": {"ecu_per_vcpu": 4.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.xlarge", "computeunits": 16.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "m3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.xlarge", "computeunits": 
13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}} \ No newline at end of file diff --git a/moto/ec2/resources/instance_types.py b/moto/ec2/resources/instance_types.py deleted file mode 100644 index 9568b535c..000000000 --- a/moto/ec2/resources/instance_types.py +++ /dev/null @@ -1,2164 +0,0 @@ -# Imported via `scripts/get_instance_info.py` -instance_types_data = """ -{ - "c1.medium": { - "apiname": "c1.medium", - "architecture": "32/64-bit", - "clock_speed_ghz": "", - "computeunits": 5.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 2.5, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 12, - "memory": 1.7, - "name": "C1 High-CPU Medium", - "network_perf": 6.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 350.0, - "vcpus": 2.0, - "vpc_only": false - }, - "c1.xlarge": { - "apiname": "c1.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 20.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 2.5, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 60, - "memory": 7.0, - "name": "C1 High-CPU Extra Large", - "network_perf": 9.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 1680.0, - "vcpus": 8.0, - "vpc_only": false - }, - "c3.2xlarge": { - "apiname": "c3.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 28.0, - "ebs_iops": 8000.0, - 
"ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 3.5, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM, PV", - "max_ips": 60, - "memory": 15.0, - "name": "C3 High-CPU Double Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2680 v2", - "placement_group_support": true, - "storage": 160.0, - "vcpus": 8.0, - "vpc_only": false - }, - "c3.4xlarge": { - "apiname": "c3.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 55.0, - "ebs_iops": 16000.0, - "ebs_max_bandwidth": 2000.0, - "ebs_throughput": 250.0, - "ecu_per_vcpu": 3.4375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM, PV", - "max_ips": 240, - "memory": 30.0, - "name": "C3 High-CPU Quadruple Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2680 v2", - "placement_group_support": true, - "storage": 320.0, - "vcpus": 16.0, - "vpc_only": false - }, - "c3.8xlarge": { - "apiname": "c3.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 108.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 3.375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM, PV", - "max_ips": 240, - "memory": 60.0, - "name": "C3 High-CPU Eight Extra Large", - "network_perf": 12.0, - "physical_processor": "Intel Xeon E5-2680 v2", - "placement_group_support": true, - "storage": 640.0, - "vcpus": 32.0, - "vpc_only": false - }, - "c3.large": { - "apiname": "c3.large", - "architecture": "32/64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 7.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 
0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 3.5, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM, PV", - "max_ips": 30, - "memory": 3.75, - "name": "C3 High-CPU Large", - "network_perf": 6.0, - "physical_processor": "Intel Xeon E5-2680 v2", - "placement_group_support": true, - "storage": 32.0, - "vcpus": 2.0, - "vpc_only": false - }, - "c3.xlarge": { - "apiname": "c3.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 14.0, - "ebs_iops": 4000.0, - "ebs_max_bandwidth": 500.0, - "ebs_throughput": 62.5, - "ecu_per_vcpu": 3.5, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM, PV", - "max_ips": 60, - "memory": 7.5, - "name": "C3 High-CPU Extra Large", - "network_perf": 7.0, - "physical_processor": "Intel Xeon E5-2680 v2", - "placement_group_support": true, - "storage": 80.0, - "vcpus": 4.0, - "vpc_only": false - }, - "c4.2xlarge": { - "apiname": "c4.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 31.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 3.875, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 15.0, - "name": "C4 High-CPU Double Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2666 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 8.0, - "vpc_only": true - }, - "c4.4xlarge": { - "apiname": "c4.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 62.0, - "ebs_iops": 16000.0, - "ebs_max_bandwidth": 2000.0, - "ebs_throughput": 250.0, - "ecu_per_vcpu": 
3.875, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 30.0, - "name": "C4 High-CPU Quadruple Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2666 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 16.0, - "vpc_only": true - }, - "c4.8xlarge": { - "apiname": "c4.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 132.0, - "ebs_iops": 32000.0, - "ebs_max_bandwidth": 4000.0, - "ebs_throughput": 500.0, - "ecu_per_vcpu": 3.66666666667, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 60.0, - "name": "C4 High-CPU Eight Extra Large", - "network_perf": 13.0, - "physical_processor": "Intel Xeon E5-2666 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 36.0, - "vpc_only": true - }, - "c4.large": { - "apiname": "c4.large", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 8.0, - "ebs_iops": 4000.0, - "ebs_max_bandwidth": 500.0, - "ebs_throughput": 62.5, - "ecu_per_vcpu": 4.0, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 30, - "memory": 3.75, - "name": "C4 High-CPU Large", - "network_perf": 7.0, - "physical_processor": "Intel Xeon E5-2666 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 2.0, - "vpc_only": true - }, - "c4.xlarge": { - "apiname": "c4.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 16.0, - "ebs_iops": 6000.0, - "ebs_max_bandwidth": 750.0, - "ebs_throughput": 93.75, - "ecu_per_vcpu": 4.0, - "enhanced_networking": true, 
- "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 7.5, - "name": "C4 High-CPU Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2666 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 4.0, - "vpc_only": true - }, - "cc2.8xlarge": { - "apiname": "cc2.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 88.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 2.75, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 60.5, - "name": "Cluster Compute Eight Extra Large", - "network_perf": 12.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 3360.0, - "vcpus": 32.0, - "vpc_only": false - }, - "cg1.4xlarge": { - "apiname": "cg1.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 33.5, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 2.09375, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 22.5, - "name": "Cluster GPU Quadruple Extra Large", - "network_perf": 12.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 1680.0, - "vcpus": 16.0, - "vpc_only": false - }, - "cr1.8xlarge": { - "apiname": "cr1.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 88.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 2.75, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - 
"ipv6_support": false, - "linux_virtualization": "Unknown", - "max_ips": 240, - "memory": 244.0, - "name": "High Memory Cluster Eight Extra Large", - "network_perf": 12.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 240.0, - "vcpus": 32.0, - "vpc_only": false - }, - "d2.2xlarge": { - "apiname": "d2.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 28.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 3.5, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 61.0, - "name": "D2 Double Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2676 v3", - "placement_group_support": true, - "storage": 12000.0, - "vcpus": 8.0, - "vpc_only": false - }, - "d2.4xlarge": { - "apiname": "d2.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 56.0, - "ebs_iops": 16000.0, - "ebs_max_bandwidth": 2000.0, - "ebs_throughput": 250.0, - "ecu_per_vcpu": 3.5, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 122.0, - "name": "D2 Quadruple Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2676 v3", - "placement_group_support": true, - "storage": 24000.0, - "vcpus": 16.0, - "vpc_only": false - }, - "d2.8xlarge": { - "apiname": "d2.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 116.0, - "ebs_iops": 32000.0, - "ebs_max_bandwidth": 4000.0, - "ebs_throughput": 500.0, - "ecu_per_vcpu": 3.22222222222, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - 
"linux_virtualization": "HVM", - "max_ips": 240, - "memory": 244.0, - "name": "D2 Eight Extra Large", - "network_perf": 13.0, - "physical_processor": "Intel Xeon E5-2676 v3", - "placement_group_support": true, - "storage": 48000.0, - "vcpus": 36.0, - "vpc_only": false - }, - "d2.xlarge": { - "apiname": "d2.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 14.0, - "ebs_iops": 6000.0, - "ebs_max_bandwidth": 750.0, - "ebs_throughput": 93.75, - "ecu_per_vcpu": 3.5, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 30.5, - "name": "D2 Extra Large", - "network_perf": 7.0, - "physical_processor": "Intel Xeon E5-2676 v3", - "placement_group_support": true, - "storage": 6000.0, - "vcpus": 4.0, - "vpc_only": false - }, - "f1.16xlarge": { - "apiname": "f1.16xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 188.0, - "ebs_iops": 75000.0, - "ebs_max_bandwidth": 14000.0, - "ebs_throughput": 1750.0, - "ecu_per_vcpu": 2.9375, - "enhanced_networking": true, - "fpga": 8, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 400, - "memory": 976.0, - "name": "F1 16xlarge", - "network_perf": 17.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 3760.0, - "vcpus": 64.0, - "vpc_only": true - }, - "f1.2xlarge": { - "apiname": "f1.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 26.0, - "ebs_iops": 12000.0, - "ebs_max_bandwidth": 1700.0, - "ebs_throughput": 200.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 1, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - 
"memory": 122.0, - "name": "F1 Double Extra Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 470.0, - "vcpus": 8.0, - "vpc_only": true - }, - "g2.2xlarge": { - "apiname": "g2.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 26.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "Unknown", - "max_ips": 60, - "memory": 15.0, - "name": "G2 Double Extra Large", - "network_perf": 16.0, - "physical_processor": "", - "placement_group_support": true, - "storage": 60.0, - "vcpus": 8.0, - "vpc_only": false - }, - "g2.8xlarge": { - "apiname": "g2.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 104.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "Unknown", - "max_ips": 240, - "memory": 60.0, - "name": "G2 Eight Extra Large", - "network_perf": 16.0, - "physical_processor": "", - "placement_group_support": true, - "storage": 240.0, - "vcpus": 32.0, - "vpc_only": false - }, - "g3.16xlarge": { - "apiname": "g3.16xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 188.0, - "ebs_iops": 80000.0, - "ebs_max_bandwidth": 14000.0, - "ebs_throughput": 1750.0, - "ecu_per_vcpu": 2.9375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 4, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 750, - "memory": 488.0, - "name": "G3 16xlarge", - "network_perf": 17.0, - "physical_processor": "Intel 
Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 64.0, - "vpc_only": true - }, - "g3.4xlarge": { - "apiname": "g3.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 47.0, - "ebs_iops": 20000.0, - "ebs_max_bandwidth": 3500.0, - "ebs_throughput": 437.0, - "ecu_per_vcpu": 2.9375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 1, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 122.0, - "name": "G3 Quadruple Extra Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 16.0, - "vpc_only": true - }, - "g3.8xlarge": { - "apiname": "g3.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 94.0, - "ebs_iops": 40000.0, - "ebs_max_bandwidth": 7000.0, - "ebs_throughput": 875.0, - "ecu_per_vcpu": 2.9375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 2, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 244.0, - "name": "G3 Eight Extra Large", - "network_perf": 13.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 32.0, - "vpc_only": true - }, - "hi1.4xlarge": { - "apiname": "hi1.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 35.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 2.1875, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "HVM, PV", - "max_ips": 240, - "memory": 60.5, - "name": "HI1. 
High I/O Quadruple Extra Large", - "network_perf": 12.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 2048.0, - "vcpus": 16.0, - "vpc_only": false - }, - "hs1.8xlarge": { - "apiname": "hs1.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 35.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 2.1875, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "HVM, PV", - "max_ips": 240, - "memory": 117.0, - "name": "High Storage Eight Extra Large", - "network_perf": 12.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 48000.0, - "vcpus": 16.0, - "vpc_only": false - }, - "i2.2xlarge": { - "apiname": "i2.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 27.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 3.375, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 61.0, - "name": "I2 Double Extra Large", - "network_perf": 7.0, - "physical_processor": "", - "placement_group_support": true, - "storage": 1600.0, - "vcpus": 8.0, - "vpc_only": false - }, - "i2.4xlarge": { - "apiname": "i2.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 53.0, - "ebs_iops": 16000.0, - "ebs_max_bandwidth": 2000.0, - "ebs_throughput": 250.0, - "ecu_per_vcpu": 3.3125, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 122.0, - "name": "I2 Quadruple Extra Large", - "network_perf": 9.0, - "physical_processor": "", - "placement_group_support": 
true, - "storage": 3200.0, - "vcpus": 16.0, - "vpc_only": false - }, - "i2.8xlarge": { - "apiname": "i2.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 104.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 244.0, - "name": "I2 Eight Extra Large", - "network_perf": 13.0, - "physical_processor": "", - "placement_group_support": true, - "storage": 6400.0, - "vcpus": 32.0, - "vpc_only": false - }, - "i2.xlarge": { - "apiname": "i2.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 14.0, - "ebs_iops": 4000.0, - "ebs_max_bandwidth": 500.0, - "ebs_throughput": 62.5, - "ecu_per_vcpu": 3.5, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 30.5, - "name": "I2 Extra Large", - "network_perf": 7.0, - "physical_processor": "", - "placement_group_support": true, - "storage": 800.0, - "vcpus": 4.0, - "vpc_only": false - }, - "i3.16xlarge": { - "apiname": "i3.16xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 200.0, - "ebs_iops": 65000.0, - "ebs_max_bandwidth": 14000.0, - "ebs_throughput": 1750.0, - "ecu_per_vcpu": 3.125, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "Unknown", - "max_ips": 750, - "memory": 488.0, - "name": "I3 High I/O 16xlarge", - "network_perf": 17.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 15200.0, - "vcpus": 64.0, - "vpc_only": true - }, - "i3.2xlarge": { - "apiname": "i3.2xlarge", 
- "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 27.0, - "ebs_iops": 12000.0, - "ebs_max_bandwidth": 1700.0, - "ebs_throughput": 200.0, - "ecu_per_vcpu": 3.375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "Unknown", - "max_ips": 60, - "memory": 61.0, - "name": "I3 High I/O Double Extra Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 1900.0, - "vcpus": 8.0, - "vpc_only": true - }, - "i3.4xlarge": { - "apiname": "i3.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 53.0, - "ebs_iops": 16000.0, - "ebs_max_bandwidth": 3500.0, - "ebs_throughput": 400.0, - "ecu_per_vcpu": 3.3125, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "Unknown", - "max_ips": 240, - "memory": 122.0, - "name": "I3 High I/O Quadruple Extra Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 3800.0, - "vcpus": 16.0, - "vpc_only": true - }, - "i3.8xlarge": { - "apiname": "i3.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 99.0, - "ebs_iops": 32500.0, - "ebs_max_bandwidth": 7000.0, - "ebs_throughput": 850.0, - "ecu_per_vcpu": 3.09375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "Unknown", - "max_ips": 240, - "memory": 244.0, - "name": "I3 High I/O Eight Extra Large", - "network_perf": 13.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 7600.0, - "vcpus": 32.0, - "vpc_only": true - }, - "i3.large": { - "apiname": 
"i3.large", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 7.0, - "ebs_iops": 3000.0, - "ebs_max_bandwidth": 425.0, - "ebs_throughput": 50.0, - "ecu_per_vcpu": 3.5, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "Unknown", - "max_ips": 30, - "memory": 15.25, - "name": "I3 High I/O Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 475.0, - "vcpus": 2.0, - "vpc_only": true - }, - "i3.xlarge": { - "apiname": "i3.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 13.0, - "ebs_iops": 6000.0, - "ebs_max_bandwidth": 850.0, - "ebs_throughput": 100.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "Unknown", - "max_ips": 60, - "memory": 30.5, - "name": "I3 High I/O Extra Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 950.0, - "vcpus": 4.0, - "vpc_only": true - }, - "m1.large": { - "apiname": "m1.large", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 4.0, - "ebs_iops": 4000.0, - "ebs_max_bandwidth": 500.0, - "ebs_throughput": 62.5, - "ecu_per_vcpu": 2.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 30, - "memory": 7.5, - "name": "M1 General Purpose Large", - "network_perf": 7.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 840.0, - "vcpus": 2.0, - "vpc_only": false - }, - "m1.medium": { - "apiname": "m1.medium", - "architecture": "32/64-bit", - "clock_speed_ghz": "", - "computeunits": 
2.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 2.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 12, - "memory": 3.75, - "name": "M1 General Purpose Medium", - "network_perf": 6.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 410.0, - "vcpus": 1.0, - "vpc_only": false - }, - "m1.small": { - "apiname": "m1.small", - "architecture": "32/64-bit", - "clock_speed_ghz": "", - "computeunits": 1.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 1.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 8, - "memory": 1.7, - "name": "M1 General Purpose Small", - "network_perf": 2.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 160.0, - "vcpus": 1.0, - "vpc_only": false - }, - "m1.xlarge": { - "apiname": "m1.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 8.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 2.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 60, - "memory": 15.0, - "name": "M1 General Purpose Extra Large", - "network_perf": 9.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 1680.0, - "vcpus": 4.0, - "vpc_only": false - }, - "m2.2xlarge": { - "apiname": "m2.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 13.0, - "ebs_iops": 4000.0, - "ebs_max_bandwidth": 500.0, - "ebs_throughput": 62.5, - "ecu_per_vcpu": 3.25, - "enhanced_networking": false, - "fpga": 0, 
- "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 120, - "memory": 34.2, - "name": "M2 High Memory Double Extra Large", - "network_perf": 7.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 850.0, - "vcpus": 4.0, - "vpc_only": false - }, - "m2.4xlarge": { - "apiname": "m2.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 26.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 240, - "memory": 68.4, - "name": "M2 High Memory Quadruple Extra Large", - "network_perf": 9.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 1680.0, - "vcpus": 8.0, - "vpc_only": false - }, - "m2.xlarge": { - "apiname": "m2.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "", - "computeunits": 6.5, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 60, - "memory": 17.1, - "name": "M2 High Memory Extra Large", - "network_perf": 6.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 420.0, - "vcpus": 2.0, - "vpc_only": false - }, - "m3.2xlarge": { - "apiname": "m3.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 26.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": false, - 
"linux_virtualization": "HVM, PV", - "max_ips": 120, - "memory": 30.0, - "name": "M3 General Purpose Double Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2670 v2", - "placement_group_support": false, - "storage": 160.0, - "vcpus": 8.0, - "vpc_only": false - }, - "m3.large": { - "apiname": "m3.large", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 6.5, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": false, - "linux_virtualization": "HVM, PV", - "max_ips": 30, - "memory": 7.5, - "name": "M3 General Purpose Large", - "network_perf": 6.0, - "physical_processor": "Intel Xeon E5-2670 v2", - "placement_group_support": false, - "storage": 32.0, - "vcpus": 2.0, - "vpc_only": false - }, - "m3.medium": { - "apiname": "m3.medium", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 3.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 3.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": false, - "linux_virtualization": "HVM, PV", - "max_ips": 12, - "memory": 3.75, - "name": "M3 General Purpose Medium", - "network_perf": 6.0, - "physical_processor": "Intel Xeon E5-2670 v2", - "placement_group_support": false, - "storage": 4.0, - "vcpus": 1.0, - "vpc_only": false - }, - "m3.xlarge": { - "apiname": "m3.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 13.0, - "ebs_iops": 4000.0, - "ebs_max_bandwidth": 500.0, - "ebs_throughput": 62.5, - "ecu_per_vcpu": 3.25, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": false, - "linux_virtualization": "HVM, PV", - 
"max_ips": 60, - "memory": 15.0, - "name": "M3 General Purpose Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2670 v2", - "placement_group_support": false, - "storage": 80.0, - "vcpus": 4.0, - "vpc_only": false - }, - "m4.10xlarge": { - "apiname": "m4.10xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 124.5, - "ebs_iops": 32000.0, - "ebs_max_bandwidth": 4000.0, - "ebs_throughput": 500.0, - "ecu_per_vcpu": 3.1125, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 160.0, - "name": "M4 Deca Extra Large", - "network_perf": 13.0, - "physical_processor": "Intel Xeon E5-2676 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 40.0, - "vpc_only": true - }, - "m4.16xlarge": { - "apiname": "m4.16xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 188.0, - "ebs_iops": 65000.0, - "ebs_max_bandwidth": 10000.0, - "ebs_throughput": 1250.0, - "ecu_per_vcpu": 2.9375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 256.0, - "name": "M4 16xlarge", - "network_perf": 17.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 64.0, - "vpc_only": true - }, - "m4.2xlarge": { - "apiname": "m4.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 26.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 32.0, - 
"name": "M4 Double Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2676 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 8.0, - "vpc_only": true - }, - "m4.4xlarge": { - "apiname": "m4.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 53.5, - "ebs_iops": 16000.0, - "ebs_max_bandwidth": 2000.0, - "ebs_throughput": 250.0, - "ecu_per_vcpu": 3.34375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 64.0, - "name": "M4 Quadruple Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2676 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 16.0, - "vpc_only": true - }, - "m4.large": { - "apiname": "m4.large", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 6.5, - "ebs_iops": 3600.0, - "ebs_max_bandwidth": 450.0, - "ebs_throughput": 56.25, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 20, - "memory": 8.0, - "name": "M4 Large", - "network_perf": 7.0, - "physical_processor": "Intel Xeon E5-2676 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 2.0, - "vpc_only": true - }, - "m4.xlarge": { - "apiname": "m4.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 13.0, - "ebs_iops": 6000.0, - "ebs_max_bandwidth": 750.0, - "ebs_throughput": 93.75, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 16.0, - "name": "M4 Extra Large", - "network_perf": 9.0, - "physical_processor": 
"Intel Xeon E5-2676 v3", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 4.0, - "vpc_only": true - }, - "p2.16xlarge": { - "apiname": "p2.16xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 188.0, - "ebs_iops": 65000.0, - "ebs_max_bandwidth": 10000.0, - "ebs_throughput": 1250.0, - "ecu_per_vcpu": 2.9375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 16, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 732.0, - "name": "General Purpose GPU 16xlarge", - "network_perf": 17.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 64.0, - "vpc_only": true - }, - "p2.8xlarge": { - "apiname": "p2.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 94.0, - "ebs_iops": 32500.0, - "ebs_max_bandwidth": 5000.0, - "ebs_throughput": 625.0, - "ecu_per_vcpu": 2.9375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 8, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 488.0, - "name": "General Purpose GPU Eight Extra Large", - "network_perf": 13.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 32.0, - "vpc_only": true - }, - "p2.xlarge": { - "apiname": "p2.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 12.0, - "ebs_iops": 6000.0, - "ebs_max_bandwidth": 750.0, - "ebs_throughput": 93.75, - "ecu_per_vcpu": 3.0, - "enhanced_networking": true, - "fpga": 0, - "gpus": 1, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 61.0, - "name": "General Purpose GPU Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel 
Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 4.0, - "vpc_only": true - }, - "r3.2xlarge": { - "apiname": "r3.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 26.0, - "ebs_iops": 8000.0, - "ebs_max_bandwidth": 1000.0, - "ebs_throughput": 125.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 61.0, - "name": "R3 High-Memory Double Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2670 v2", - "placement_group_support": true, - "storage": 160.0, - "vcpus": 8.0, - "vpc_only": false - }, - "r3.4xlarge": { - "apiname": "r3.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 52.0, - "ebs_iops": 16000.0, - "ebs_max_bandwidth": 2000.0, - "ebs_throughput": 250.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 122.0, - "name": "R3 High-Memory Quadruple Extra Large", - "network_perf": 9.0, - "physical_processor": "Intel Xeon E5-2670 v2", - "placement_group_support": true, - "storage": 320.0, - "vcpus": 16.0, - "vpc_only": false - }, - "r3.8xlarge": { - "apiname": "r3.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 104.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 244.0, - "name": "R3 High-Memory Eight Extra Large", - "network_perf": 12.0, - "physical_processor": "Intel Xeon E5-2670 v2", - 
"placement_group_support": true, - "storage": 640.0, - "vcpus": 32.0, - "vpc_only": false - }, - "r3.large": { - "apiname": "r3.large", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 6.5, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 30, - "memory": 15.25, - "name": "R3 High-Memory Large", - "network_perf": 6.0, - "physical_processor": "Intel Xeon E5-2670 v2", - "placement_group_support": true, - "storage": 32.0, - "vcpus": 2.0, - "vpc_only": false - }, - "r3.xlarge": { - "apiname": "r3.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 13.0, - "ebs_iops": 4000.0, - "ebs_max_bandwidth": 500.0, - "ebs_throughput": 62.5, - "ecu_per_vcpu": 3.25, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 30.5, - "name": "R3 High-Memory Extra Large", - "network_perf": 7.0, - "physical_processor": "Intel Xeon E5-2670 v2", - "placement_group_support": true, - "storage": 80.0, - "vcpus": 4.0, - "vpc_only": false - }, - "r4.16xlarge": { - "apiname": "r4.16xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 195.0, - "ebs_iops": 75000.0, - "ebs_max_bandwidth": 14000.0, - "ebs_throughput": 1750.0, - "ecu_per_vcpu": 3.046875, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 750, - "memory": 488.0, - "name": "R4 High-Memory 16xlarge", - "network_perf": 17.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - 
"vcpus": 64.0, - "vpc_only": true - }, - "r4.2xlarge": { - "apiname": "r4.2xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 27.0, - "ebs_iops": 12000.0, - "ebs_max_bandwidth": 1750.0, - "ebs_throughput": 218.0, - "ecu_per_vcpu": 3.375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 61.0, - "name": "R4 High-Memory Double Extra Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 8.0, - "vpc_only": true - }, - "r4.4xlarge": { - "apiname": "r4.4xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 53.0, - "ebs_iops": 18750.0, - "ebs_max_bandwidth": 3500.0, - "ebs_throughput": 437.0, - "ecu_per_vcpu": 3.3125, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 122.0, - "name": "R4 High-Memory Quadruple Extra Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 16.0, - "vpc_only": true - }, - "r4.8xlarge": { - "apiname": "r4.8xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 99.0, - "ebs_iops": 37500.0, - "ebs_max_bandwidth": 7000.0, - "ebs_throughput": 875.0, - "ecu_per_vcpu": 3.09375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 244.0, - "name": "R4 High-Memory Eight Extra Large", - "network_perf": 13.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - 
"vcpus": 32.0, - "vpc_only": true - }, - "r4.large": { - "apiname": "r4.large", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 7.0, - "ebs_iops": 3000.0, - "ebs_max_bandwidth": 437.0, - "ebs_throughput": 54.0, - "ecu_per_vcpu": 3.5, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 30, - "memory": 15.25, - "name": "R4 High-Memory Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 2.0, - "vpc_only": true - }, - "r4.xlarge": { - "apiname": "r4.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 13.5, - "ebs_iops": 6000.0, - "ebs_max_bandwidth": 875.0, - "ebs_throughput": 109.0, - "ecu_per_vcpu": 3.375, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 60, - "memory": 30.5, - "name": "R4 High-Memory Extra Large", - "network_perf": 11.0, - "physical_processor": "Intel Xeon E5-2686 v4", - "placement_group_support": true, - "storage": 0.0, - "vcpus": 4.0, - "vpc_only": true - }, - "t1.micro": { - "apiname": "t1.micro", - "architecture": "32/64-bit", - "clock_speed_ghz": "", - "computeunits": 0.0, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 0.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "", - "intel_avx2": "", - "intel_turbo": "", - "ipv6_support": false, - "linux_virtualization": "PV", - "max_ips": 4, - "memory": 0.613, - "name": "T1 Micro", - "network_perf": 0.0, - "physical_processor": "", - "placement_group_support": false, - "storage": 0.0, - "vcpus": 1.0, - "vpc_only": false - }, - "t2.2xlarge": { - "apiname": "t2.2xlarge", - "architecture": "64-bit", - 
"clock_speed_ghz": "Yes", - "computeunits": 1.35, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 0.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 45, - "memory": 32.0, - "name": "T2 Double Extra Large", - "network_perf": 6.0, - "physical_processor": "Intel Xeon family", - "placement_group_support": false, - "storage": 0.0, - "vcpus": 8.0, - "vpc_only": true - }, - "t2.large": { - "apiname": "t2.large", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 0.6, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 0.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 36, - "memory": 8.0, - "name": "T2 Large", - "network_perf": 4.0, - "physical_processor": "Intel Xeon family", - "placement_group_support": false, - "storage": 0.0, - "vcpus": 2.0, - "vpc_only": true - }, - "t2.medium": { - "apiname": "t2.medium", - "architecture": "32/64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 0.4, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 0.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 18, - "memory": 4.0, - "name": "T2 Medium", - "network_perf": 4.0, - "physical_processor": "Intel Xeon family", - "placement_group_support": false, - "storage": 0.0, - "vcpus": 2.0, - "vpc_only": true - }, - "t2.micro": { - "apiname": "t2.micro", - "architecture": "32/64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 0.1, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - 
"ecu_per_vcpu": 0.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 4, - "memory": 1.0, - "name": "T2 Micro", - "network_perf": 4.0, - "physical_processor": "Intel Xeon family", - "placement_group_support": false, - "storage": 0.0, - "vcpus": 1.0, - "vpc_only": true - }, - "t2.nano": { - "apiname": "t2.nano", - "architecture": "32/64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 0.05, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 0.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 4, - "memory": 0.5, - "name": "T2 Nano", - "network_perf": 2.0, - "physical_processor": "Intel Xeon family", - "placement_group_support": false, - "storage": 0.0, - "vcpus": 1.0, - "vpc_only": true - }, - "t2.small": { - "apiname": "t2.small", - "architecture": "32/64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 0.2, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 0.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 8, - "memory": 2.0, - "name": "T2 Small", - "network_perf": 4.0, - "physical_processor": "Intel Xeon family", - "placement_group_support": false, - "storage": 0.0, - "vcpus": 1.0, - "vpc_only": true - }, - "t2.xlarge": { - "apiname": "t2.xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 0.9, - "ebs_iops": 0.0, - "ebs_max_bandwidth": 0.0, - "ebs_throughput": 0.0, - "ecu_per_vcpu": 0.0, - "enhanced_networking": false, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "", - "intel_turbo": "Yes", - 
"ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 45, - "memory": 16.0, - "name": "T2 Extra Large", - "network_perf": 6.0, - "physical_processor": "Intel Xeon family", - "placement_group_support": false, - "storage": 0.0, - "vcpus": 4.0, - "vpc_only": true - }, - "x1.16xlarge": { - "apiname": "x1.16xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 174.5, - "ebs_iops": 40000.0, - "ebs_max_bandwidth": 7000.0, - "ebs_throughput": 875.0, - "ecu_per_vcpu": 2.7265625, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 976.0, - "name": "X1 Extra High-Memory 16xlarge", - "network_perf": 13.0, - "physical_processor": "Intel Xeon E7-8880 v3", - "placement_group_support": true, - "storage": 1920.0, - "vcpus": 64.0, - "vpc_only": true - }, - "x1.32xlarge": { - "apiname": "x1.32xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 349.0, - "ebs_iops": 80000.0, - "ebs_max_bandwidth": 14000.0, - "ebs_throughput": 1750.0, - "ecu_per_vcpu": 2.7265625, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - "ipv6_support": true, - "linux_virtualization": "HVM", - "max_ips": 240, - "memory": 1952.0, - "name": "X1 Extra High-Memory 32xlarge", - "network_perf": 17.0, - "physical_processor": "Intel Xeon E7-8880 v3", - "placement_group_support": true, - "storage": 3840.0, - "vcpus": 128.0, - "vpc_only": true - }, - "x1e.32xlarge": { - "apiname": "x1e.32xlarge", - "architecture": "64-bit", - "clock_speed_ghz": "Yes", - "computeunits": 340.0, - "ebs_iops": 80000.0, - "ebs_max_bandwidth": 14000.0, - "ebs_throughput": 1750.0, - "ecu_per_vcpu": 2.65625, - "enhanced_networking": true, - "fpga": 0, - "gpus": 0, - "intel_avx": "Yes", - "intel_avx2": "Yes", - "intel_turbo": "Yes", - 
"ipv6_support": false, - "linux_virtualization": "Unknown", - "max_ips": 240, - "memory": 3904.0, - "name": "X1E 32xlarge", - "network_perf": 17.0, - "physical_processor": "Intel Xeon E7-8880 v3", - "placement_group_support": false, - "storage": 3840.0, - "vcpus": 128.0, - "vpc_only": true - } -}""" diff --git a/scripts/get_instance_info.py b/scripts/get_instance_info.py index bb8d28eb3..f883c0cae 100755 --- a/scripts/get_instance_info.py +++ b/scripts/get_instance_info.py @@ -141,20 +141,10 @@ def main(): result[instance_id] = instance_data root_dir = subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).decode().strip() - dest = os.path.join(root_dir, 'moto/ec2/resources/instance_types.py') + dest = os.path.join(root_dir, 'moto/ec2/resources/instance_types.json') print("Writing data to {0}".format(dest)) with open(dest, 'w') as open_file: - triple_quote = '\"\"\"' - - open_file.write("# Imported via `scripts/get_instance_info.py`\n") - open_file.write('instance_types_data = {}\n'.format(triple_quote)) - json.dump(result, - open_file, - sort_keys=True, - indent=4, - separators=(',', ': ')) - open_file.write('{}\n'.format(triple_quote)) - + json.dump(result, open_file) if __name__ == '__main__': main() From 9c5d05dd6112be6505f98ff4d2099df31703f39a Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 14:01:34 -0700 Subject: [PATCH 320/412] bumping to version 1.1.18 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 1093af638..9ae558246 100755 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ extras_require = { setup( name='moto', - version='1.1.17', + version='1.1.18', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 617f4b0ad128581ec0b4daee036d2d1d23ce7fa4 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 14:07:18 -0700 Subject: [PATCH 321/412] dockerfile with cryptography package --- Dockerfile | 11 
+++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 3c18fb106..24d7c34ff 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,18 @@ FROM alpine:3.6 +RUN apk add --no-cache --update \ + gcc \ + musl-dev \ + python3-dev \ + libffi-dev \ + openssl-dev \ + python3 + ADD . /moto/ ENV PYTHONUNBUFFERED 1 WORKDIR /moto/ -RUN apk add --no-cache python3 && \ - python3 -m ensurepip && \ +RUN python3 -m ensurepip && \ rm -r /usr/lib/python*/ensurepip && \ pip3 --no-cache-dir install --upgrade pip setuptools && \ pip3 --no-cache-dir install ".[server]" From a8b64022bb8ce0496951981f03b6b3f25e2a9f96 Mon Sep 17 00:00:00 2001 From: Kevin Frommelt Date: Wed, 27 Sep 2017 16:33:52 -0500 Subject: [PATCH 322/412] Install addition package files --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index f5344a0c0..adca181c4 100755 --- a/setup.py +++ b/setup.py @@ -39,6 +39,7 @@ setup( packages=find_packages(exclude=("tests", "tests.*")), install_requires=install_requires, extras_require=extras_require, + include_package_data=True, license="Apache", test_suite="tests", classifiers=[ From ca9db672b46fd5b050cba65b737383e043d18639 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Wed, 27 Sep 2017 14:43:39 -0700 Subject: [PATCH 323/412] bumping to version 1.1.19 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 9ae558246..933b8cf83 100755 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ extras_require = { setup( name='moto', - version='1.1.18', + version='1.1.19', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 9008b852995146b85296174108fc94c5f727bcb5 Mon Sep 17 00:00:00 2001 From: Alexander Mohr Date: Wed, 27 Sep 2017 16:04:58 -0700 Subject: [PATCH 324/412] lambda + SNS enhancements (#1048) * updates - support lambda messages from SNS - run lambda in docker container * decode output * 
populate timeout * simplify * whoops * skeletons of cloudwatchlogs * impl filter log streams * fix logging * PEP fixes * PEP fixes * fix reset * fix reset * add new endpoint * fix region name * add docker * try to fix tests * try to fix travis issue with boto * fix escaping in urls * fix environment variables * fix PEP * more pep * switch back to precise * another fix attempt * fix typo * fix lambda invoke * fix more unittests * work on getting this to work in new scheme * fix py2 * fix error * fix tests when running in server mode * more lambda fixes * try running with latest docker adapted from aiodocker * switch to docker python client * pep fixes * switch to docker volume * fix unittest * fix invoke from sns * fix zip2tar * add hack impl for get_function with zip * try fix * fix for py < 3.6 * add volume refcount * try to fix travis * docker test * fix yaml * try fix * update endpoints * fix * another attempt * try again * fix recursive import * refactor fix * revert changes with better fix * more reverts * wait for service to come up * add back detached mode * sleep and add another exception type * put this back for logging * put back with note * whoops :) * docker in docker! * fix invalid url * hopefully last fix! * fix lambda regions * fix protocol * travis!!!! 
* just run lambda test for now * use one print * fix escaping * another attempt * yet another * re-enable all tests * fixes * fix for py2 * revert change * fix for py2.7 * fix output ordering * remove this given there's a new unittest that covers it * changes based on review - add skeleton logs test file - switch to docker image that matches test env - fix mock_logs import * add readme entry --- .travis.yml | 25 +- README.md | 2 + moto/__init__.py | 1 + moto/awslambda/models.py | 362 ++++++++++++++++++++++------ moto/awslambda/urls.py | 8 +- moto/backends.py | 2 + moto/ec2/models.py | 5 +- moto/ecs/responses.py | 4 +- moto/logs/__init__.py | 5 + moto/logs/models.py | 228 ++++++++++++++++++ moto/logs/responses.py | 114 +++++++++ moto/logs/urls.py | 9 + moto/s3/urls.py | 2 +- moto/server.py | 13 +- moto/sns/models.py | 7 + requirements-dev.txt | 1 + setup.py | 15 ++ tests/test_awslambda/test_lambda.py | 150 +++++++----- tests/test_logs/test_logs.py | 14 ++ travis_moto_server.sh | 5 + wait_for.py | 31 +++ 21 files changed, 836 insertions(+), 167 deletions(-) create mode 100644 moto/logs/__init__.py create mode 100644 moto/logs/models.py create mode 100644 moto/logs/responses.py create mode 100644 moto/logs/urls.py create mode 100644 tests/test_logs/test_logs.py create mode 100755 travis_moto_server.sh create mode 100755 wait_for.py diff --git a/.travis.yml b/.travis.yml index fccbdde27..f1b7ac40d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,23 +1,36 @@ language: python sudo: false +services: + - docker python: - 2.7 - 3.6 env: - TEST_SERVER_MODE=false - TEST_SERVER_MODE=true +before_install: + - export BOTO_CONFIG=/dev/null install: - - travis_retry pip install boto==2.45.0 - - travis_retry pip install boto3 - - travis_retry pip install . 
- - travis_retry pip install -r requirements-dev.txt - - travis_retry pip install coveralls==1.1 + # We build moto first so the docker container doesn't try to compile it as well, also note we don't use + # -d for docker run so the logs show up in travis + # Python images come from here: https://hub.docker.com/_/python/ - | + python setup.py sdist + if [ "$TEST_SERVER_MODE" = "true" ]; then - AWS_SECRET_ACCESS_KEY=server_secret AWS_ACCESS_KEY_ID=server_key moto_server -p 5000& + docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh & export AWS_SECRET_ACCESS_KEY=foobar_secret export AWS_ACCESS_KEY_ID=foobar_key fi + travis_retry pip install boto==2.45.0 + travis_retry pip install boto3 + travis_retry pip install dist/moto*.gz + travis_retry pip install coveralls==1.1 + travis_retry pip install -r requirements-dev.txt + + if [ "$TEST_SERVER_MODE" = "true" ]; then + python wait_for.py + fi script: - make test after_success: diff --git a/README.md b/README.md index 3d8b61258..92ad5d9c0 100644 --- a/README.md +++ b/README.md @@ -96,6 +96,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. 
L |------------------------------------------------------------------------------| | Lambda | @mock_lambda | basic endpoints done | |------------------------------------------------------------------------------| +| Logs | @mock_logs | basic endpoints done | +|------------------------------------------------------------------------------| | Kinesis | @mock_kinesis | core endpoints done | |------------------------------------------------------------------------------| | KMS | @mock_kms | basic endpoints done | diff --git a/moto/__init__.py b/moto/__init__.py index b408f6678..64baa52ac 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -39,6 +39,7 @@ from .ssm import mock_ssm # flake8: noqa from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa from .swf import mock_swf, mock_swf_deprecated # flake8: noqa from .xray import mock_xray # flake8: noqa +from .logs import mock_logs, mock_logs_deprecated # flake8: noqa try: diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 1c489f3fd..d22d1a7f4 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -1,34 +1,150 @@ from __future__ import unicode_literals import base64 +from collections import defaultdict import datetime +import docker.errors import hashlib import io +import logging import os import json -import sys +import re import zipfile - -try: - from StringIO import StringIO -except: - from io import StringIO +import uuid +import functools +import tarfile +import calendar +import threading +import traceback +import requests.adapters import boto.awslambda from moto.core import BaseBackend, BaseModel +from moto.core.utils import unix_time_millis from moto.s3.models import s3_backend +from moto.logs.models import logs_backends from moto.s3.exceptions import MissingBucket, MissingKey +from moto import settings + +logger = logging.getLogger(__name__) + + +try: + from tempfile import TemporaryDirectory +except ImportError: + from backports.tempfile import 
TemporaryDirectory + + +_stderr_regex = re.compile(r'START|END|REPORT RequestId: .*') +_orig_adapter_send = requests.adapters.HTTPAdapter.send + + +def zip2tar(zip_bytes): + with TemporaryDirectory() as td: + tarname = os.path.join(td, 'data.tar') + timeshift = int((datetime.datetime.now() - + datetime.datetime.utcnow()).total_seconds()) + with zipfile.ZipFile(io.BytesIO(zip_bytes), 'r') as zipf, \ + tarfile.TarFile(tarname, 'w') as tarf: + for zipinfo in zipf.infolist(): + if zipinfo.filename[-1] == '/': # is_dir() is py3.6+ + continue + + tarinfo = tarfile.TarInfo(name=zipinfo.filename) + tarinfo.size = zipinfo.file_size + tarinfo.mtime = calendar.timegm(zipinfo.date_time) - timeshift + infile = zipf.open(zipinfo.filename) + tarf.addfile(tarinfo, infile) + + with open(tarname, 'rb') as f: + tar_data = f.read() + return tar_data + + +class _VolumeRefCount: + __slots__ = "refcount", "volume" + + def __init__(self, refcount, volume): + self.refcount = refcount + self.volume = volume + + +class _DockerDataVolumeContext: + _data_vol_map = defaultdict(lambda: _VolumeRefCount(0, None)) # {sha256: _VolumeRefCount} + _lock = threading.Lock() + + def __init__(self, lambda_func): + self._lambda_func = lambda_func + self._vol_ref = None + + @property + def name(self): + return self._vol_ref.volume.name + + def __enter__(self): + # See if volume is already known + with self.__class__._lock: + self._vol_ref = self.__class__._data_vol_map[self._lambda_func.code_sha_256] + self._vol_ref.refcount += 1 + if self._vol_ref.refcount > 1: + return self + + # See if the volume already exists + for vol in self._lambda_func.docker_client.volumes.list(): + if vol.name == self._lambda_func.code_sha_256: + self._vol_ref.volume = vol + return self + + # It doesn't exist so we need to create it + self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(self._lambda_func.code_sha_256) + container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', 
volumes={self.name: '/tmp/data'}, detach=True) + try: + tar_bytes = zip2tar(self._lambda_func.code_bytes) + container.put_archive('/tmp/data', tar_bytes) + finally: + container.remove(force=True) + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + with self.__class__._lock: + self._vol_ref.refcount -= 1 + if self._vol_ref.refcount == 0: + try: + self._vol_ref.volume.remove() + except docker.errors.APIError as e: + if e.status_code != 409: + raise + + raise # multiple processes trying to use same volume? class LambdaFunction(BaseModel): - - def __init__(self, spec, validate_s3=True): + def __init__(self, spec, region, validate_s3=True): # required + self.region = region self.code = spec['Code'] self.function_name = spec['FunctionName'] self.handler = spec['Handler'] self.role = spec['Role'] self.run_time = spec['Runtime'] + self.logs_backend = logs_backends[self.region] + self.environment_vars = spec.get('Environment', {}).get('Variables', {}) + self.docker_client = docker.from_env() + + # Unfortunately mocking replaces this method w/o fallback enabled, so we + # need to replace it if we detect it's been mocked + if requests.adapters.HTTPAdapter.send != _orig_adapter_send: + _orig_get_adapter = self.docker_client.api.get_adapter + + def replace_adapter_send(*args, **kwargs): + adapter = _orig_get_adapter(*args, **kwargs) + + if isinstance(adapter, requests.adapters.HTTPAdapter): + adapter.send = functools.partial(_orig_adapter_send, adapter) + return adapter + self.docker_client.api.get_adapter = replace_adapter_send # optional self.description = spec.get('Description', '') @@ -36,13 +152,18 @@ class LambdaFunction(BaseModel): self.publish = spec.get('Publish', False) # this is ignored currently self.timeout = spec.get('Timeout', 3) + self.logs_group_name = '/aws/lambda/{}'.format(self.function_name) + self.logs_backend.ensure_log_group(self.logs_group_name, []) + # this isn't finished yet. 
it needs to find out the VpcId value self._vpc_config = spec.get( 'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []}) # auto-generated self.version = '$LATEST' - self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + self.last_modified = datetime.datetime.utcnow().strftime( + '%Y-%m-%d %H:%M:%S') + if 'ZipFile' in self.code: # more hackery to handle unicode/bytes/str in python3 and python2 - # argh! @@ -52,12 +173,13 @@ class LambdaFunction(BaseModel): except Exception: to_unzip_code = base64.b64decode(self.code['ZipFile']) - zbuffer = io.BytesIO() - zbuffer.write(to_unzip_code) - zip_file = zipfile.ZipFile(zbuffer, 'r', zipfile.ZIP_DEFLATED) - self.code = zip_file.read("".join(zip_file.namelist())) + self.code_bytes = to_unzip_code self.code_size = len(to_unzip_code) self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest() + + # TODO: we should be putting this in a lambda bucket + self.code['UUID'] = str(uuid.uuid4()) + self.code['S3Key'] = '{}-{}'.format(self.function_name, self.code['UUID']) else: # validate s3 bucket and key key = None @@ -76,10 +198,12 @@ class LambdaFunction(BaseModel): "InvalidParameterValueException", "Error occurred while GetObject. S3 Error Code: NoSuchKey. 
S3 Error Message: The specified key does not exist.") if key: + self.code_bytes = key.value self.code_size = key.size self.code_sha_256 = hashlib.sha256(key.value).hexdigest() - self.function_arn = 'arn:aws:lambda:123456789012:function:{0}'.format( - self.function_name) + + self.function_arn = 'arn:aws:lambda:{}:123456789012:function:{}'.format( + self.region, self.function_name) self.tags = dict() @@ -94,7 +218,7 @@ class LambdaFunction(BaseModel): return json.dumps(self.get_configuration()) def get_configuration(self): - return { + config = { "CodeSha256": self.code_sha_256, "CodeSize": self.code_size, "Description": self.description, @@ -110,70 +234,105 @@ class LambdaFunction(BaseModel): "VpcConfig": self.vpc_config, } - def get_code(self): - if isinstance(self.code, dict): - return { - "Code": { - "Location": "s3://lambda-functions.aws.amazon.com/{0}".format(self.code['S3Key']), - "RepositoryType": "S3" - }, - "Configuration": self.get_configuration(), - } - else: - return { - "Configuration": self.get_configuration(), + if self.environment_vars: + config['Environment'] = { + 'Variables': self.environment_vars } - def convert(self, s): + return config + + def get_code(self): + return { + "Code": { + "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/{1}".format(self.region, self.code['S3Key']), + "RepositoryType": "S3" + }, + "Configuration": self.get_configuration(), + } + + @staticmethod + def convert(s): try: return str(s, encoding='utf-8') except: return s - def is_json(self, test_str): + @staticmethod + def is_json(test_str): try: response = json.loads(test_str) except: response = test_str return response - def _invoke_lambda(self, code, event={}, context={}): - # TO DO: context not yet implemented - try: - mycode = "\n".join(['import json', - self.convert(self.code), - self.convert('print(json.dumps(lambda_handler(%s, %s)))' % (self.is_json(self.convert(event)), context))]) + def _invoke_lambda(self, code, event=None, context=None): + # TODO: 
context not yet implemented + if event is None: + event = dict() + if context is None: + context = {} - except Exception as ex: - print("Exception %s", ex) - - errored = False try: - original_stdout = sys.stdout - original_stderr = sys.stderr - codeOut = StringIO() - codeErr = StringIO() - sys.stdout = codeOut - sys.stderr = codeErr - exec(mycode) - exec_err = codeErr.getvalue() - exec_out = codeOut.getvalue() - result = self.convert(exec_out.strip()) - if exec_err: - result = "\n".join([exec_out.strip(), self.convert(exec_err)]) - except Exception as ex: - errored = True - result = '%s\n\n\nException %s' % (mycode, ex) - finally: - codeErr.close() - codeOut.close() - sys.stdout = original_stdout - sys.stderr = original_stderr - return self.convert(result), errored + # TODO: I believe we can keep the container running and feed events as needed + # also need to hook it up to the other services so it can make kws/s3 etc calls + # Should get invoke_id /RequestId from invovation + env_vars = { + "AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout, + "AWS_LAMBDA_FUNCTION_NAME": self.function_name, + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size, + "AWS_LAMBDA_FUNCTION_VERSION": self.version, + "AWS_REGION": self.region, + } + + env_vars.update(self.environment_vars) + + container = output = exit_code = None + with _DockerDataVolumeContext(self) as data_vol: + try: + run_kwargs = dict(links={'motoserver': 'motoserver'}) if settings.TEST_SERVER_MODE else {} + container = self.docker_client.containers.run( + "lambci/lambda:{}".format(self.run_time), + [self.handler, json.dumps(event)], remove=False, + mem_limit="{}m".format(self.memory_size), + volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, **run_kwargs) + finally: + if container: + exit_code = container.wait() + output = container.logs(stdout=False, stderr=True) + output += container.logs(stdout=True, stderr=False) + container.remove() + + output = output.decode('utf-8') + + # Send output 
to "logs" backend + invoke_id = uuid.uuid4().hex + log_stream_name = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format( + date=datetime.datetime.utcnow(), version=self.version, invoke_id=invoke_id + ) + + self.logs_backend.create_log_stream(self.logs_group_name, log_stream_name) + + log_events = [{'timestamp': unix_time_millis(), "message": line} + for line in output.splitlines()] + self.logs_backend.put_log_events(self.logs_group_name, log_stream_name, log_events, None) + + if exit_code != 0: + raise Exception( + 'lambda invoke failed output: {}'.format(output)) + + # strip out RequestId lines + output = os.linesep.join([line for line in self.convert(output).splitlines() if not _stderr_regex.match(line)]) + return output, False + except BaseException as e: + traceback.print_exc() + return "error running lambda: {}".format(e), True def invoke(self, body, request_headers, response_headers): payload = dict() + if body: + body = json.loads(body) + # Get the invocation type: res, errored = self._invoke_lambda(code=self.code, event=body) if request_headers.get("x-amz-invocation-type") == "RequestResponse": @@ -189,7 +348,8 @@ class LambdaFunction(BaseModel): return result @classmethod - def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, + region_name): properties = cloudformation_json['Properties'] # required @@ -212,17 +372,19 @@ class LambdaFunction(BaseModel): # this snippet converts this plaintext code to a proper base64-encoded ZIP file. 
if 'ZipFile' in properties['Code']: spec['Code']['ZipFile'] = base64.b64encode( - cls._create_zipfile_from_plaintext_code(spec['Code']['ZipFile'])) + cls._create_zipfile_from_plaintext_code( + spec['Code']['ZipFile'])) backend = lambda_backends[region_name] fn = backend.create_function(spec) return fn def get_cfn_attribute(self, attribute_name): - from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + from moto.cloudformation.exceptions import \ + UnformattedGetAttTemplateException if attribute_name == 'Arn': - region = 'us-east-1' - return 'arn:aws:lambda:{0}:123456789012:function:{1}'.format(region, self.function_name) + return 'arn:aws:lambda:{0}:123456789012:function:{1}'.format( + self.region, self.function_name) raise UnformattedGetAttTemplateException() @staticmethod @@ -236,7 +398,6 @@ class LambdaFunction(BaseModel): class EventSourceMapping(BaseModel): - def __init__(self, spec): # required self.function_name = spec['FunctionName'] @@ -246,10 +407,12 @@ class EventSourceMapping(BaseModel): # optional self.batch_size = spec.get('BatchSize', 100) self.enabled = spec.get('Enabled', True) - self.starting_position_timestamp = spec.get('StartingPositionTimestamp', None) + self.starting_position_timestamp = spec.get('StartingPositionTimestamp', + None) @classmethod - def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, + region_name): properties = cloudformation_json['Properties'] spec = { 'FunctionName': properties['FunctionName'], @@ -264,12 +427,12 @@ class EventSourceMapping(BaseModel): class LambdaVersion(BaseModel): - def __init__(self, spec): self.version = spec['Version'] @classmethod - def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, + region_name): properties = cloudformation_json['Properties'] 
spec = { 'Version': properties.get('Version') @@ -278,9 +441,14 @@ class LambdaVersion(BaseModel): class LambdaBackend(BaseBackend): - - def __init__(self): + def __init__(self, region_name): self._functions = {} + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) def has_function(self, function_name): return function_name in self._functions @@ -289,7 +457,7 @@ class LambdaBackend(BaseBackend): return self.get_function_by_arn(function_arn) is not None def create_function(self, spec): - fn = LambdaFunction(spec) + fn = LambdaFunction(spec, self.region_name) self._functions[fn.function_name] = fn return fn @@ -308,6 +476,42 @@ class LambdaBackend(BaseBackend): def list_functions(self): return self._functions.values() + def send_message(self, function_name, message): + event = { + "Records": [ + { + "EventVersion": "1.0", + "EventSubscriptionArn": "arn:aws:sns:EXAMPLE", + "EventSource": "aws:sns", + "Sns": { + "SignatureVersion": "1", + "Timestamp": "1970-01-01T00:00:00.000Z", + "Signature": "EXAMPLE", + "SigningCertUrl": "EXAMPLE", + "MessageId": "95df01b4-ee98-5cb9-9903-4c221d41eb5e", + "Message": message, + "MessageAttributes": { + "Test": { + "Type": "String", + "Value": "TestString" + }, + "TestBinary": { + "Type": "Binary", + "Value": "TestBinary" + } + }, + "Type": "Notification", + "UnsubscribeUrl": "EXAMPLE", + "TopicArn": "arn:aws:sns:EXAMPLE", + "Subject": "TestInvoke" + } + } + ] + + } + self._functions[function_name].invoke(json.dumps(event), {}, {}) + pass + def list_tags(self, resource): return self.get_function_by_arn(resource).tags @@ -328,10 +532,8 @@ def do_validate_s3(): return os.environ.get('VALIDATE_LAMBDA_S3', '') in ['', '1', 'true'] -lambda_backends = {} -for region in boto.awslambda.regions(): - lambda_backends[region.name] = LambdaBackend() - # Handle us forgotten regions, unless Lambda truly only runs out of US and -for region in ['ap-southeast-2']: - 
lambda_backends[region] = LambdaBackend() +lambda_backends = {_region.name: LambdaBackend(_region.name) + for _region in boto.awslambda.regions()} + +lambda_backends['ap-southeast-2'] = LambdaBackend('ap-southeast-2') diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index 1b6c2e934..0fec24bab 100644 --- a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -9,8 +9,8 @@ response = LambdaResponse() url_paths = { '{0}/(?P[^/]+)/functions/?$': response.root, - '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/?$': response.function, - '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invocations/?$': response.invoke, - '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invoke-async/?$': response.invoke_async, - '{0}/(?P[^/]+)/tags/(?P.+)': response.tag + r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/?$': response.function, + r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invocations/?$': response.invoke, + r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invoke-async/?$': response.invoke_async, + r'{0}/(?P[^/]+)/tags/(?P.+)': response.tag } diff --git a/moto/backends.py b/moto/backends.py index 26a60002e..24a8b6c2b 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -22,6 +22,7 @@ from moto.iam import iam_backends from moto.instance_metadata import instance_metadata_backends from moto.kinesis import kinesis_backends from moto.kms import kms_backends +from moto.logs import logs_backends from moto.opsworks import opsworks_backends from moto.polly import polly_backends from moto.rds2 import rds2_backends @@ -55,6 +56,7 @@ BACKENDS = { 'iam': iam_backends, 'moto_api': moto_api_backends, 'instance_metadata': instance_metadata_backends, + 'logs': logs_backends, 'kinesis': kinesis_backends, 'kms': kms_backends, 'opsworks': opsworks_backends, diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 07e218106..10fec7fd7 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3667,6 +3667,5 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, return True -ec2_backends = {} -for region in 
RegionsAndZonesBackend.regions: - ec2_backends[region.name] = EC2Backend(region.name) +ec2_backends = {region.name: EC2Backend(region.name) + for region in RegionsAndZonesBackend.regions} diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index 50d9e3cd4..8f6fe850f 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -18,8 +18,8 @@ class EC2ContainerServiceResponse(BaseResponse): except ValueError: return {} - def _get_param(self, param): - return self.request_params.get(param, None) + def _get_param(self, param, if_none=None): + return self.request_params.get(param, if_none) def create_cluster(self): cluster_name = self._get_param('clusterName') diff --git a/moto/logs/__init__.py b/moto/logs/__init__.py new file mode 100644 index 000000000..f325243fc --- /dev/null +++ b/moto/logs/__init__.py @@ -0,0 +1,5 @@ +from .models import logs_backends +from ..core.models import base_decorator, deprecated_base_decorator + +mock_logs = base_decorator(logs_backends) +mock_logs_deprecated = deprecated_base_decorator(logs_backends) diff --git a/moto/logs/models.py b/moto/logs/models.py new file mode 100644 index 000000000..14f511932 --- /dev/null +++ b/moto/logs/models.py @@ -0,0 +1,228 @@ +from moto.core import BaseBackend +import boto.logs +from moto.core.utils import unix_time_millis + + +class LogEvent: + _event_id = 0 + + def __init__(self, ingestion_time, log_event): + self.ingestionTime = ingestion_time + self.timestamp = log_event["timestamp"] + self.message = log_event['message'] + self.eventId = self.__class__._event_id + self.__class__._event_id += 1 + + def to_filter_dict(self): + return { + "eventId": self.eventId, + "ingestionTime": self.ingestionTime, + # "logStreamName": + "message": self.message, + "timestamp": self.timestamp + } + + +class LogStream: + _log_ids = 0 + + def __init__(self, region, log_group, name): + self.region = region + self.arn = "arn:aws:logs:{region}:{id}:log-group:{log_group}:log-stream:{log_stream}".format( + 
region=region, id=self.__class__._log_ids, log_group=log_group, log_stream=name) + self.creationTime = unix_time_millis() + self.firstEventTimestamp = None + self.lastEventTimestamp = None + self.lastIngestionTime = None + self.logStreamName = name + self.storedBytes = 0 + self.uploadSequenceToken = 0 # I'm guessing this is token needed for sequenceToken by put_events + self.events = [] + + self.__class__._log_ids += 1 + + def to_describe_dict(self): + return { + "arn": self.arn, + "creationTime": self.creationTime, + "firstEventTimestamp": self.firstEventTimestamp, + "lastEventTimestamp": self.lastEventTimestamp, + "lastIngestionTime": self.lastIngestionTime, + "logStreamName": self.logStreamName, + "storedBytes": self.storedBytes, + "uploadSequenceToken": str(self.uploadSequenceToken), + } + + def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): + # TODO: ensure sequence_token + # TODO: to be thread safe this would need a lock + self.lastIngestionTime = unix_time_millis() + # TODO: make this match AWS if possible + self.storedBytes += sum([len(log_event["message"]) for log_event in log_events]) + self.events += [LogEvent(self.lastIngestionTime, log_event) for log_event in log_events] + self.uploadSequenceToken += 1 + + return self.uploadSequenceToken + + def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): + def filter_func(event): + if start_time and event.timestamp < start_time: + return False + + if end_time and event.timestamp > end_time: + return False + + return True + + events = sorted(filter(filter_func, self.events), key=lambda event: event.timestamp, reverse=start_from_head) + back_token = next_token + if next_token is None: + next_token = 0 + + events_page = events[next_token: next_token + limit] + next_token += limit + if next_token >= len(self.events): + next_token = None + + return events_page, back_token, next_token + + def filter_log_events(self, 
log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved): + def filter_func(event): + if start_time and event.timestamp < start_time: + return False + + if end_time and event.timestamp > end_time: + return False + + return True + + events = [] + for event in sorted(filter(filter_func, self.events), key=lambda x: x.timestamp): + event_obj = event.to_filter_dict() + event_obj['logStreamName'] = self.logStreamName + events.append(event_obj) + return events + + +class LogGroup: + def __init__(self, region, name, tags): + self.name = name + self.region = region + self.tags = tags + self.streams = dict() # {name: LogStream} + + def create_log_stream(self, log_stream_name): + assert log_stream_name not in self.streams + self.streams[log_stream_name] = LogStream(self.region, self.name, log_stream_name) + + def delete_log_stream(self, log_stream_name): + assert log_stream_name in self.streams + del self.streams[log_stream_name] + + def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): + log_streams = [stream.to_describe_dict() for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)] + + def sorter(stream): + return stream.name if order_by == 'logStreamName' else stream.lastEventTimestamp + + if next_token is None: + next_token = 0 + + log_streams = sorted(log_streams, key=sorter, reverse=descending) + new_token = next_token + limit + log_streams_page = log_streams[next_token: new_token] + if new_token >= len(log_streams): + new_token = None + + return log_streams_page, new_token + + def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): + assert log_stream_name in self.streams + stream = self.streams[log_stream_name] + return stream.put_log_events(log_group_name, log_stream_name, log_events, sequence_token) + + def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, 
start_from_head): + assert log_stream_name in self.streams + stream = self.streams[log_stream_name] + return stream.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) + + def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved): + assert not filter_pattern # TODO: impl + + streams = [stream for name, stream in self.streams.items() if not log_stream_names or name in log_stream_names] + + events = [] + for stream in streams: + events += stream.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) + + if interleaved: + events = sorted(events, key=lambda event: event.timestamp) + + if next_token is None: + next_token = 0 + + events_page = events[next_token: next_token + limit] + next_token += limit + if next_token >= len(events): + next_token = None + + searched_streams = [{"logStreamName": stream.logStreamName, "searchedCompletely": True} for stream in streams] + return events_page, next_token, searched_streams + + +class LogsBackend(BaseBackend): + def __init__(self, region_name): + self.region_name = region_name + self.groups = dict() # { logGroupName: LogGroup} + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_log_group(self, log_group_name, tags): + assert log_group_name not in self.groups + self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) + + def ensure_log_group(self, log_group_name, tags): + if log_group_name in self.groups: + return + self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) + + def delete_log_group(self, log_group_name): + assert log_group_name in self.groups + del self.groups[log_group_name] + + def create_log_stream(self, log_group_name, log_stream_name): + assert log_group_name in self.groups + log_group = 
self.groups[log_group_name] + return log_group.create_log_stream(log_stream_name) + + def delete_log_stream(self, log_group_name, log_stream_name): + assert log_group_name in self.groups + log_group = self.groups[log_group_name] + return log_group.delete_log_stream(log_stream_name) + + def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): + assert log_group_name in self.groups + log_group = self.groups[log_group_name] + return log_group.describe_log_streams(descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by) + + def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): + # TODO: add support for sequence_tokens + assert log_group_name in self.groups + log_group = self.groups[log_group_name] + return log_group.put_log_events(log_group_name, log_stream_name, log_events, sequence_token) + + def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): + assert log_group_name in self.groups + log_group = self.groups[log_group_name] + return log_group.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) + + def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved): + assert log_group_name in self.groups + log_group = self.groups[log_group_name] + return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) + + +logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()} diff --git a/moto/logs/responses.py b/moto/logs/responses.py new file mode 100644 index 000000000..4cb9caa6a --- /dev/null +++ b/moto/logs/responses.py @@ -0,0 +1,114 @@ +from moto.core.responses import BaseResponse +from .models import logs_backends +import json + + +# See 
http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/Welcome.html + +class LogsResponse(BaseResponse): + @property + def logs_backend(self): + return logs_backends[self.region] + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + def _get_param(self, param, if_none=None): + return self.request_params.get(param, if_none) + + def create_log_group(self): + log_group_name = self._get_param('logGroupName') + tags = self._get_param('tags') + assert 1 <= len(log_group_name) <= 512 # TODO: assert pattern + + self.logs_backend.create_log_group(log_group_name, tags) + return '' + + def delete_log_group(self): + log_group_name = self._get_param('logGroupName') + self.logs_backend.delete_log_group(log_group_name) + return '' + + def create_log_stream(self): + log_group_name = self._get_param('logGroupName') + log_stream_name = self._get_param('logStreamName') + self.logs_backend.create_log_stream(log_group_name, log_stream_name) + return '' + + def delete_log_stream(self): + log_group_name = self._get_param('logGroupName') + log_stream_name = self._get_param('logStreamName') + self.logs_backend.delete_log_stream(log_group_name, log_stream_name) + return '' + + def describe_log_streams(self): + log_group_name = self._get_param('logGroupName') + log_stream_name_prefix = self._get_param('logStreamNamePrefix') + descending = self._get_param('descending', False) + limit = self._get_param('limit', 50) + assert limit <= 50 + next_token = self._get_param('nextToken') + order_by = self._get_param('orderBy', 'LogStreamName') + assert order_by in {'LogStreamName', 'LastEventTime'} + + if order_by == 'LastEventTime': + assert not log_stream_name_prefix + + streams, next_token = self.logs_backend.describe_log_streams( + descending, limit, log_group_name, log_stream_name_prefix, + next_token, order_by) + return json.dumps({ + "logStreams": streams, + "nextToken": next_token + }) + + def put_log_events(self): + 
log_group_name = self._get_param('logGroupName') + log_stream_name = self._get_param('logStreamName') + log_events = self._get_param('logEvents') + sequence_token = self._get_param('sequenceToken') + + next_sequence_token = self.logs_backend.put_log_events(log_group_name, log_stream_name, log_events, sequence_token) + return json.dumps({'nextSequenceToken': next_sequence_token}) + + def get_log_events(self): + log_group_name = self._get_param('logGroupName') + log_stream_name = self._get_param('logStreamName') + start_time = self._get_param('startTime') + end_time = self._get_param("endTime") + limit = self._get_param('limit', 10000) + assert limit <= 10000 + next_token = self._get_param('nextToken') + start_from_head = self._get_param('startFromHead') + + events, next_backward_token, next_foward_token = \ + self.logs_backend.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) + + return json.dumps({ + "events": events, + "nextBackwardToken": next_backward_token, + "nextForwardToken": next_foward_token + }) + + def filter_log_events(self): + log_group_name = self._get_param('logGroupName') + log_stream_names = self._get_param('logStreamNames', []) + start_time = self._get_param('startTime') + # impl, see: http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html + filter_pattern = self._get_param('filterPattern') + interleaved = self._get_param('interleaved', False) + end_time = self._get_param("endTime") + limit = self._get_param('limit', 10000) + assert limit <= 10000 + next_token = self._get_param('nextToken') + + events, next_token, searched_streams = self.logs_backend.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) + return json.dumps({ + "events": events, + "nextToken": next_token, + "searchedLogStreams": searched_streams + }) diff --git a/moto/logs/urls.py b/moto/logs/urls.py new file mode 100644 index 
000000000..b7910e675 --- /dev/null +++ b/moto/logs/urls.py @@ -0,0 +1,9 @@ +from .responses import LogsResponse + +url_bases = [ + "https?://logs.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': LogsResponse.dispatch, +} diff --git a/moto/s3/urls.py b/moto/s3/urls.py index 8faad6282..1d439a549 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -4,7 +4,7 @@ from .responses import S3ResponseInstance url_bases = [ "https?://s3(.*).amazonaws.com", - "https?://(?P[a-zA-Z0-9\-_.]*)\.?s3(.*).amazonaws.com" + r"https?://(?P[a-zA-Z0-9\-_.]*)\.?s3(.*).amazonaws.com" ] diff --git a/moto/server.py b/moto/server.py index 966cb1614..e9f4c0904 100644 --- a/moto/server.py +++ b/moto/server.py @@ -1,22 +1,23 @@ from __future__ import unicode_literals + +import argparse import json import re import sys -import argparse -import six - -from six.moves.urllib.parse import urlencode - from threading import Lock +import six from flask import Flask from flask.testing import FlaskClient + +from six.moves.urllib.parse import urlencode from werkzeug.routing import BaseConverter from werkzeug.serving import run_simple from moto.backends import BACKENDS from moto.core.utils import convert_flask_to_httpretty_response + HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "HEAD", "PATCH"] @@ -61,7 +62,7 @@ class DomainDispatcherApplication(object): host = "instance_metadata" else: host = environ['HTTP_HOST'].split(':')[0] - if host == "localhost": + if host in {'localhost', 'motoserver'} or host.startswith("192.168."): # Fall back to parsing auth header to find service # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] try: diff --git a/moto/sns/models.py b/moto/sns/models.py index 36336aaac..5b7277d22 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -12,6 +12,8 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.sqs import sqs_backends +from moto.awslambda 
import lambda_backends + from .exceptions import ( SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter ) @@ -88,6 +90,11 @@ class Subscription(BaseModel): elif self.protocol in ['http', 'https']: post_data = self.get_post_data(message, message_id) requests.post(self.endpoint, json=post_data) + elif self.protocol == 'lambda': + # TODO: support bad function name + function_name = self.endpoint.split(":")[-1] + region = self.arn.split(':')[3] + lambda_backends[region].send_message(function_name, message) def get_post_data(self, message, message_id): return { diff --git a/requirements-dev.txt b/requirements-dev.txt index 13e4e2f20..1c001305e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -6,6 +6,7 @@ coverage flake8 freezegun flask +boto>=2.45.0 boto3>=1.4.4 botocore>=1.5.77 six>=1.9 diff --git a/setup.py b/setup.py index b83e1203d..d4ce3d5f1 100755 --- a/setup.py +++ b/setup.py @@ -1,6 +1,9 @@ #!/usr/bin/env python from __future__ import unicode_literals +import setuptools from setuptools import setup, find_packages +import sys + install_requires = [ "Jinja2>=2.8", @@ -17,12 +20,21 @@ install_requires = [ "pytz", "python-dateutil<3.0.0,>=2.1", "mock", + "docker>=2.5.1" ] extras_require = { 'server': ['flask'], } +# https://hynek.me/articles/conditional-python-dependencies/ +if int(setuptools.__version__.split(".", 1)[0]) < 18: + if sys.version_info[0:2] < (3, 3): + install_requires.append("backports.tempfile") +else: + extras_require[":python_version<'3.3'"] = ["backports.tempfile"] + + setup( name='moto', version='1.1.19', @@ -47,6 +59,9 @@ setup( "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", + "Programming Language :: Python :: 3.4", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", "License :: OSI Approved :: Apache Software License", "Topic :: Software Development :: Testing", ], diff --git 
a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 8a5d84f33..6b67ce0f0 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -12,11 +12,13 @@ import sure # noqa from freezegun import freeze_time from moto import mock_lambda, mock_s3, mock_ec2, settings +_lambda_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' -def _process_lamda(pfunc): + +def _process_lambda(func_str): zip_output = io.BytesIO() zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED) - zip_file.writestr('lambda_function.zip', pfunc) + zip_file.writestr('lambda_function.py', func_str) zip_file.close() zip_output.seek(0) return zip_output.read() @@ -27,21 +29,23 @@ def get_test_zip_file1(): def lambda_handler(event, context): return event """ - return _process_lamda(pfunc) + return _process_lambda(pfunc) def get_test_zip_file2(): - pfunc = """ + func_str = """ +import boto3 + def lambda_handler(event, context): + ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url='http://{base_url}') + volume_id = event.get('volume_id') - print('get volume details for %s' % volume_id) - import boto3 - ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url="http://{base_url}") vol = ec2.Volume(volume_id) - print('Volume - %s state=%s, size=%s' % (volume_id, vol.state, vol.size)) + + print('get volume details for %s\\nVolume - %s state=%s, size=%s' % (volume_id, volume_id, vol.state, vol.size)) return event -""".format(base_url="localhost:5000" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com") - return _process_lamda(pfunc) +""".format(base_url="motoserver:5000" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com") + return _process_lambda(func_str) @mock_lambda @@ -58,7 +62,7 @@ def test_invoke_requestresponse_function(): FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', + 
Handler='lambda_function.lambda_handler', Code={ 'ZipFile': get_test_zip_file1(), }, @@ -73,10 +77,13 @@ def test_invoke_requestresponse_function(): Payload=json.dumps(in_data)) success_result["StatusCode"].should.equal(202) - base64.b64decode(success_result["LogResult"]).decode( - 'utf-8').should.equal(json.dumps(in_data)) - json.loads(success_result["Payload"].read().decode( - 'utf-8')).should.equal(in_data) + result_obj = json.loads( + base64.b64decode(success_result["LogResult"]).decode('utf-8')) + + result_obj.should.equal(in_data) + + payload = success_result["Payload"].read().decode('utf-8') + json.loads(payload).should.equal(in_data) @mock_lambda @@ -86,7 +93,7 @@ def test_invoke_event_function(): FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', + Handler='lambda_function.lambda_handler', Code={ 'ZipFile': get_test_zip_file1(), }, @@ -110,36 +117,47 @@ def test_invoke_event_function(): 'utf-8')).should.equal({}) -@mock_ec2 -@mock_lambda -def test_invoke_function_get_ec2_volume(): - conn = boto3.resource("ec2", "us-west-2") - vol = conn.create_volume(Size=99, AvailabilityZone='us-west-2') - vol = conn.Volume(vol.id) +if settings.TEST_SERVER_MODE: + @mock_ec2 + @mock_lambda + def test_invoke_function_get_ec2_volume(): + conn = boto3.resource("ec2", "us-west-2") + vol = conn.create_volume(Size=99, AvailabilityZone='us-west-2') + vol = conn.Volume(vol.id) - conn = boto3.client('lambda', 'us-west-2') - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.handler', - Code={ - 'ZipFile': get_test_zip_file2(), - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) + conn = boto3.client('lambda', 'us-west-2') + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': get_test_zip_file2(), 
+ }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) - in_data = {'volume_id': vol.id} - result = conn.invoke(FunctionName='testFunction', - InvocationType='RequestResponse', Payload=json.dumps(in_data)) - result["StatusCode"].should.equal(202) - msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % ( - vol.id, vol.id, vol.state, vol.size, json.dumps(in_data)) - base64.b64decode(result["LogResult"]).decode('utf-8').should.equal(msg) - result['Payload'].read().decode('utf-8').should.equal(msg) + in_data = {'volume_id': vol.id} + result = conn.invoke(FunctionName='testFunction', + InvocationType='RequestResponse', Payload=json.dumps(in_data)) + result["StatusCode"].should.equal(202) + msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % ( + vol.id, vol.id, vol.state, vol.size, json.dumps(in_data)) + + log_result = base64.b64decode(result["LogResult"]).decode('utf-8') + + # fix for running under travis (TODO: investigate why it has an extra newline) + log_result = log_result.replace('\n\n', '\n') + log_result.should.equal(msg) + + payload = result['Payload'].read().decode('utf-8') + + # fix for running under travis (TODO: investigate why it has an extra newline) + payload = payload.replace('\n\n', '\n') + payload.should.equal(msg) @mock_lambda @@ -150,7 +168,7 @@ def test_create_based_on_s3_with_missing_bucket(): FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', + Handler='lambda_function.lambda_handler', Code={ 'S3Bucket': 'this-bucket-does-not-exist', 'S3Key': 'test.zip', @@ -181,7 +199,7 @@ def test_create_function_from_aws_bucket(): FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', + Handler='lambda_function.lambda_handler', Code={ 'S3Bucket': 'test-bucket', 'S3Key': 'test.zip', @@ -202,10 +220,10 @@ def test_create_function_from_aws_bucket(): result.pop('LastModified') 
result.should.equal({ 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:123456789012:function:testFunction', + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), 'Runtime': 'python2.7', 'Role': 'test-iam-role', - 'Handler': 'lambda_function.handler', + 'Handler': 'lambda_function.lambda_handler', "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), 'Description': 'test lambda function', @@ -230,7 +248,7 @@ def test_create_function_from_zipfile(): FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', + Handler='lambda_function.lambda_handler', Code={ 'ZipFile': zip_content, }, @@ -247,10 +265,10 @@ def test_create_function_from_zipfile(): result.should.equal({ 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:123456789012:function:testFunction', + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), 'Runtime': 'python2.7', 'Role': 'test-iam-role', - 'Handler': 'lambda_function.handler', + 'Handler': 'lambda_function.lambda_handler', 'CodeSize': len(zip_content), 'Description': 'test lambda function', 'Timeout': 3, @@ -281,7 +299,7 @@ def test_get_function(): FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', + Handler='lambda_function.lambda_handler', Code={ 'S3Bucket': 'test-bucket', 'S3Key': 'test.zip', @@ -301,16 +319,16 @@ def test_get_function(): result.should.equal({ "Code": { - "Location": "s3://lambda-functions.aws.amazon.com/test.zip", + "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region), "RepositoryType": "S3" }, "Configuration": { "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": "arn:aws:lambda:123456789012:function:testFunction", + "FunctionArn": 
'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", - "Handler": "lambda_function.handler", + "Handler": "lambda_function.lambda_handler", "MemorySize": 128, "Role": "test-iam-role", "Runtime": "python2.7", @@ -339,7 +357,7 @@ def test_delete_function(): FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', + Handler='lambda_function.lambda_handler', Code={ 'S3Bucket': 'test-bucket', 'S3Key': 'test.zip', @@ -383,7 +401,7 @@ def test_list_create_list_get_delete_list(): FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', + Handler='lambda_function.lambda_handler', Code={ 'S3Bucket': 'test-bucket', 'S3Key': 'test.zip', @@ -395,16 +413,16 @@ def test_list_create_list_get_delete_list(): ) expected_function_result = { "Code": { - "Location": "s3://lambda-functions.aws.amazon.com/test.zip", + "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region), "RepositoryType": "S3" }, "Configuration": { "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": "arn:aws:lambda:123456789012:function:testFunction", + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", - "Handler": "lambda_function.handler", + "Handler": "lambda_function.lambda_handler", "MemorySize": 128, "Role": "test-iam-role", "Runtime": "python2.7", @@ -437,12 +455,12 @@ def test_list_create_list_get_delete_list(): @mock_lambda def test_invoke_lambda_error(): lambda_fx = """ - def lambda_handler(event, context): - raise Exception('failsauce') +def lambda_handler(event, context): + raise Exception('failsauce') """ zip_output = io.BytesIO() zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED) - zip_file.writestr('lambda_function.zip', lambda_fx) + 
zip_file.writestr('lambda_function.py', lambda_fx) zip_file.close() zip_output.seek(0) @@ -605,13 +623,15 @@ def test_get_function_created_with_zipfile(): response['Configuration'].pop('LastModified') response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - assert 'Code' not in response + assert len(response['Code']) == 2 + assert response['Code']['RepositoryType'] == 'S3' + assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region)) response['Configuration'].should.equal( { "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": "arn:aws:lambda:123456789012:function:testFunction", + "FunctionArn":'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py new file mode 100644 index 000000000..392b3f7e9 --- /dev/null +++ b/tests/test_logs/test_logs.py @@ -0,0 +1,14 @@ +import boto3 +import sure # noqa + +from moto import mock_logs, settings + +_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' + + +@mock_logs +def test_log_group_create(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + response = conn.delete_log_group(logGroupName=log_group_name) diff --git a/travis_moto_server.sh b/travis_moto_server.sh new file mode 100755 index 000000000..902644b20 --- /dev/null +++ b/travis_moto_server.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +set -e +pip install flask +pip install /moto/dist/moto*.gz +moto_server -H 0.0.0.0 -p 5000 \ No newline at end of file diff --git a/wait_for.py b/wait_for.py new file mode 100755 index 000000000..ea3639d16 --- /dev/null +++ b/wait_for.py @@ -0,0 +1,31 @@ +import time + +try: + # py2 + import urllib2 as 
urllib + from urllib2 import URLError + import socket + import httplib + + EXCEPTIONS = (URLError, socket.error, httplib.BadStatusLine) +except ImportError: + # py3 + import urllib.request as urllib + from urllib.error import URLError + + EXCEPTIONS = (URLError, ConnectionResetError) + + +start_ts = time.time() +print("Waiting for service to come up") +while True: + try: + urllib.urlopen('http://localhost:5000/', timeout=1) + break + except EXCEPTIONS: + elapsed_s = time.time() - start_ts + if elapsed_s > 30: + raise + + print('.') + time.sleep(1) From 5bb6b98f6d0364543018b63b61c66e5813964cb9 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 27 Sep 2017 17:18:28 -0700 Subject: [PATCH 325/412] Implement Redshift Taggable Resources (#1217) - Implement create_tags, describe_tags, and delete_tags endpoints - Clusters, Parameter Groups, Security Groups, Snapshots, and Subnet Groups can all be tagged - Test Suite updated - Minor clean-up of restore_from_cluster_snapshot endpoint - Miscellaneous typo fixes --- moto/redshift/exceptions.py | 22 ++ moto/redshift/models.py | 262 +++++++++++++++++---- moto/redshift/responses.py | 103 ++++++--- tests/test_redshift/test_redshift.py | 326 +++++++++++++++++++++++++-- 4 files changed, 608 insertions(+), 105 deletions(-) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index 877e850e4..a89ed5a04 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -71,3 +71,25 @@ class ClusterSnapshotAlreadyExistsError(RedshiftClientError): 'ClusterSnapshotAlreadyExists', "Cannot create the snapshot because a snapshot with the " "identifier {0} already exists".format(snapshot_identifier)) + + +class InvalidParameterValueError(RedshiftClientError): + def __init__(self, message): + super(InvalidParameterValueError, self).__init__( + 'InvalidParameterValue', + message) + + +class ResourceNotFoundFaultError(RedshiftClientError): + + code = 404 + + def __init__(self, resource_type=None, 
resource_name=None, message=None): + if resource_type and not resource_name: + msg = "resource of type '{0}' not found.".format(resource_type) + else: + msg = "{0} ({1}) not found.".format(resource_type, resource_name) + if message: + msg = message + super(ResourceNotFoundFaultError, self).__init__( + 'ResourceNotFoundFault', msg) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 29c802fb0..fa642ef01 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -15,11 +15,51 @@ from .exceptions import ( ClusterSnapshotAlreadyExistsError, ClusterSnapshotNotFoundError, ClusterSubnetGroupNotFoundError, + InvalidParameterValueError, InvalidSubnetError, + ResourceNotFoundFaultError ) -class Cluster(BaseModel): +ACCOUNT_ID = 123456789012 + + +class TaggableResourceMixin(object): + + resource_type = None + + def __init__(self, region_name, tags): + self.region = region_name + self.tags = tags or [] + + @property + def resource_id(self): + return None + + @property + def arn(self): + return "arn:aws:redshift:{region}:{account_id}:{resource_type}:{resource_id}".format( + region=self.region, + account_id=ACCOUNT_ID, + resource_type=self.resource_type, + resource_id=self.resource_id) + + def create_tags(self, tags): + new_keys = [tag_set['Key'] for tag_set in tags] + self.tags = [tag_set for tag_set in self.tags + if tag_set['Key'] not in new_keys] + self.tags.extend(tags) + return self.tags + + def delete_tags(self, tag_keys): + self.tags = [tag_set for tag_set in self.tags + if tag_set['Key'] not in tag_keys] + return self.tags + + +class Cluster(TaggableResourceMixin, BaseModel): + + resource_type = 'cluster' def __init__(self, redshift_backend, cluster_identifier, node_type, master_username, master_user_password, db_name, cluster_type, cluster_security_groups, @@ -27,7 +67,8 @@ class Cluster(BaseModel): preferred_maintenance_window, cluster_parameter_group_name, automated_snapshot_retention_period, port, cluster_version, allow_version_upgrade, 
number_of_nodes, publicly_accessible, - encrypted, region): + encrypted, region_name, tags=None): + super(Cluster, self).__init__(region_name, tags) self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier self.status = 'available' @@ -57,13 +98,12 @@ class Cluster(BaseModel): else: self.cluster_security_groups = ["Default"] - self.region = region if availability_zone: self.availability_zone = availability_zone else: # This could probably be smarter, but there doesn't appear to be a # way to pull AZs for a region in boto - self.availability_zone = region + "a" + self.availability_zone = region_name + "a" if cluster_type == 'single-node': self.number_of_nodes = 1 @@ -106,7 +146,7 @@ class Cluster(BaseModel): number_of_nodes=properties.get('NumberOfNodes'), publicly_accessible=properties.get("PubliclyAccessible"), encrypted=properties.get("Encrypted"), - region=region_name, + region_name=region_name, ) return cluster @@ -149,6 +189,10 @@ class Cluster(BaseModel): if parameter_group.cluster_parameter_group_name in self.cluster_parameter_group_name ] + @property + def resource_id(self): + return self.cluster_identifier + def to_json(self): return { "MasterUsername": self.master_username, @@ -180,18 +224,21 @@ class Cluster(BaseModel): "ClusterIdentifier": self.cluster_identifier, "AllowVersionUpgrade": self.allow_version_upgrade, "Endpoint": { - "Address": '{}.{}.redshift.amazonaws.com'.format( - self.cluster_identifier, - self.region), + "Address": self.endpoint, "Port": self.port }, - "PendingModifiedValues": [] + "PendingModifiedValues": [], + "Tags": self.tags } -class SubnetGroup(BaseModel): +class SubnetGroup(TaggableResourceMixin, BaseModel): - def __init__(self, ec2_backend, cluster_subnet_group_name, description, subnet_ids): + resource_type = 'subnetgroup' + + def __init__(self, ec2_backend, cluster_subnet_group_name, description, subnet_ids, + region_name, tags=None): + super(SubnetGroup, self).__init__(region_name, tags) 
self.ec2_backend = ec2_backend self.cluster_subnet_group_name = cluster_subnet_group_name self.description = description @@ -208,6 +255,7 @@ class SubnetGroup(BaseModel): cluster_subnet_group_name=resource_name, description=properties.get("Description"), subnet_ids=properties.get("SubnetIds", []), + region_name=region_name ) return subnet_group @@ -219,6 +267,10 @@ class SubnetGroup(BaseModel): def vpc_id(self): return self.subnets[0].vpc_id + @property + def resource_id(self): + return self.cluster_subnet_group_name + def to_json(self): return { "VpcId": self.vpc_id, @@ -232,27 +284,39 @@ class SubnetGroup(BaseModel): "Name": subnet.availability_zone }, } for subnet in self.subnets], + "Tags": self.tags } -class SecurityGroup(BaseModel): +class SecurityGroup(TaggableResourceMixin, BaseModel): - def __init__(self, cluster_security_group_name, description): + resource_type = 'securitygroup' + + def __init__(self, cluster_security_group_name, description, region_name, tags=None): + super(SecurityGroup, self).__init__(region_name, tags) self.cluster_security_group_name = cluster_security_group_name self.description = description + @property + def resource_id(self): + return self.cluster_security_group_name + def to_json(self): return { "EC2SecurityGroups": [], "IPRanges": [], "Description": self.description, "ClusterSecurityGroupName": self.cluster_security_group_name, + "Tags": self.tags } -class ParameterGroup(BaseModel): +class ParameterGroup(TaggableResourceMixin, BaseModel): - def __init__(self, cluster_parameter_group_name, group_family, description): + resource_type = 'parametergroup' + + def __init__(self, cluster_parameter_group_name, group_family, description, region_name, tags=None): + super(ParameterGroup, self).__init__(region_name, tags) self.cluster_parameter_group_name = cluster_parameter_group_name self.group_family = group_family self.description = description @@ -266,34 +330,41 @@ class ParameterGroup(BaseModel): 
cluster_parameter_group_name=resource_name, description=properties.get("Description"), group_family=properties.get("ParameterGroupFamily"), + region_name=region_name ) return parameter_group + @property + def resource_id(self): + return self.cluster_parameter_group_name + def to_json(self): return { "ParameterGroupFamily": self.group_family, "Description": self.description, "ParameterGroupName": self.cluster_parameter_group_name, + "Tags": self.tags } -class Snapshot(BaseModel): +class Snapshot(TaggableResourceMixin, BaseModel): - def __init__(self, cluster, snapshot_identifier, tags=None): + resource_type = 'snapshot' + + def __init__(self, cluster, snapshot_identifier, region_name, tags=None): + super(Snapshot, self).__init__(region_name, tags) self.cluster = copy.copy(cluster) self.snapshot_identifier = snapshot_identifier self.snapshot_type = 'manual' self.status = 'available' - self.tags = tags or [] self.create_time = iso_8601_datetime_with_milliseconds( datetime.datetime.now()) @property - def arn(self): - return "arn:aws:redshift:{0}:1234567890:snapshot:{1}/{2}".format( - self.cluster.region, - self.cluster.cluster_identifier, - self.snapshot_identifier) + def resource_id(self): + return "{cluster_id}/{snapshot_id}".format( + cluster_id=self.cluster.cluster_identifier, + snapshot_id=self.snapshot_identifier) def to_json(self): return { @@ -315,26 +386,36 @@ class Snapshot(BaseModel): class RedshiftBackend(BaseBackend): - def __init__(self, ec2_backend): + def __init__(self, ec2_backend, region_name): + self.region = region_name self.clusters = {} self.subnet_groups = {} self.security_groups = { - "Default": SecurityGroup("Default", "Default Redshift Security Group") + "Default": SecurityGroup("Default", "Default Redshift Security Group", self.region) } self.parameter_groups = { "default.redshift-1.0": ParameterGroup( "default.redshift-1.0", "redshift-1.0", "Default Redshift parameter group", + self.region ) } self.ec2_backend = ec2_backend self.snapshots = 
OrderedDict() + self.RESOURCE_TYPE_MAP = { + 'cluster': self.clusters, + 'parametergroup': self.parameter_groups, + 'securitygroup': self.security_groups, + 'snapshot': self.snapshots, + 'subnetgroup': self.subnet_groups + } def reset(self): ec2_backend = self.ec2_backend + region_name = self.region self.__dict__ = {} - self.__init__(ec2_backend) + self.__init__(ec2_backend, region_name) def create_cluster(self, **cluster_kwargs): cluster_identifier = cluster_kwargs['cluster_identifier'] @@ -373,9 +454,10 @@ class RedshiftBackend(BaseBackend): return self.clusters.pop(cluster_identifier) raise ClusterNotFoundError(cluster_identifier) - def create_cluster_subnet_group(self, cluster_subnet_group_name, description, subnet_ids): + def create_cluster_subnet_group(self, cluster_subnet_group_name, description, subnet_ids, + region_name, tags=None): subnet_group = SubnetGroup( - self.ec2_backend, cluster_subnet_group_name, description, subnet_ids) + self.ec2_backend, cluster_subnet_group_name, description, subnet_ids, region_name, tags) self.subnet_groups[cluster_subnet_group_name] = subnet_group return subnet_group @@ -393,9 +475,9 @@ class RedshiftBackend(BaseBackend): return self.subnet_groups.pop(subnet_identifier) raise ClusterSubnetGroupNotFoundError(subnet_identifier) - def create_cluster_security_group(self, cluster_security_group_name, description): + def create_cluster_security_group(self, cluster_security_group_name, description, region_name, tags=None): security_group = SecurityGroup( - cluster_security_group_name, description) + cluster_security_group_name, description, region_name, tags) self.security_groups[cluster_security_group_name] = security_group return security_group @@ -414,9 +496,9 @@ class RedshiftBackend(BaseBackend): raise ClusterSecurityGroupNotFoundError(security_group_identifier) def create_cluster_parameter_group(self, cluster_parameter_group_name, - group_family, description): + group_family, description, region_name, tags=None): 
parameter_group = ParameterGroup( - cluster_parameter_group_name, group_family, description) + cluster_parameter_group_name, group_family, description, region_name, tags) self.parameter_groups[cluster_parameter_group_name] = parameter_group return parameter_group @@ -435,17 +517,17 @@ class RedshiftBackend(BaseBackend): return self.parameter_groups.pop(parameter_group_name) raise ClusterParameterGroupNotFoundError(parameter_group_name) - def create_snapshot(self, cluster_identifier, snapshot_identifier, tags): + def create_cluster_snapshot(self, cluster_identifier, snapshot_identifier, region_name, tags): cluster = self.clusters.get(cluster_identifier) if not cluster: raise ClusterNotFoundError(cluster_identifier) if self.snapshots.get(snapshot_identifier) is not None: raise ClusterSnapshotAlreadyExistsError(snapshot_identifier) - snapshot = Snapshot(cluster, snapshot_identifier, tags) + snapshot = Snapshot(cluster, snapshot_identifier, region_name, tags) self.snapshots[snapshot_identifier] = snapshot return snapshot - def describe_snapshots(self, cluster_identifier, snapshot_identifier): + def describe_cluster_snapshots(self, cluster_identifier=None, snapshot_identifier=None): if cluster_identifier: for snapshot in self.snapshots.values(): if snapshot.cluster.cluster_identifier == cluster_identifier: @@ -459,7 +541,7 @@ class RedshiftBackend(BaseBackend): return self.snapshots.values() - def delete_snapshot(self, snapshot_identifier): + def delete_cluster_snapshot(self, snapshot_identifier): if snapshot_identifier not in self.snapshots: raise ClusterSnapshotNotFoundError(snapshot_identifier) @@ -467,23 +549,105 @@ class RedshiftBackend(BaseBackend): deleted_snapshot.status = 'deleted' return deleted_snapshot - def describe_tags_for_resource_type(self, resource_type): + def restore_from_cluster_snapshot(self, **kwargs): + snapshot_identifier = kwargs.pop('snapshot_identifier') + snapshot = self.describe_cluster_snapshots(snapshot_identifier=snapshot_identifier)[0] 
+ create_kwargs = { + "node_type": snapshot.cluster.node_type, + "master_username": snapshot.cluster.master_username, + "master_user_password": snapshot.cluster.master_user_password, + "db_name": snapshot.cluster.db_name, + "cluster_type": 'multi-node' if snapshot.cluster.number_of_nodes > 1 else 'single-node', + "availability_zone": snapshot.cluster.availability_zone, + "port": snapshot.cluster.port, + "cluster_version": snapshot.cluster.cluster_version, + "number_of_nodes": snapshot.cluster.number_of_nodes, + "encrypted": snapshot.cluster.encrypted, + "tags": snapshot.cluster.tags + } + create_kwargs.update(kwargs) + return self.create_cluster(**create_kwargs) + + def _get_resource_from_arn(self, arn): + try: + arn_breakdown = arn.split(':') + resource_type = arn_breakdown[5] + if resource_type == 'snapshot': + resource_id = arn_breakdown[6].split('/')[1] + else: + resource_id = arn_breakdown[6] + except IndexError: + resource_type = resource_id = arn + resources = self.RESOURCE_TYPE_MAP.get(resource_type) + if resources is None: + message = ( + "Tagging is not supported for this type of resource: '{0}' " + "(the ARN is potentially malformed, please check the ARN " + "documentation for more information)".format(resource_type)) + raise ResourceNotFoundFaultError(message=message) + try: + resource = resources[resource_id] + except KeyError: + raise ResourceNotFoundFaultError(resource_type, resource_id) + else: + return resource + + @staticmethod + def _describe_tags_for_resources(resources): tagged_resources = [] - if resource_type == 'Snapshot': - for snapshot in self.snapshots.values(): - for tag in snapshot.tags: - data = { - 'ResourceName': snapshot.arn, - 'ResourceType': 'snapshot', - 'Tag': { - 'Key': tag['Key'], - 'Value': tag['Value'] - } + for resource in resources: + for tag in resource.tags: + data = { + 'ResourceName': resource.arn, + 'ResourceType': resource.resource_type, + 'Tag': { + 'Key': tag['Key'], + 'Value': tag['Value'] } - 
tagged_resources.append(data) + } + tagged_resources.append(data) return tagged_resources + def _describe_tags_for_resource_type(self, resource_type): + resources = self.RESOURCE_TYPE_MAP.get(resource_type) + if not resources: + raise ResourceNotFoundFaultError(resource_type=resource_type) + return self._describe_tags_for_resources(resources.values()) + + def _describe_tags_for_resource_name(self, resource_name): + resource = self._get_resource_from_arn(resource_name) + return self._describe_tags_for_resources([resource]) + + def create_tags(self, resource_name, tags): + resource = self._get_resource_from_arn(resource_name) + resource.create_tags(tags) + + def describe_tags(self, resource_name, resource_type): + if resource_name and resource_type: + raise InvalidParameterValueError( + "You cannot filter a list of resources using an Amazon " + "Resource Name (ARN) and a resource type together in the " + "same request. Retry the request using either an ARN or " + "a resource type, but not both.") + if resource_type: + return self._describe_tags_for_resource_type(resource_type.lower()) + if resource_name: + return self._describe_tags_for_resource_name(resource_name) + # If name and type are not specified, return all tagged resources. 
+ # TODO: Implement aws marker pagination + tagged_resources = [] + for resource_type in self.RESOURCE_TYPE_MAP: + try: + tagged_resources += self._describe_tags_for_resource_type(resource_type) + except ResourceNotFoundFaultError: + pass + return tagged_resources + + def delete_tags(self, resource_name, tag_keys): + resource = self._get_resource_from_arn(resource_name) + resource.delete_tags(tag_keys) + redshift_backends = {} for region in boto.redshift.regions(): - redshift_backends[region.name] = RedshiftBackend(ec2_backends[region.name]) + redshift_backends[region.name] = RedshiftBackend(ec2_backends[region.name], region.name) diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index 411569d01..0dbf35cb2 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -57,6 +57,15 @@ class RedshiftResponse(BaseResponse): count += 1 return unpacked_list + def unpack_list_params(self, label): + unpacked_list = list() + count = 1 + while self._get_param('{0}.{1}'.format(label, count)): + unpacked_list.append(self._get_param( + '{0}.{1}'.format(label, count))) + count += 1 + return unpacked_list + def create_cluster(self): cluster_kwargs = { "cluster_identifier": self._get_param('ClusterIdentifier'), @@ -78,7 +87,8 @@ class RedshiftResponse(BaseResponse): "number_of_nodes": self._get_int_param('NumberOfNodes'), "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), - "region": self.region, + "region_name": self.region, + "tags": self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) } cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json() cluster['ClusterStatus'] = 'creating' @@ -94,23 +104,8 @@ class RedshiftResponse(BaseResponse): }) def restore_from_cluster_snapshot(self): - snapshot_identifier = self._get_param('SnapshotIdentifier') - snapshots = self.redshift_backend.describe_snapshots( - None, - snapshot_identifier) - snapshot = snapshots[0] - 
kwargs_from_snapshot = { - "node_type": snapshot.cluster.node_type, - "master_username": snapshot.cluster.master_username, - "master_user_password": snapshot.cluster.master_user_password, - "db_name": snapshot.cluster.db_name, - "cluster_type": 'multi-node' if snapshot.cluster.number_of_nodes > 1 else 'single-node', - "availability_zone": snapshot.cluster.availability_zone, - "port": snapshot.cluster.port, - "cluster_version": snapshot.cluster.cluster_version, - "number_of_nodes": snapshot.cluster.number_of_nodes, - } - kwargs_from_request = { + restore_kwargs = { + "snapshot_identifier": self._get_param('SnapshotIdentifier'), "cluster_identifier": self._get_param('ClusterIdentifier'), "port": self._get_int_param('Port'), "availability_zone": self._get_param('AvailabilityZone'), @@ -129,12 +124,9 @@ class RedshiftResponse(BaseResponse): 'PreferredMaintenanceWindow'), "automated_snapshot_retention_period": self._get_int_param( 'AutomatedSnapshotRetentionPeriod'), - "region": self.region, - "encrypted": False, + "region_name": self.region, } - kwargs_from_snapshot.update(kwargs_from_request) - cluster_kwargs = kwargs_from_snapshot - cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json() + cluster = self.redshift_backend.restore_from_cluster_snapshot(**restore_kwargs).to_json() cluster['ClusterStatus'] = 'creating' return self.get_response({ "RestoreFromClusterSnapshotResponse": { @@ -230,11 +222,14 @@ class RedshiftResponse(BaseResponse): # according to the AWS documentation if not subnet_ids: subnet_ids = self._get_multi_param('SubnetIds.SubnetIdentifier') + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) subnet_group = self.redshift_backend.create_cluster_subnet_group( cluster_subnet_group_name=cluster_subnet_group_name, description=description, subnet_ids=subnet_ids, + region_name=self.region, + tags=tags ) return self.get_response({ @@ -280,10 +275,13 @@ class RedshiftResponse(BaseResponse): cluster_security_group_name = 
self._get_param( 'ClusterSecurityGroupName') description = self._get_param('Description') + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) security_group = self.redshift_backend.create_cluster_security_group( cluster_security_group_name=cluster_security_group_name, description=description, + region_name=self.region, + tags=tags ) return self.get_response({ @@ -331,11 +329,14 @@ class RedshiftResponse(BaseResponse): cluster_parameter_group_name = self._get_param('ParameterGroupName') group_family = self._get_param('ParameterGroupFamily') description = self._get_param('Description') + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) parameter_group = self.redshift_backend.create_cluster_parameter_group( cluster_parameter_group_name, group_family, description, + self.region, + tags ) return self.get_response({ @@ -381,11 +382,12 @@ class RedshiftResponse(BaseResponse): def create_cluster_snapshot(self): cluster_identifier = self._get_param('ClusterIdentifier') snapshot_identifier = self._get_param('SnapshotIdentifier') - tags = self.unpack_complex_list_params( - 'Tags.Tag', ('Key', 'Value')) - snapshot = self.redshift_backend.create_snapshot(cluster_identifier, - snapshot_identifier, - tags) + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + + snapshot = self.redshift_backend.create_cluster_snapshot(cluster_identifier, + snapshot_identifier, + self.region, + tags) return self.get_response({ 'CreateClusterSnapshotResponse': { "CreateClusterSnapshotResult": { @@ -399,9 +401,9 @@ class RedshiftResponse(BaseResponse): def describe_cluster_snapshots(self): cluster_identifier = self._get_param('ClusterIdentifier') - snapshot_identifier = self._get_param('DBSnapshotIdentifier') - snapshots = self.redshift_backend.describe_snapshots(cluster_identifier, - snapshot_identifier) + snapshot_identifier = self._get_param('SnapshotIdentifier') + snapshots = 
self.redshift_backend.describe_cluster_snapshots(cluster_identifier, + snapshot_identifier) return self.get_response({ "DescribeClusterSnapshotsResponse": { "DescribeClusterSnapshotsResult": { @@ -415,7 +417,7 @@ class RedshiftResponse(BaseResponse): def delete_cluster_snapshot(self): snapshot_identifier = self._get_param('SnapshotIdentifier') - snapshot = self.redshift_backend.delete_snapshot(snapshot_identifier) + snapshot = self.redshift_backend.delete_cluster_snapshot(snapshot_identifier) return self.get_response({ "DeleteClusterSnapshotResponse": { @@ -428,13 +430,26 @@ class RedshiftResponse(BaseResponse): } }) + def create_tags(self): + resource_name = self._get_param('ResourceName') + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + + self.redshift_backend.create_tags(resource_name, tags) + + return self.get_response({ + "CreateTagsResponse": { + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + def describe_tags(self): + resource_name = self._get_param('ResourceName') resource_type = self._get_param('ResourceType') - if resource_type != 'Snapshot': - raise NotImplementedError( - "The describe_tags action has not been fully implemented.") - tagged_resources = \ - self.redshift_backend.describe_tags_for_resource_type(resource_type) + + tagged_resources = self.redshift_backend.describe_tags(resource_name, + resource_type) return self.get_response({ "DescribeTagsResponse": { "DescribeTagsResult": { @@ -445,3 +460,17 @@ class RedshiftResponse(BaseResponse): } } }) + + def delete_tags(self): + resource_name = self._get_param('ResourceName') + tag_keys = self.unpack_list_params('TagKeys.TagKey') + + self.redshift_backend.delete_tags(resource_name, tag_keys) + + return self.get_response({ + "DeleteTagsResponse": { + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 
1df503de2..dca475374 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -106,7 +106,7 @@ def test_create_single_node_cluster(): @mock_redshift_deprecated -def test_default_cluster_attibutes(): +def test_default_cluster_attributes(): conn = boto.redshift.connect_to_region("us-east-1") cluster_identifier = 'my_cluster' @@ -267,7 +267,7 @@ def test_create_cluster_with_parameter_group(): @mock_redshift_deprecated -def test_describe_non_existant_cluster(): +def test_describe_non_existent_cluster(): conn = boto.redshift.connect_to_region("us-east-1") conn.describe_clusters.when.called_with( "not-a-cluster").should.throw(ClusterNotFound) @@ -391,7 +391,7 @@ def test_create_invalid_cluster_subnet_group(): @mock_redshift_deprecated -def test_describe_non_existant_subnet_group(): +def test_describe_non_existent_subnet_group(): conn = boto.redshift.connect_to_region("us-east-1") conn.describe_cluster_subnet_groups.when.called_with( "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) @@ -447,7 +447,7 @@ def test_create_cluster_security_group(): @mock_redshift_deprecated -def test_describe_non_existant_security_group(): +def test_describe_non_existent_security_group(): conn = boto.redshift.connect_to_region("us-east-1") conn.describe_cluster_security_groups.when.called_with( "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) @@ -498,7 +498,7 @@ def test_create_cluster_parameter_group(): @mock_redshift_deprecated -def test_describe_non_existant_parameter_group(): +def test_describe_non_existent_parameter_group(): conn = boto.redshift.connect_to_region("us-east-1") conn.describe_cluster_parameter_groups.when.called_with( "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) @@ -530,6 +530,17 @@ def test_delete_cluster_parameter_group(): "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + +@mock_redshift +def test_create_cluster_snapshot_of_non_existent_cluster(): + client = 
boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'non-existent-cluster-id' + client.create_cluster_snapshot.when.called_with( + SnapshotIdentifier='snapshot-id', + ClusterIdentifier=cluster_identifier, + ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) + + @mock_redshift def test_create_cluster_snapshot(): client = boto3.client('redshift', region_name='us-east-1') @@ -560,6 +571,52 @@ def test_create_cluster_snapshot(): snapshot['MasterUsername'].should.equal('username') +@mock_redshift +def test_describe_cluster_snapshots(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + ) + + resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) + resp_snap = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier) + resp_clust['Snapshots'][0].should.equal(resp_snap['Snapshots'][0]) + snapshot = resp_snap['Snapshots'][0] + snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) + snapshot['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot['NumberOfNodes'].should.equal(1) + snapshot['NodeType'].should.equal('ds2.xlarge') + snapshot['MasterUsername'].should.equal('username') + + +@mock_redshift +def test_describe_cluster_snapshots_not_found_error(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.describe_cluster_snapshots.when.called_with( + ClusterIdentifier=cluster_identifier, + ).should.throw(ClientError, 'Cluster {} not 
found.'.format(cluster_identifier)) + + client.describe_cluster_snapshots.when.called_with( + SnapshotIdentifier=snapshot_identifier + ).should.throw(ClientError, 'Snapshot {} not found.'.format(snapshot_identifier)) + + @mock_redshift def test_delete_cluster_snapshot(): client = boto3.client('redshift', region_name='us-east-1') @@ -652,6 +709,15 @@ def test_create_cluster_from_snapshot(): new_cluster['Endpoint']['Port'].should.equal(1234) +@mock_redshift +def test_create_cluster_from_non_existent_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + client.restore_from_cluster_snapshot.when.called_with( + ClusterIdentifier='cluster-id', + SnapshotIdentifier='non-existent-snapshot', + ).should.throw(ClientError, 'Snapshot non-existent-snapshot not found.') + + @mock_redshift def test_create_cluster_status_update(): client = boto3.client('redshift', region_name='us-east-1') @@ -673,12 +739,126 @@ def test_create_cluster_status_update(): @mock_redshift -def test_describe_snapshot_tags(): +def test_describe_tags_with_resource_type(): client = boto3.client('redshift', region_name='us-east-1') cluster_identifier = 'my_cluster' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) snapshot_identifier = 'my_snapshot' + snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'snapshot:{}/{}'.format(cluster_identifier, + snapshot_identifier) tag_key = 'test-tag-key' - tag_value = 'teat-tag-value' + tag_value = 'test-tag-value' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceType='cluster') + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + 
tagged_resources[0]['ResourceType'].should.equal('cluster') + tagged_resources[0]['ResourceName'].should.equal(cluster_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceType='snapshot') + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('snapshot') + tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + +@mock_redshift +def test_describe_tags_cannot_specify_resource_type_and_resource_name(): + client = boto3.client('redshift', region_name='us-east-1') + resource_name = 'arn:aws:redshift:us-east-1:123456789012:cluster:cluster-id' + resource_type = 'cluster' + client.describe_tags.when.called_with( + ResourceName=resource_name, + ResourceType=resource_type + ).should.throw(ClientError, 'using either an ARN or a resource type') + + +@mock_redshift +def test_describe_tags_with_resource_name(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + snapshot_identifier = 'snapshot-id' + snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'snapshot:{}/{}'.format(cluster_identifier, + snapshot_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + 
tags_response = client.describe_tags(ResourceName=cluster_arn) + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('cluster') + tagged_resources[0]['ResourceName'].should.equal(cluster_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceName=snapshot_arn) + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('snapshot') + tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + +@mock_redshift +def test_create_tags(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + num_tags = 5 + tags = [] + for i in range(0, num_tags): + tag = {'Key': '{}-{}'.format(tag_key, i), + 'Value': '{}-{}'.format(tag_value, i)} + tags.append(tag) client.create_cluster( DBName='test-db', @@ -688,17 +868,125 @@ def test_describe_snapshot_tags(): MasterUsername='username', MasterUserPassword='password', ) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - Tags=[{'Key': tag_key, - 'Value': tag_value}] + client.create_tags( + ResourceName=cluster_arn, + Tags=tags ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + 
list(cluster['Tags']).should.have.length_of(num_tags) + response = client.describe_tags(ResourceName=cluster_arn) + list(response['TaggedResources']).should.have.length_of(num_tags) + + +@mock_redshift +def test_delete_tags(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + tags = [] + for i in range(1, 2): + tag = {'Key': '{}-{}'.format(tag_key, i), + 'Value': '{}-{}'.format(tag_value, i)} + tags.append(tag) + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=tags + ) + client.delete_tags( + ResourceName=cluster_arn, + TagKeys=[tag['Key'] for tag in tags + if tag['Key'] != '{}-1'.format(tag_key)] + ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + list(cluster['Tags']).should.have.length_of(1) + response = client.describe_tags(ResourceName=cluster_arn) + list(response['TaggedResources']).should.have.length_of(1) + + +@mock_ec2 +@mock_redshift +def test_describe_tags_all_resource_types(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') + client = boto3.client('redshift', region_name='us-east-1') + response = client.describe_tags() + list(response['TaggedResources']).should.have.length_of(0) + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id], + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group1", + Description="This is my security group", 
+ Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster( + DBName='test', + ClusterIdentifier='my_cluster', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_snapshot( + SnapshotIdentifier='my_snapshot', + ClusterIdentifier='my_cluster', + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_parameter_group( + ParameterGroupName="my_parameter_group", + ParameterGroupFamily="redshift-1.0", + Description="This is my parameter group", + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + response = client.describe_tags() + expected_types = ['cluster', 'parametergroup', 'securitygroup', 'snapshot', 'subnetgroup'] + tagged_resources = response['TaggedResources'] + returned_types = [resource['ResourceType'] for resource in tagged_resources] + list(tagged_resources).should.have.length_of(len(expected_types)) + set(returned_types).should.equal(set(expected_types)) + + +@mock_redshift +def test_tagged_resource_not_found_error(): + client = boto3.client('redshift', region_name='us-east-1') + + cluster_arn = 'arn:aws:redshift:us-east-1::cluster:fake' + client.describe_tags.when.called_with( + ResourceName=cluster_arn + ).should.throw(ClientError, 'cluster (fake) not found.') + + snapshot_arn = 'arn:aws:redshift:us-east-1::snapshot:cluster-id/snap-id' + client.delete_tags.when.called_with( + ResourceName=snapshot_arn, + TagKeys=['test'] + ).should.throw(ClientError, 'snapshot (snap-id) not found.') + + client.describe_tags.when.called_with( + ResourceType='cluster' + ).should.throw(ClientError, "resource of type 'cluster' not found.") + + client.describe_tags.when.called_with( + ResourceName='bad:arn' + ).should.throw(ClientError, "Tagging is not supported for this type of resource") - tags_response = client.describe_tags(ResourceType='Snapshot') - tagged_resources = 
tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) From f95d72c37c69bf1a4b042d36bd623973d7970218 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 29 Sep 2017 23:29:36 +0100 Subject: [PATCH 326/412] Finialised create compute environment + describe environments --- moto/batch/models.py | 160 +++++++++++++++++++++++++++++---- moto/batch/responses.py | 19 +++- moto/batch/urls.py | 1 + tests/test_batch/test_batch.py | 84 +++++++++++++++-- 4 files changed, 237 insertions(+), 27 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index c7def48d1..7ed75e749 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -1,13 +1,18 @@ from __future__ import unicode_literals import boto3 import re +from itertools import cycle +import six +import uuid from moto.core import BaseBackend, BaseModel from moto.iam import iam_backends from moto.ec2 import ec2_backends +from moto.ecs import ecs_backends from .exceptions import InvalidParameterValueException, InternalFailure from .utils import make_arn_for_compute_env from moto.ec2.exceptions import InvalidSubnetIdError +from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES from moto.iam.exceptions import IAMNotFoundException @@ -17,13 +22,22 @@ COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile(r'^[A-Za-z0-9_]{1,128}$') class ComputeEnvironment(BaseModel): def __init__(self, compute_environment_name, _type, state, compute_resources, service_role, region_name): - self.compute_environment_name = compute_environment_name + self.name = compute_environment_name self.type = _type self.state = state self.compute_resources = compute_resources self.service_role = service_role self.arn = make_arn_for_compute_env(DEFAULT_ACCOUNT_ID, compute_environment_name, region_name) + self.instances = [] + self.ecs_arn = None + + def add_instance(self, instance): + 
self.instances.append(instance) + + def set_ecs_arn(self, arn): + self.ecs_arn = arn + class BatchBackend(BaseBackend): def __init__(self, region_name=None): @@ -48,6 +62,14 @@ class BatchBackend(BaseBackend): """ return ec2_backends[self.region_name] + @property + def ecs_backend(self): + """ + :return: ECS Backend + :rtype: moto.ecs.models.EC2ContainerServiceBackend + """ + return ecs_backends[self.region_name] + def reset(self): region_name = self.region_name self.__dict__ = {} @@ -62,6 +84,33 @@ class BatchBackend(BaseBackend): return comp_env return None + def describe_compute_environments(self, environments=None, max_results=None, next_token=None): + envs = set() + if environments is not None: + envs = set(environments) + + result = [] + for arn, environment in self._compute_environments.items(): + # Filter shortcut + if len(envs) > 0 and arn not in envs and environment.name not in envs: + continue + + json_part = { + 'computeEnvironmentArn': arn, + 'computeEnvironmentName': environment.name, + 'ecsClusterArn': environment.ecs_arn, + 'serviceRole': environment.service_role, + 'state': environment.state, + 'type': environment.type, + 'status': 'VALID' + } + if environment.type == 'MANAGED': + json_part['computeResources'] = environment.compute_resources + + result.append(json_part) + + return result + def create_compute_environment(self, compute_environment_name, _type, state, compute_resources, service_role): # Validate if COMPUTE_ENVIRONMENT_NAME_REGEX.match(compute_environment_name) is None: @@ -95,21 +144,53 @@ class BatchBackend(BaseBackend): ) self._compute_environments[new_comp_env.arn] = new_comp_env - # TODO scale out if MANAGED and we have compute instance types + # Ok by this point, everything is legit, so if its Managed then start some instances + if _type == 'MANAGED': + cpus = int(compute_resources.get('desiredvCpus', compute_resources['minvCpus'])) + instance_types = compute_resources['instanceTypes'] + needed_instance_types = 
self.find_min_instances_to_meet_vcpus(instance_types, cpus) + # Create instances + + # Will loop over and over so we get decent subnet coverage + subnet_cycle = cycle(compute_resources['subnets']) + + for instance_type in needed_instance_types: + reservation = self.ec2_backend.add_instances( + image_id='ami-ecs-optimised', # Todo import AMIs + count=1, + user_data=None, + security_group_names=[], + instance_type=instance_type, + region_name=self.region_name, + subnet_id=six.next(subnet_cycle), + key_name=compute_resources.get('ec2KeyPair', 'AWS_OWNED'), + security_group_ids=compute_resources['securityGroupIds'] + ) + + new_comp_env.add_instance(reservation.instances[0]) + + # Create ECS cluster + # Should be of format P2OnDemand_Batch_UUID + cluster_name = 'OnDemand_Batch_' + str(uuid.uuid4()) + ecs_cluster = self.ecs_backend.create_cluster(cluster_name) + new_comp_env.set_ecs_arn(ecs_cluster.arn) return compute_environment_name, new_comp_env.arn def _validate_compute_resources(self, cr): - if 'instanceRole' not in cr: - raise InvalidParameterValueException('computeResources must contain instanceRole') - elif self.iam_backend.get_role_by_arn(cr['instanceRole']) is None: + """ + Checks contents of sub dictionary for managed clusters + + :param cr: computeResources + :type cr: dict + """ + for param in ('instanceRole', 'maxvCpus', 'minvCpus', 'instanceTypes', 'securityGroupIds', 'subnets', 'type'): + if param not in cr: + raise InvalidParameterValueException('computeResources must contain {0}'.format(param)) + + if self.iam_backend.get_role_by_arn(cr['instanceRole']) is None: raise InvalidParameterValueException('could not find instanceRole {0}'.format(cr['instanceRole'])) - # TODO move the not in checks to a loop, or create a json schema validator class - if 'maxvCpus' not in cr: - raise InvalidParameterValueException('computeResources must contain maxVCpus') - if 'minvCpus' not in cr: - raise InvalidParameterValueException('computeResources must contain minVCpus') 
if cr['maxvCpus'] < 0: raise InvalidParameterValueException('maxVCpus must be positive') if cr['minvCpus'] < 0: @@ -117,22 +198,18 @@ class BatchBackend(BaseBackend): if cr['maxvCpus'] < cr['minvCpus']: raise InvalidParameterValueException('maxVCpus must be greater than minvCpus') - # TODO check instance types when that logic exists - if 'instanceTypes' not in cr: - raise InvalidParameterValueException('computeResources must contain instanceTypes') if len(cr['instanceTypes']) == 0: raise InvalidParameterValueException('At least 1 instance type must be provided') + for instance_type in cr['instanceTypes']: + if instance_type not in EC2_INSTANCE_TYPES: + raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type)) - if 'securityGroupIds' not in cr: - raise InvalidParameterValueException('computeResources must contain securityGroupIds') for sec_id in cr['securityGroupIds']: if self.ec2_backend.get_security_group_from_id(sec_id) is None: raise InvalidParameterValueException('security group {0} does not exist'.format(sec_id)) if len(cr['securityGroupIds']) == 0: raise InvalidParameterValueException('At least 1 security group must be provided') - if 'subnets' not in cr: - raise InvalidParameterValueException('computeResources must contain subnets') for subnet_id in cr['subnets']: try: self.ec2_backend.get_subnet(subnet_id) @@ -141,14 +218,59 @@ class BatchBackend(BaseBackend): if len(cr['subnets']) == 0: raise InvalidParameterValueException('At least 1 subnet must be provided') - if 'type' not in cr: - raise InvalidParameterValueException('computeResources must contain type') if cr['type'] not in ('EC2', 'SPOT'): raise InvalidParameterValueException('computeResources.type must be either EC2 | SPOT') if cr['type'] == 'SPOT': raise InternalFailure('SPOT NOT SUPPORTED YET') + @staticmethod + def find_min_instances_to_meet_vcpus(instance_types, target): + """ + Finds the minimum needed instances to meet a vcpu target + + :param 
instance_types: Instance types, like ['t2.medium', 't2.small'] + :type instance_types: list of str + :param target: VCPU target + :type target: float + :return: List of instance types + :rtype: list of str + """ + # vcpus = [ (vcpus, instance_type), (vcpus, instance_type), ... ] + instance_vcpus = [] + instances = [] + + for instance_type in instance_types: + instance_vcpus.append( + (EC2_INSTANCE_TYPES[instance_type]['vcpus'], instance_type) + ) + + instance_vcpus = sorted(instance_vcpus, key=lambda item: item[0], reverse=True) + # Loop through, + # if biggest instance type smaller than target, and len(instance_types)> 1, then use biggest type + # if biggest instance type bigger than target, and len(instance_types)> 1, then remove it and move on + + # if biggest instance type bigger than target and len(instance_types) == 1 then add instance and finish + # if biggest instance type smaller than target and len(instance_types) == 1 then loop adding instances until target == 0 + # ^^ boils down to keep adding last till target vcpus is negative + # #Algorithm ;-) ... Could probably be done better with some quality lambdas + while target > 0: + current_vcpu, current_instance = instance_vcpus[0] + + if len(instance_vcpus) > 1: + if current_vcpu <= target: + target -= current_vcpu + instances.append(current_instance) + else: + # try next biggest instance + instance_vcpus.pop(0) + else: + # We're on the last instance + target -= current_vcpu + instances.append(current_instance) + + return instances + available_regions = boto3.session.Session().get_available_regions("batch") batch_backends = {region: BatchBackend(region_name=region) for region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py index 0368906f0..80aedcf70 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -14,11 +14,17 @@ class BatchResponse(BaseResponse): @property def batch_backend(self): + """ + :return: Batch Backend + :rtype: moto.batch.models.BatchBackend + """ return batch_backends[self.region] + @property def json(self): - if not hasattr(self, '_json'): + if self.body is None: + self._json = {} + elif not hasattr(self, '_json'): self._json = json.loads(self.body) return self._json @@ -56,3 +62,14 @@ class BatchResponse(BaseResponse): } return json.dumps(result) + + # DescribeComputeEnvironments + def describecomputeenvironments(self): + compute_environments = self._get_param('computeEnvironments') + max_results = self._get_param('maxResults') # Ignored, should be int + next_token = self._get_param('nextToken') # Ignored + + envs = self.batch_backend.describe_compute_environments(compute_environments, max_results=max_results, next_token=next_token) + + result = {'computeEnvironments': envs} + return json.dumps(result) diff --git a/moto/batch/urls.py b/moto/batch/urls.py index 93f8a2f23..9ad3db06f 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -7,4 +7,5 @@ url_paths = { '{0}/v1/createcomputeenvironment': BatchResponse.dispatch, + '{0}/v1/describecomputeenvironments': 
BatchResponse.dispatch, } diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 3aae48e1e..aceb95804 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals import boto3 import sure # noqa -from moto import mock_batch, mock_iam, mock_ec2 +from moto import mock_batch, mock_iam, mock_ec2, mock_ecs DEFAULT_REGION = 'eu-central-1' @@ -11,6 +11,7 @@ DEFAULT_REGION = 'eu-central-1' def _get_clients(): return boto3.client('ec2', region_name=DEFAULT_REGION), \ boto3.client('iam', region_name=DEFAULT_REGION), \ + boto3.client('ecs', region_name=DEFAULT_REGION), \ boto3.client('batch', region_name=DEFAULT_REGION) @@ -46,10 +47,11 @@ def _setup(ec2_client, iam_client): # Yes, yes it talks to all the things @mock_ec2 +@mock_ecs @mock_iam @mock_batch -def test_create_compute_environment(): - ec2_client, iam_client, batch_client = _get_clients() +def test_create_managed_compute_environment(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -59,11 +61,12 @@ def test_create_compute_environment(): state='ENABLED', computeResources={ 'type': 'EC2', - 'minvCpus': 123, - 'maxvCpus': 123, - 'desiredvCpus': 123, + 'minvCpus': 5, + 'maxvCpus': 10, + 'desiredvCpus': 5, 'instanceTypes': [ - 'some_instance_type', + 't2.small', + 't2.medium' ], 'imageId': 'some_image_id', 'subnets': [ @@ -85,4 +88,71 @@ def test_create_compute_environment(): resp.should.contain('computeEnvironmentArn') resp['computeEnvironmentName'].should.equal(compute_name) + # Given a t2.medium is 2 vcpu and t2.small is 1, therefore 2 mediums and 1 small should be created + resp = ec2_client.describe_instances() + resp.should.contain('Reservations') + len(resp['Reservations']).should.equal(3) + + # Should have created 1 ECS cluster + resp = ecs_client.list_clusters() + 
resp.should.contain('clusterArns') + len(resp['clusterArns']).should.equal(1) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_unmanaged_compute_environment(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + resp.should.contain('computeEnvironmentArn') + resp['computeEnvironmentName'].should.equal(compute_name) + + # Its unmanaged so no instances should be created + resp = ec2_client.describe_instances() + resp.should.contain('Reservations') + len(resp['Reservations']).should.equal(0) + + # Should have created 1 ECS cluster + resp = ecs_client.list_clusters() + resp.should.contain('clusterArns') + len(resp['clusterArns']).should.equal(1) + # TODO create 1000s of tests to test complex option combinations of create environment + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_describe_compute_environment(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + + resp = batch_client.describe_compute_environments() + len(resp['computeEnvironments']).should.equal(1) + resp['computeEnvironments'][0]['computeEnvironmentName'].should.equal(compute_name) + + # Test filtering + resp = batch_client.describe_compute_environments( + computeEnvironments=['test1'] + ) + len(resp['computeEnvironments']).should.equal(0) + From 9af88bf20636fe9009060b5dd0cec42f6c8736d6 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 29 Sep 2017 23:43:03 +0100 Subject: [PATCH 327/412] Fixed batch errors --- 
moto/batch/responses.py | 7 +++++-- moto/batch/urls.py | 4 ++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/moto/batch/responses.py b/moto/batch/responses.py index 80aedcf70..590cc27a4 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -22,10 +22,13 @@ class BatchResponse(BaseResponse): @property def json(self): - if self.body is None: + if self.body is None or self.body == '': self._json = {} elif not hasattr(self, '_json'): - self._json = json.loads(self.body) + try: + self._json = json.loads(self.body) + except ValueError: + self._json = {} return self._json def _get_param(self, param_name, if_none=None): diff --git a/moto/batch/urls.py b/moto/batch/urls.py index 9ad3db06f..18de99199 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -6,6 +6,6 @@ url_bases = [ ] url_paths = { - '{0}/v1/createcomputeenvironment': BatchResponse.dispatch, - '{0}/v1/describecomputeenvironments': BatchResponse.dispatch, + '{0}/v1/createcomputeenvironment$': BatchResponse.dispatch, + '{0}/v1/describecomputeenvironments$': BatchResponse.dispatch, } From ab595279ad67254a50da6713c30f02ba1876215c Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 30 Sep 2017 21:51:05 -0700 Subject: [PATCH 328/412] Fix boto/boto3 multi-param discrepancies boto uses the param.member.N syntax, but boto3 replaces the generic .member with a more specific identifier. Example: boto: ClusterSecurityGroups.member.N boto3: ClusterSecurityGroups.ClusterSecurityGroupName.N This commit addresses this issue for the ClusterSecurityGroups, SubnetIds, and VpcSecurityGroupIds parameters. 
--- moto/redshift/responses.py | 44 +++++++++++++---------- tests/test_redshift/test_redshift.py | 52 ++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 19 deletions(-) diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index 0dbf35cb2..52ca908e8 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -66,6 +66,24 @@ class RedshiftResponse(BaseResponse): count += 1 return unpacked_list + def _get_cluster_security_groups(self): + cluster_security_groups = self._get_multi_param('ClusterSecurityGroups.member') + if not cluster_security_groups: + cluster_security_groups = self._get_multi_param('ClusterSecurityGroups.ClusterSecurityGroupName') + return cluster_security_groups + + def _get_vpc_security_group_ids(self): + vpc_security_group_ids = self._get_multi_param('VpcSecurityGroupIds.member') + if not vpc_security_group_ids: + vpc_security_group_ids = self._get_multi_param('VpcSecurityGroupIds.VpcSecurityGroupId') + return vpc_security_group_ids + + def _get_subnet_ids(self): + subnet_ids = self._get_multi_param('SubnetIds.member') + if not subnet_ids: + subnet_ids = self._get_multi_param('SubnetIds.SubnetIdentifier') + return subnet_ids + def create_cluster(self): cluster_kwargs = { "cluster_identifier": self._get_param('ClusterIdentifier'), @@ -74,8 +92,8 @@ class RedshiftResponse(BaseResponse): "master_user_password": self._get_param('MasterUserPassword'), "db_name": self._get_param('DBName'), "cluster_type": self._get_param('ClusterType'), - "cluster_security_groups": self._get_multi_param('ClusterSecurityGroups.member'), - "vpc_security_group_ids": self._get_multi_param('VpcSecurityGroupIds.member'), + "cluster_security_groups": self._get_cluster_security_groups(), + "vpc_security_group_ids": self._get_vpc_security_group_ids(), "cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'), "availability_zone": self._get_param('AvailabilityZone'), "preferred_maintenance_window": 
self._get_param('PreferredMaintenanceWindow'), @@ -116,10 +134,8 @@ class RedshiftResponse(BaseResponse): "publicly_accessible": self._get_param("PubliclyAccessible"), "cluster_parameter_group_name": self._get_param( 'ClusterParameterGroupName'), - "cluster_security_groups": self._get_multi_param( - 'ClusterSecurityGroups.member'), - "vpc_security_group_ids": self._get_multi_param( - 'VpcSecurityGroupIds.member'), + "cluster_security_groups": self._get_cluster_security_groups(), + "vpc_security_group_ids": self._get_vpc_security_group_ids(), "preferred_maintenance_window": self._get_param( 'PreferredMaintenanceWindow'), "automated_snapshot_retention_period": self._get_int_param( @@ -161,8 +177,8 @@ class RedshiftResponse(BaseResponse): "node_type": self._get_param('NodeType'), "master_user_password": self._get_param('MasterUserPassword'), "cluster_type": self._get_param('ClusterType'), - "cluster_security_groups": self._get_multi_param('ClusterSecurityGroups.member'), - "vpc_security_group_ids": self._get_multi_param('VpcSecurityGroupIds.member'), + "cluster_security_groups": self._get_cluster_security_groups(), + "vpc_security_group_ids": self._get_vpc_security_group_ids(), "cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'), "preferred_maintenance_window": self._get_param('PreferredMaintenanceWindow'), "cluster_parameter_group_name": self._get_param('ClusterParameterGroupName'), @@ -173,12 +189,6 @@ class RedshiftResponse(BaseResponse): "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), } - # There's a bug in boto3 where the security group ids are not passed - # according to the AWS documentation - if not request_kwargs['vpc_security_group_ids']: - request_kwargs['vpc_security_group_ids'] = self._get_multi_param( - 'VpcSecurityGroupIds.VpcSecurityGroupId') - cluster_kwargs = {} # We only want parameters that were actually passed in, otherwise # we'll stomp all over our cluster metadata with 
None values. @@ -217,11 +227,7 @@ class RedshiftResponse(BaseResponse): def create_cluster_subnet_group(self): cluster_subnet_group_name = self._get_param('ClusterSubnetGroupName') description = self._get_param('Description') - subnet_ids = self._get_multi_param('SubnetIds.member') - # There's a bug in boto3 where the subnet ids are not passed - # according to the AWS documentation - if not subnet_ids: - subnet_ids = self._get_multi_param('SubnetIds.SubnetIdentifier') + subnet_ids = self._get_subnet_ids() tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) subnet_group = self.redshift_backend.create_cluster_subnet_group( diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index dca475374..cebaa3ec7 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -216,6 +216,33 @@ def test_create_cluster_with_security_group(): set(group_names).should.equal(set(["security_group1", "security_group2"])) +@mock_redshift +def test_create_cluster_with_security_group_boto3(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group1", + Description="This is my security group", + ) + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group2", + Description="This is my security group", + ) + + cluster_identifier = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_identifier, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSecurityGroups=["security_group1", "security_group2"] + ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + group_names = [group['ClusterSecurityGroupName'] + for group in cluster['ClusterSecurityGroups']] + set(group_names).should.equal({"security_group1", "security_group2"}) + + @mock_redshift_deprecated @mock_ec2_deprecated def 
test_create_cluster_with_vpc_security_groups(): @@ -242,6 +269,31 @@ def test_create_cluster_with_vpc_security_groups(): list(group_ids).should.equal([security_group.id]) +@mock_redshift +@mock_ec2 +def test_create_cluster_with_vpc_security_groups_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + security_group = ec2.create_security_group( + Description="vpc_security_group", + GroupName="a group", + VpcId=vpc.id) + client.create_cluster( + ClusterIdentifier=cluster_id, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + VpcSecurityGroupIds=[security_group.id], + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + group_ids = [group['VpcSecurityGroupId'] + for group in cluster['VpcSecurityGroups']] + list(group_ids).should.equal([security_group.id]) + + @mock_redshift_deprecated def test_create_cluster_with_parameter_group(): conn = boto.connect_redshift() From 37ae61871c06412157b5b178546e7e63d34bd914 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 1 Oct 2017 15:01:33 -0700 Subject: [PATCH 329/412] add model methods for iam attached group policies --- moto/iam/models.py | 68 +++++++++++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 31 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index a7e584284..0f0f4c058 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -82,6 +82,10 @@ class ManagedPolicy(Policy): self.attachment_count -= 1 del role.managed_policies[self.name] + def attach_to_group(self, group): + self.attachment_count += 1 + group.managed_policies[self.name] = self + def attach_to_user(self, user): self.attachment_count += 1 user.managed_policies[self.name] = self @@ -249,6 +253,7 @@ class Group(BaseModel): ) self.users = [] + self.managed_policies = {} 
self.policies = {} def get_cfn_attribute(self, attribute_name): @@ -433,14 +438,36 @@ class IAMBackend(BaseBackend): except KeyError: raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) + def attach_group_policy(self, policy_arn, group_name): + arns = dict((p.arn, p) for p in self.managed_policies.values()) + try: + policy = arns[policy_arn] + except KeyError: + raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) + policy.attach_to_group(self.get_group(group_name)) + + def detach_group_policy(self, policy_arn, group_name): + arns = dict((p.arn, p) for p in self.managed_policies.values()) + try: + policy = arns[policy_arn] + except KeyError: + raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) + policy.detach_from_group(self.get_group(group_name)) + def attach_user_policy(self, policy_arn, user_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) - policy = arns[policy_arn] + try: + policy = arns[policy_arn] + except KeyError: + raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) policy.attach_to_user(self.get_user(user_name)) def detach_user_policy(self, policy_arn, user_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) - policy = arns[policy_arn] + try: + policy = arns[policy_arn] + except KeyError: + raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) policy.detach_from_user(self.get_user(user_name)) def create_policy(self, description, path, policy_document, policy_name): @@ -458,39 +485,15 @@ class IAMBackend(BaseBackend): def list_attached_role_policies(self, role_name, marker=None, max_items=100, path_prefix='/'): policies = self.get_role(role_name).managed_policies.values() + return self._filter_attached_policies(policies, marker, max_items, path_prefix) - if path_prefix: - policies = [p for p in policies if p.path.startswith(path_prefix)] - - policies = sorted(policies, key=lambda policy: policy.name) - 
start_idx = int(marker) if marker else 0 - - policies = policies[start_idx:start_idx + max_items] - - if len(policies) < max_items: - marker = None - else: - marker = str(start_idx + max_items) - - return policies, marker + def list_attached_group_policies(self, group_name, marker=None, max_items=100, path_prefix='/'): + policies = self.get_group(group_name).managed_policies.values() + return self._filter_attached_policies(policies, marker, max_items, path_prefix) def list_attached_user_policies(self, user_name, marker=None, max_items=100, path_prefix='/'): policies = self.get_user(user_name).managed_policies.values() - - if path_prefix: - policies = [p for p in policies if p.path.startswith(path_prefix)] - - policies = sorted(policies, key=lambda policy: policy.name) - start_idx = int(marker) if marker else 0 - - policies = policies[start_idx:start_idx + max_items] - - if len(policies) < max_items: - marker = None - else: - marker = str(start_idx + max_items) - - return policies, marker + return self._filter_attached_policies(policies, marker, max_items, path_prefix) def list_policies(self, marker, max_items, only_attached, path_prefix, scope): policies = self.managed_policies.values() @@ -504,6 +507,9 @@ class IAMBackend(BaseBackend): policies = [p for p in policies if not isinstance( p, AWSManagedPolicy)] + return self._filter_attached_policies(policies, marker, max_items, path_prefix) + + def _filter_attached_policies(self, policies, marker, max_items, path_prefix): if path_prefix: policies = [p for p in policies if p.path.startswith(path_prefix)] From 3788e42f3518ab96ae00945a3e26a9379804fa79 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 1 Oct 2017 15:01:52 -0700 Subject: [PATCH 330/412] implement handlers for iam attached group policies --- moto/iam/responses.py | 59 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 13688869e..6ca49b830 100644 --- 
a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -20,6 +20,20 @@ class IamResponse(BaseResponse): template = self.response_template(GENERIC_EMPTY_TEMPLATE) return template.render(name="DetachRolePolicyResponse") + def attach_group_policy(self): + policy_arn = self._get_param('PolicyArn') + group_name = self._get_param('GroupName') + iam_backend.attach_group_policy(policy_arn, group_name) + template = self.response_template(ATTACH_GROUP_POLICY_TEMPLATE) + return template.render() + + def detach_group_policy(self): + policy_arn = self._get_param('PolicyArn') + group_name = self._get_param('GroupName') + iam_backend.detach_group_policy(policy_arn, group_name) + template = self.response_template(DETACH_GROUP_POLICY_TEMPLATE) + return template.render() + def attach_user_policy(self): policy_arn = self._get_param('PolicyArn') user_name = self._get_param('UserName') @@ -54,6 +68,17 @@ class IamResponse(BaseResponse): template = self.response_template(LIST_ATTACHED_ROLE_POLICIES_TEMPLATE) return template.render(policies=policies, marker=marker) + def list_attached_group_policies(self): + marker = self._get_param('Marker') + max_items = self._get_int_param('MaxItems', 100) + path_prefix = self._get_param('PathPrefix', '/') + group_name = self._get_param('GroupName') + policies, marker = iam_backend.list_attached_group_policies( + group_name, marker=marker, max_items=max_items, + path_prefix=path_prefix) + template = self.response_template(LIST_ATTACHED_GROUP_POLICIES_TEMPLATE) + return template.render(policies=policies, marker=marker) + def list_attached_user_policies(self): marker = self._get_param('Marker') max_items = self._get_int_param('MaxItems', 100) @@ -520,6 +545,18 @@ DETACH_USER_POLICY_TEMPLATE = """ """ +ATTACH_GROUP_POLICY_TEMPLATE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + +DETACH_GROUP_POLICY_TEMPLATE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + CREATE_POLICY_TEMPLATE = """ @@ -560,6 +597,28 @@ LIST_ATTACHED_ROLE_POLICIES_TEMPLATE 
= """ """ +LIST_ATTACHED_GROUP_POLICIES_TEMPLATE = """ + + {% if marker is none %} + false + {% else %} + true + {{ marker }} + {% endif %} + + {% for policy in policies %} + + {{ policy.name }} + {{ policy.arn }} + + {% endfor %} + + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + LIST_ATTACHED_USER_POLICIES_TEMPLATE = """ {% if marker is none %} From 9f02a84d8da4127f581ac920fdc6e6b0beb9666e Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 1 Oct 2017 15:02:00 -0700 Subject: [PATCH 331/412] test attaching group policies --- tests/test_iam/test_iam_groups.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 9d5095884..5270fb96e 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -82,6 +82,23 @@ def test_put_group_policy(): conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') +@mock_iam +def test_attach_group_policies(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_group(GroupName='my-group') + conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty + policy_arn = 'arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role' + conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty + conn.attach_group_policy(GroupName='my-group', PolicyArn=policy_arn) + conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.equal( + [ + { + 'PolicyName': 'AmazonElasticMapReduceforEC2Role', + 'PolicyArn': policy_arn, + } + ]) + + @mock_iam_deprecated() def test_get_group_policy(): conn = boto.connect_iam() From cdb1ebf6669ab95838112a9105d12753dca9a12e Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 1 Oct 2017 15:02:06 -0700 Subject: [PATCH 332/412] pep8 fixes --- tests/test_iam/test_iam_groups.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git 
a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 5270fb96e..2b308f70a 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -107,7 +107,8 @@ def test_get_group_policy(): conn.get_group_policy('my-group', 'my-policy') conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') - policy = conn.get_group_policy('my-group', 'my-policy') + conn.get_group_policy('my-group', 'my-policy') + @mock_iam_deprecated() def test_get_all_group_policies(): @@ -124,6 +125,6 @@ def test_get_all_group_policies(): def test_list_group_policies(): conn = boto3.client('iam', region_name='us-east-1') conn.create_group(GroupName='my-group') - policies = conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty + conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument='{"some": "json"}') - policies = conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.equal(['my-policy']) + conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.equal(['my-policy']) From 353f8387a2926c0f3776992bdd657f084b9e3ae0 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 1 Oct 2017 15:04:59 -0700 Subject: [PATCH 333/412] implementing detach_group_policy --- moto/iam/models.py | 32 ++++++++++--------------------- tests/test_iam/test_iam_groups.py | 3 +++ 2 files changed, 13 insertions(+), 22 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 0f0f4c058..18ed513b4 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -74,25 +74,13 @@ class ManagedPolicy(Policy): is_attachable = True - def attach_to_role(self, role): + def attach_to(self, obj): self.attachment_count += 1 - role.managed_policies[self.name] = self + obj.managed_policies[self.name] = self - def detach_from_role(self, role): + def detach_from(self, obj): self.attachment_count -= 1 - del 
role.managed_policies[self.name] - - def attach_to_group(self, group): - self.attachment_count += 1 - group.managed_policies[self.name] = self - - def attach_to_user(self, user): - self.attachment_count += 1 - user.managed_policies[self.name] = self - - def detach_from_user(self, user): - self.attachment_count -= 1 - del user.managed_policies[self.name] + del obj.managed_policies[self.name] class AWSManagedPolicy(ManagedPolicy): @@ -428,13 +416,13 @@ class IAMBackend(BaseBackend): def attach_role_policy(self, policy_arn, role_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) policy = arns[policy_arn] - policy.attach_to_role(self.get_role(role_name)) + policy.attach_to(self.get_role(role_name)) def detach_role_policy(self, policy_arn, role_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) try: policy = arns[policy_arn] - policy.detach_from_role(self.get_role(role_name)) + policy.detach_from(self.get_role(role_name)) except KeyError: raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) @@ -444,7 +432,7 @@ class IAMBackend(BaseBackend): policy = arns[policy_arn] except KeyError: raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) - policy.attach_to_group(self.get_group(group_name)) + policy.attach_to(self.get_group(group_name)) def detach_group_policy(self, policy_arn, group_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) @@ -452,7 +440,7 @@ class IAMBackend(BaseBackend): policy = arns[policy_arn] except KeyError: raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) - policy.detach_from_group(self.get_group(group_name)) + policy.detach_from(self.get_group(group_name)) def attach_user_policy(self, policy_arn, user_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) @@ -460,7 +448,7 @@ class IAMBackend(BaseBackend): policy = arns[policy_arn] except KeyError: raise IAMNotFoundException("Policy {0} was not 
found.".format(policy_arn)) - policy.attach_to_user(self.get_user(user_name)) + policy.attach_to(self.get_user(user_name)) def detach_user_policy(self, policy_arn, user_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) @@ -468,7 +456,7 @@ class IAMBackend(BaseBackend): policy = arns[policy_arn] except KeyError: raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn)) - policy.detach_from_user(self.get_user(user_name)) + policy.detach_from(self.get_user(user_name)) def create_policy(self, description, path, policy_document, policy_name): policy = ManagedPolicy( diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 2b308f70a..49c7987f6 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -98,6 +98,9 @@ def test_attach_group_policies(): } ]) + conn.detach_group_policy(GroupName='my-group', PolicyArn=policy_arn) + conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty + @mock_iam_deprecated() def test_get_group_policy(): From ea26466e6d498f2ed77b024b32c141ad8b306182 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Mon, 2 Oct 2017 07:17:02 +0900 Subject: [PATCH 334/412] Add more files and lines when scaffolding (#1222) * auto-generatr urls.py * add mocks to moto/__init__.py and moto/backends.py * add uri to urls.py * change output of scaffold.py --- scripts/scaffold.py | 114 +++++++++++++++++++++++++++++++- scripts/template/lib/urls.py.j2 | 9 +++ 2 files changed, 120 insertions(+), 3 deletions(-) create mode 100644 scripts/template/lib/urls.py.j2 diff --git a/scripts/scaffold.py b/scripts/scaffold.py index 5373be40d..b1c9f3a0f 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -107,6 +107,62 @@ def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None f.write(rendered) +def append_mock_to_init_py(service): + path = os.path.join(os.path.dirname(__file__), '..', 'moto', '__init__.py') + 
with open(path) as f: + lines = [_.replace('\n', '') for _ in f.readlines()] + + if any(_ for _ in lines if re.match('^from.*mock_{}.*$'.format(service), _)): + return + filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)] + last_import_line_index = lines.index(filtered_lines[-1]) + + new_line = 'from .{} import mock_{} # flake8: noqa'.format(service, service) + lines.insert(last_import_line_index + 1, new_line) + + body = '\n'.join(lines) + '\n' + with open(path, 'w') as f: + f.write(body) + + +def append_mock_import_to_backends_py(service): + path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py') + with open(path) as f: + lines = [_.replace('\n', '') for _ in f.readlines()] + + if any(_ for _ in lines if re.match('^from moto\.{}.*{}_backends.*$'.format(service, service), _)): + return + filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)] + last_import_line_index = lines.index(filtered_lines[-1]) + + new_line = 'from moto.{} import {}_backends'.format(service, service) + lines.insert(last_import_line_index + 1, new_line) + + body = '\n'.join(lines) + '\n' + with open(path, 'w') as f: + f.write(body) + +def append_mock_dict_to_backends_py(service): + path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py') + with open(path) as f: + lines = [_.replace('\n', '') for _ in f.readlines()] + + # 'xray': xray_backends + if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, service), _)): + return + filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)] + last_elem_line_index = lines.index(filtered_lines[-1]) + + new_line = " '{}': {}_backends,".format(service, service) + prev_line = lines[last_elem_line_index] + if not prev_line.endswith('{') and not prev_line.endswith(','): + lines[last_elem_line_index] += ',' + lines.insert(last_elem_line_index + 1, new_line) + + body = '\n'.join(lines) + '\n' + with open(path, 'w') as f: + f.write(body) + def 
initialize_service(service, operation, api_protocol): """create lib and test dirs if not exist """ @@ -115,11 +171,14 @@ def initialize_service(service, operation, api_protocol): print_progress('Initializing service', service, 'green') - service_class = boto3.client(service).__class__.__name__ + client = boto3.client(service) + service_class = client.__class__.__name__ + endpoint_prefix = client._service_model.endpoint_prefix tmpl_context = { 'service': service, - 'service_class': service_class + 'service_class': service_class, + 'endpoint_prefix': endpoint_prefix } # initialize service directory @@ -148,6 +207,11 @@ def initialize_service(service, operation, api_protocol): tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename ) + # append mock to init files + append_mock_to_init_py(service) + append_mock_import_to_backends_py(service) + append_mock_dict_to_backends_py(service) + def to_upper_camel_case(s): return ''.join([_.title() for _ in s.split('_')]) @@ -324,6 +388,41 @@ def insert_code_to_class(path, base_class, new_code): f.write(body) +def insert_url(service, operation): + client = boto3.client(service) + service_class = client.__class__.__name__ + aws_operation_name = to_upper_camel_case(operation) + uri = client._service_model.operation_model(aws_operation_name).http['requestUri'] + + path = os.path.join(os.path.dirname(__file__), '..', 'moto', service, 'urls.py') + with open(path) as f: + lines = [_.replace('\n', '') for _ in f.readlines()] + + if any(_ for _ in lines if re.match(uri, _)): + return + + url_paths_found = False + last_elem_line_index = -1 + for i, line in enumerate(lines): + if line.startswith('url_paths'): + url_paths_found = True + if url_paths_found and line.startswith('}'): + last_elem_line_index = i - 1 + + prev_line = lines[last_elem_line_index] + if not prev_line.endswith('{') and not prev_line.endswith(','): + lines[last_elem_line_index] += ',' + + new_line = " '{0}%s$': %sResponse.dispatch," % ( + uri, service_class + ) + 
lines.insert(last_elem_line_index + 1, new_line) + + body = '\n'.join(lines) + '\n' + with open(path, 'w') as f: + f.write(body) + + def insert_query_codes(service, operation): func_in_responses = get_function_in_responses(service, operation, 'query') func_in_models = get_function_in_models(service, operation) @@ -346,6 +445,9 @@ def insert_query_codes(service, operation): print_progress('inserting code', models_path, 'green') insert_code_to_class(models_path, BaseBackend, func_in_models) + # edit urls.py + insert_url(service, operation) + def insert_json_codes(service, operation): func_in_responses = get_function_in_responses(service, operation, 'json') func_in_models = get_function_in_models(service, operation) @@ -360,6 +462,9 @@ def insert_json_codes(service, operation): print_progress('inserting code', models_path, 'green') insert_code_to_class(models_path, BaseBackend, func_in_models) + # edit urls.py + insert_url(service, operation) + def insert_restjson_codes(service, operation): func_in_models = get_function_in_models(service, operation) @@ -369,6 +474,9 @@ def insert_restjson_codes(service, operation): print_progress('inserting code', models_path, 'green') insert_code_to_class(models_path, BaseBackend, func_in_models) + # edit urls.py + insert_url(service, operation) + @click.command() def main(): service, operation = select_service_and_operation() @@ -383,7 +491,7 @@ def main(): else: print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow') - click.echo('You will still need to make "{0}/urls.py", add the backend into "backends.py" and add the mock into "__init__.py"'.format(service)) + click.echo('You will still need to add the mock into "__init__.py"'.format(service)) if __name__ == '__main__': main() diff --git a/scripts/template/lib/urls.py.j2 b/scripts/template/lib/urls.py.j2 new file mode 100644 index 000000000..53cc03c0e --- /dev/null +++ b/scripts/template/lib/urls.py.j2 @@ -0,0 +1,9 @@ +from 
__future__ import unicode_literals +from .responses import {{ service_class }}Response + +url_bases = [ + "https?://{{ endpoint_prefix }}.(.+).amazonaws.com", +] + +url_paths = { +} From c5b6f5ea07321e5ae72b8fd85b1d456dae229750 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 1 Oct 2017 17:00:48 -0700 Subject: [PATCH 335/412] bumping to version 1.1.20 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index d4ce3d5f1..378119925 100755 --- a/setup.py +++ b/setup.py @@ -37,7 +37,7 @@ else: setup( name='moto', - version='1.1.19', + version='1.1.20', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 6abd929c4964f03333f40d44435d3d8c08afa11a Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 1 Oct 2017 17:02:52 -0700 Subject: [PATCH 336/412] Update changelog for 1.1.20 --- CHANGELOG.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 40edb4204..084540e62 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,16 @@ Moto Changelog Latest ------ -1.1.16 +1.1.20 +----- + + * Improved `make scaffold` + * Implemented IAM attached group policies + * Redshift: fixed multi-params + * Redshift: implement taggable resources + * Lambda + SNS: Major enhancements + +1.1.19 ----- * Fixing regression from 1.1.15 From e8c868f1b700b5ef3e58dd027fbd6303e434c568 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sun, 1 Oct 2017 17:06:24 -0700 Subject: [PATCH 337/412] adding logs to release notes --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 084540e62..109cdcf31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Latest * Improved `make scaffold` * Implemented IAM attached group policies + * Implemented skeleton of Cloudwatch Logs * Redshift: fixed multi-params * Redshift: implement taggable resources * Lambda + SNS: Major enhancements From 
04542dccc033f6931acebe82f2382e01b4b46981 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 2 Oct 2017 12:35:52 -0700 Subject: [PATCH 338/412] implement elbv2 ResourceInUseError --- moto/elbv2/exceptions.py | 7 +++++++ moto/elbv2/models.py | 21 +++++++++++++++++++-- tests/test_elbv2/test_elbv2.py | 7 +++++++ 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index 0947535eb..0bf9649d7 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -152,6 +152,13 @@ class InvalidDescribeRulesRequest(ELBClientError): ) +class ResourceInUseError(ELBClientError): + + def __init__(self, msg="A specified resource is in use"): + super(ResourceInUseError, self).__init__( + "ResourceInUse", msg) + + class RuleNotFoundError(ELBClientError): def __init__(self): diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 3c6afe7f5..8aa9ee9f0 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -21,6 +21,7 @@ from .exceptions import ( InvalidActionTypeError, ActionTargetGroupNotFoundError, InvalidDescribeRulesRequest, + ResourceInUseError, RuleNotFoundError, DuplicatePriorityError, InvalidTargetGroupNameError, @@ -426,10 +427,17 @@ class ELBv2Backend(BaseBackend): # however, boto3 does't raise error even if rule is not found def delete_target_group(self, target_group_arn): - target_group = self.target_groups.pop(target_group_arn, None) + if target_group_arn not in self.target_groups: + raise TargetGroupNotFoundError() + + target_group = self.target_groups[target_group_arn] if target_group: + if self._any_listener_using(target_group_arn): + raise ResourceInUseError( + "The target group '{}' is currently in use by a listener or a rule".format( + target_group_arn)) + del self.target_groups[target_group_arn] return target_group - raise TargetGroupNotFoundError() def delete_listener(self, listener_arn): for load_balancer in self.load_balancers.values(): @@ -539,6 +547,15 @@ class 
ELBv2Backend(BaseBackend): modified_rules.append(given_rule) return modified_rules + def _any_listener_using(self, target_group_arn): + for load_balancer in self.load_balancers.values(): + for listener in load_balancer.listeners.values(): + for rule in listener.rules: + for action in rule.actions: + if action.get('target_group_arn') == target_group_arn: + return True + return False + elbv2_backends = {} for region in ec2_backends.keys(): diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 21799ddcf..1a8494bd3 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -306,6 +306,13 @@ def test_create_target_group_and_listeners(): response = conn.describe_listeners(ListenerArns=[http_listener_arn, https_listener_arn]) response.get('Listeners').should.have.length_of(2) + # Try to delete the target group and it fails because there's a + # listener referencing it + with assert_raises(ClientError) as e: + conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn')) + e.exception.operation_name.should.equal('DeleteTargetGroup') + e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", )) # NOQA + # Delete one listener response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) response.get('Listeners').should.have.length_of(2) From f6166f841ad402353a6e482a59003e468846e447 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 2 Oct 2017 12:36:47 -0700 Subject: [PATCH 339/412] running autopep8 against test_elbv2.py --- tests/test_elbv2/test_elbv2.py | 284 +++++++++++++++++++++++---------- 1 file changed, 201 insertions(+), 83 deletions(-) diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 1a8494bd3..8224a348d 100644 --- a/tests/test_elbv2/test_elbv2.py +++ 
b/tests/test_elbv2/test_elbv2.py @@ -14,10 +14,17 @@ def test_create_load_balancer(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = boto3.resource('ec2', region_name='us-east-1') - security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') response = conn.create_load_balancer( Name='my-lb', @@ -29,7 +36,8 @@ def test_create_load_balancer(): lb = response.get('LoadBalancers')[0] lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com") - lb.get('LoadBalancerArn').should.equal('arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188') + lb.get('LoadBalancerArn').should.equal( + 'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188') lb.get('SecurityGroups').should.equal([security_group.id]) lb.get('AvailabilityZones').should.equal([ {'SubnetId': subnet1.id, 'ZoneName': 'us-east-1a'}, @@ -37,7 +45,8 @@ def test_create_load_balancer(): # Ensure the tags persisted response = conn.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')]) - tags = {d['Key']: d['Value'] for d in response['TagDescriptions'][0]['Tags']} + tags = {d['Key']: d['Value'] + for d in response['TagDescriptions'][0]['Tags']} tags.should.equal({'key_name': 'a_value'}) @@ -47,10 +56,17 @@ def test_describe_load_balancers(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = 
boto3.resource('ec2', region_name='us-east-1') - security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') conn.create_load_balancer( Name='my-lb', @@ -65,11 +81,14 @@ def test_describe_load_balancers(): lb = response.get('LoadBalancers')[0] lb.get('LoadBalancerName').should.equal('my-lb') - response = conn.describe_load_balancers(LoadBalancerArns=[lb.get('LoadBalancerArn')]) - response.get('LoadBalancers')[0].get('LoadBalancerName').should.equal('my-lb') + response = conn.describe_load_balancers( + LoadBalancerArns=[lb.get('LoadBalancerArn')]) + response.get('LoadBalancers')[0].get( + 'LoadBalancerName').should.equal('my-lb') response = conn.describe_load_balancers(Names=['my-lb']) - response.get('LoadBalancers')[0].get('LoadBalancerName').should.equal('my-lb') + response.get('LoadBalancers')[0].get( + 'LoadBalancerName').should.equal('my-lb') with assert_raises(ClientError): conn.describe_load_balancers(LoadBalancerArns=['not-a/real/arn']) @@ -84,10 +103,17 @@ def test_add_remove_tags(): ec2 = boto3.resource('ec2', region_name='us-east-1') - security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = 
ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') conn.create_load_balancer( Name='my-lb', @@ -197,10 +223,19 @@ def test_create_elb_in_multiple_region(): conn = boto3.client('elbv2', region_name=region) ec2 = boto3.resource('ec2', region_name=region) - security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone=region + 'a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone=region + 'b') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc( + CidrBlock='172.28.7.0/24', + InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone=region + 'a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone=region + 'b') conn.create_load_balancer( Name='my-lb', @@ -210,10 +245,14 @@ def test_create_elb_in_multiple_region(): Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) list( - boto3.client('elbv2', region_name='us-west-1').describe_load_balancers().get('LoadBalancers') + boto3.client( + 'elbv2', + region_name='us-west-1').describe_load_balancers().get('LoadBalancers') ).should.have.length_of(1) list( - boto3.client('elbv2', region_name='us-west-2').describe_load_balancers().get('LoadBalancers') + boto3.client( + 'elbv2', + region_name='us-west-2').describe_load_balancers().get('LoadBalancers') 
).should.have.length_of(1) @@ -223,10 +262,17 @@ def test_create_target_group_and_listeners(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = boto3.resource('ec2', region_name='us-east-1') - security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') response = conn.create_load_balancer( Name='my-lb', @@ -254,7 +300,8 @@ def test_create_target_group_and_listeners(): target_group_arn = target_group['TargetGroupArn'] # Add tags to the target group - conn.add_tags(ResourceArns=[target_group_arn], Tags=[{'Key': 'target', 'Value': 'group'}]) + conn.add_tags(ResourceArns=[target_group_arn], Tags=[ + {'Key': 'target', 'Value': 'group'}]) conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal( [{'Key': 'target', 'Value': 'group'}]) @@ -281,7 +328,8 @@ def test_create_target_group_and_listeners(): LoadBalancerArn=load_balancer_arn, Protocol='HTTPS', Port=443, - Certificates=[{'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}], + Certificates=[ + {'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}], DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) listener = response.get('Listeners')[0] listener.get('Port').should.equal(443) @@ -303,13 +351,17 @@ def test_create_target_group_and_listeners(): 
listener.get('Port').should.equal(443) listener.get('Protocol').should.equal('HTTPS') - response = conn.describe_listeners(ListenerArns=[http_listener_arn, https_listener_arn]) + response = conn.describe_listeners( + ListenerArns=[ + http_listener_arn, + https_listener_arn]) response.get('Listeners').should.have.length_of(2) # Try to delete the target group and it fails because there's a # listener referencing it with assert_raises(ClientError) as e: - conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn')) + conn.delete_target_group( + TargetGroupArn=target_group.get('TargetGroupArn')) e.exception.operation_name.should.equal('DeleteTargetGroup') e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", )) # NOQA @@ -328,7 +380,10 @@ def test_create_target_group_and_listeners(): response.get('LoadBalancers').should.have.length_of(0) # And it deleted the remaining listener - response = conn.describe_listeners(ListenerArns=[http_listener_arn, https_listener_arn]) + response = conn.describe_listeners( + ListenerArns=[ + http_listener_arn, + https_listener_arn]) response.get('Listeners').should.have.length_of(0) # But not the target groups @@ -366,7 +421,13 @@ def test_create_invalid_target_group(): UnhealthyThresholdCount=2, Matcher={'HttpCode': '200'}) - invalid_names = ['-name', 'name-', '-name-', 'example.com', 'test@test', 'Na--me'] + invalid_names = [ + '-name', + 'name-', + '-name-', + 'example.com', + 'test@test', + 'Na--me'] for name in invalid_names: with assert_raises(ClientError): conn.create_target_group( @@ -406,10 +467,17 @@ def test_describe_paginated_balancers(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = boto3.resource('ec2', region_name='us-east-1') - security_group = 
ec2.create_security_group(GroupName='a-security-group', Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') for i in range(51): conn.create_load_balancer( @@ -421,7 +489,8 @@ def test_describe_paginated_balancers(): resp = conn.describe_load_balancers() resp['LoadBalancers'].should.have.length_of(50) - resp['NextMarker'].should.equal(resp['LoadBalancers'][-1]['LoadBalancerName']) + resp['NextMarker'].should.equal( + resp['LoadBalancers'][-1]['LoadBalancerName']) resp2 = conn.describe_load_balancers(Marker=resp['NextMarker']) resp2['LoadBalancers'].should.have.length_of(1) assert 'NextToken' not in resp2.keys() @@ -433,10 +502,17 @@ def test_delete_load_balancer(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = boto3.resource('ec2', region_name='us-east-1') - security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + 
VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') response = conn.create_load_balancer( Name='my-lb', @@ -459,10 +535,17 @@ def test_register_targets(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = boto3.resource('ec2', region_name='us-east-1') - security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') conn.create_load_balancer( Name='my-lb', @@ -487,7 +570,8 @@ def test_register_targets(): target_group = response.get('TargetGroups')[0] # No targets registered yet - response = conn.describe_target_health(TargetGroupArn=target_group.get('TargetGroupArn')) + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) response.get('TargetHealthDescriptions').should.have.length_of(0) response = ec2.create_instances( @@ -508,14 +592,16 @@ def test_register_targets(): }, ]) - response = conn.describe_target_health(TargetGroupArn=target_group.get('TargetGroupArn')) + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) response.get('TargetHealthDescriptions').should.have.length_of(2) response = conn.deregister_targets( TargetGroupArn=target_group.get('TargetGroupArn'), Targets=[{'Id': instance_id2}]) - response = conn.describe_target_health(TargetGroupArn=target_group.get('TargetGroupArn')) + response = 
conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) response.get('TargetHealthDescriptions').should.have.length_of(1) @@ -525,10 +611,17 @@ def test_target_group_attributes(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = boto3.resource('ec2', region_name='us-east-1') - security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') response = conn.create_load_balancer( Name='my-lb', @@ -564,9 +657,11 @@ def test_target_group_attributes(): target_group_arn = target_group['TargetGroupArn'] # The attributes should start with the two defaults - response = conn.describe_target_group_attributes(TargetGroupArn=target_group_arn) + response = conn.describe_target_group_attributes( + TargetGroupArn=target_group_arn) response['Attributes'].should.have.length_of(2) - attributes = {attr['Key']: attr['Value'] for attr in response['Attributes']} + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} attributes['deregistration_delay.timeout_seconds'].should.equal('300') attributes['stickiness.enabled'].should.equal('false') @@ -586,14 +681,17 @@ def test_target_group_attributes(): # The response should have only the keys updated response['Attributes'].should.have.length_of(2) - attributes = {attr['Key']: attr['Value'] for attr in response['Attributes']} + attributes = {attr['Key']: 
attr['Value'] + for attr in response['Attributes']} attributes['stickiness.type'].should.equal('lb_cookie') attributes['stickiness.enabled'].should.equal('true') # These new values should be in the full attribute list - response = conn.describe_target_group_attributes(TargetGroupArn=target_group_arn) + response = conn.describe_target_group_attributes( + TargetGroupArn=target_group_arn) response['Attributes'].should.have.length_of(3) - attributes = {attr['Key']: attr['Value'] for attr in response['Attributes']} + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} attributes['stickiness.type'].should.equal('lb_cookie') attributes['stickiness.enabled'].should.equal('true') @@ -604,10 +702,17 @@ def test_handle_listener_rules(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = boto3.resource('ec2', region_name='us-east-1') - security_group = ec2.create_security_group(GroupName='a-security-group', Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') response = conn.create_load_balancer( Name='my-lb', @@ -656,11 +761,11 @@ def test_handle_listener_rules(): Priority=priority, Conditions=[{ 'Field': 'host-header', - 'Values': [ host ] + 'Values': [host] }, - { + { 'Field': 'path-pattern', - 'Values': [ path_pattern ] + 'Values': [path_pattern] }], Actions=[{ 'TargetGroupArn': target_group.get('TargetGroupArn'), @@ -678,11 +783,11 @@ def test_handle_listener_rules(): 
Priority=priority, Conditions=[{ 'Field': 'host-header', - 'Values': [ host ] + 'Values': [host] }, - { + { 'Field': 'path-pattern', - 'Values': [ path_pattern ] + 'Values': [path_pattern] }], Actions=[{ 'TargetGroupArn': target_group.get('TargetGroupArn'), @@ -698,11 +803,11 @@ def test_handle_listener_rules(): Priority=priority, Conditions=[{ 'Field': 'host-header', - 'Values': [ host ] + 'Values': [host] }, - { + { 'Field': 'path-pattern', - 'Values': [ path_pattern ] + 'Values': [path_pattern] }], Actions=[{ 'TargetGroupArn': target_group.get('TargetGroupArn'), @@ -710,7 +815,6 @@ def test_handle_listener_rules(): }] ) - # test for describe listeners obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn) len(obtained_rules['Rules']).should.equal(3) @@ -723,15 +827,20 @@ def test_handle_listener_rules(): obtained_rules['Rules'].should.equal([first_rule]) # test for pagination - obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn, PageSize=1) + obtained_rules = conn.describe_rules( + ListenerArn=http_listener_arn, PageSize=1) len(obtained_rules['Rules']).should.equal(1) obtained_rules.should.have.key('NextMarker') next_marker = obtained_rules['NextMarker'] - following_rules = conn.describe_rules(ListenerArn=http_listener_arn, PageSize=1, Marker=next_marker) + following_rules = conn.describe_rules( + ListenerArn=http_listener_arn, + PageSize=1, + Marker=next_marker) len(following_rules['Rules']).should.equal(1) following_rules.should.have.key('NextMarker') - following_rules['Rules'][0]['RuleArn'].should_not.equal(obtained_rules['Rules'][0]['RuleArn']) + following_rules['Rules'][0]['RuleArn'].should_not.equal( + obtained_rules['Rules'][0]['RuleArn']) # test for invalid describe rule request with assert_raises(ClientError): @@ -750,13 +859,13 @@ def test_handle_listener_rules(): modified_rule = conn.modify_rule( RuleArn=first_rule['RuleArn'], Conditions=[{ - 'Field': 'host-header', - 'Values': [ new_host ] - }, + 'Field': 'host-header', + 
'Values': [new_host] + }, { 'Field': 'path-pattern', - 'Values': [ new_path_pattern ] - }] + 'Values': [new_path_pattern] + }] )['Rules'][0] rules = conn.describe_rules(ListenerArn=http_listener_arn) @@ -764,12 +873,14 @@ def test_handle_listener_rules(): modified_rule.should.equal(obtained_rule) obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host) obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern) - obtained_rule['Actions'][0]['TargetGroupArn'].should.equal(target_group.get('TargetGroupArn')) + obtained_rule['Actions'][0]['TargetGroupArn'].should.equal( + target_group.get('TargetGroupArn')) # modify priority conn.set_rule_priorities( RulePriorities=[ - {'RuleArn': first_rule['RuleArn'], 'Priority': int(first_rule['Priority']) - 1} + {'RuleArn': first_rule['RuleArn'], + 'Priority': int(first_rule['Priority']) - 1} ] ) with assert_raises(ClientError): @@ -794,11 +905,11 @@ def test_handle_listener_rules(): Priority=safe_priority, Conditions=[{ 'Field': 'host-header', - 'Values': [ host ] + 'Values': [host] }, - { + { 'Field': 'path-pattern', - 'Values': [ path_pattern ] + 'Values': [path_pattern] }], Actions=[{ 'TargetGroupArn': target_group.get('TargetGroupArn'), @@ -815,11 +926,11 @@ def test_handle_listener_rules(): Priority=safe_priority, Conditions=[{ 'Field': 'host-header', - 'Values': [ host ] + 'Values': [host] }, - { + { 'Field': 'path-pattern', - 'Values': [ path_pattern ] + 'Values': [path_pattern] }], Actions=[{ 'TargetGroupArn': invalid_target_group_arn, @@ -835,7 +946,7 @@ def test_handle_listener_rules(): Priority=safe_priority, Conditions=[{ 'Field': 'xxxxxxx', - 'Values': [ host ] + 'Values': [host] }], Actions=[{ 'TargetGroupArn': target_group.get('TargetGroupArn'), @@ -882,10 +993,17 @@ def test_describe_invalid_target_group(): conn = boto3.client('elbv2', region_name='us-east-1') ec2 = boto3.resource('ec2', region_name='us-east-1') - security_group = ec2.create_security_group(GroupName='a-security-group', 
Description='First One') + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') response = conn.create_load_balancer( Name='my-lb', From 068c0617daacb609e6c6c3fffeafbc126e17878e Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 2 Oct 2017 12:38:36 -0700 Subject: [PATCH 340/412] fixing last pep8 violations on elbv2 test --- tests/test_elbv2/test_elbv2.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 8224a348d..98634c677 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -796,9 +796,8 @@ def test_handle_listener_rules(): ) # test for PriorityInUse - host2 = 'yyy.example.com' with assert_raises(ClientError): - r = conn.create_rule( + conn.create_rule( ListenerArn=http_listener_arn, Priority=priority, Conditions=[{ @@ -900,7 +899,7 @@ def test_handle_listener_rules(): # test for invalid action type safe_priority = 2 with assert_raises(ClientError): - r = conn.create_rule( + conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, Conditions=[{ @@ -921,7 +920,7 @@ def test_handle_listener_rules(): safe_priority = 2 invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x' with assert_raises(ClientError): - r = conn.create_rule( + conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, Conditions=[{ @@ -941,7 +940,7 @@ def 
test_handle_listener_rules(): # test for invalid condition field_name safe_priority = 2 with assert_raises(ClientError): - r = conn.create_rule( + conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, Conditions=[{ @@ -957,7 +956,7 @@ def test_handle_listener_rules(): # test for emptry condition value safe_priority = 2 with assert_raises(ClientError): - r = conn.create_rule( + conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, Conditions=[{ @@ -973,7 +972,7 @@ def test_handle_listener_rules(): # test for multiple condition value safe_priority = 2 with assert_raises(ClientError): - r = conn.create_rule( + conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, Conditions=[{ @@ -1012,7 +1011,7 @@ def test_describe_invalid_target_group(): Scheme='internal', Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + response.get('LoadBalancers')[0].get('LoadBalancerArn') response = conn.create_target_group( Name='a-target', @@ -1027,7 +1026,6 @@ def test_describe_invalid_target_group(): HealthyThresholdCount=5, UnhealthyThresholdCount=2, Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] # Check error raises correctly with assert_raises(ClientError): From 867fc3b7f7c6a53cba4249b1c1485c6c092e6b3f Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 2 Oct 2017 13:35:53 -0700 Subject: [PATCH 341/412] Removing dicttoxml dependency --- moto/redshift/responses.py | 23 +++++++++++++++++++++-- setup.py | 1 - 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index 52ca908e8..58983310f 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -2,7 +2,8 @@ from __future__ import unicode_literals import json -import dicttoxml +import xmltodict + from jinja2 import Template from six import iteritems @@ -26,6 +27,24 @@ def 
convert_json_error_to_xml(json_error): return template.render(code=code, message=message) +def itemize(data): + """ + The xmltodict.unparse requires we modify the shape of the input dictionary slightly. Instead of a dict of the form: + {'key': ['value1', 'value2']} + We must provide: + {'key': {'item': ['value1', 'value2']}} + """ + if isinstance(data, dict): + ret = {} + for key in data: + ret[key] = itemize(data[key]) + return ret + elif isinstance(data, list): + return {'item': [itemize(value) for value in data]} + else: + return data + + class RedshiftResponse(BaseResponse): @property @@ -36,7 +55,7 @@ class RedshiftResponse(BaseResponse): if self.request_json: return json.dumps(response) else: - xml = dicttoxml.dicttoxml(response, attr_type=False, root=False) + xml = xmltodict.unparse(itemize(response), full_document=False) return xml.decode("utf-8") def call_action(self): diff --git a/setup.py b/setup.py index 378119925..0770c098f 100755 --- a/setup.py +++ b/setup.py @@ -13,7 +13,6 @@ install_requires = [ "cryptography>=2.0.0", "requests>=2.5", "xmltodict", - "dicttoxml", "six>1.9", "werkzeug", "pyaml", From 95a4bd5a7b4d73de8efcc2834b5543a3baef26cc Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 2 Oct 2017 15:25:02 -0700 Subject: [PATCH 342/412] supporting python 3 --- moto/redshift/responses.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index 58983310f..a320f9cae 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -56,7 +56,9 @@ class RedshiftResponse(BaseResponse): return json.dumps(response) else: xml = xmltodict.unparse(itemize(response), full_document=False) - return xml.decode("utf-8") + if hasattr(xml, 'decode'): + xml = xml.decode('utf-8') + return xml def call_action(self): status, headers, body = super(RedshiftResponse, self).call_action() From 6e889daaa89dd24ad8888c59898a417437a1c6f1 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 2 
Oct 2017 15:35:36 -0700 Subject: [PATCH 343/412] bumping to version 1.1.21 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 0770c098f..3f6804ce0 100755 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ else: setup( name='moto', - version='1.1.20', + version='1.1.21', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 74cbd08816f3c26e4ada5b3613756300363ca383 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Mon, 2 Oct 2017 15:36:31 -0700 Subject: [PATCH 344/412] changelog for 1.1.21 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 109cdcf31..bbce6c343 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,12 @@ Moto Changelog Latest ------ +1.1.21 +----- + + * ELBv2 bugfixes + * Removing GPL'd dependency + 1.1.20 ----- From fc9c2509228c0bd04300347d50d46d0ff1c7087f Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Tue, 3 Oct 2017 13:33:50 +1300 Subject: [PATCH 345/412] add basic awslambda get_policy --- moto/awslambda/responses.py | 14 ++++++++++++++ moto/awslambda/urls.py | 3 ++- tests/test_awslambda/test_lambda.py | 23 +++++++++++++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index cf92e66f4..972cd7a4e 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -57,6 +57,20 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle {0} request".format(request.method)) + def policy(self, request, full_url, headers): + if request.method == 'GET': + return self._get_policy(request, full_url, headers) + + def _get_policy(self, request, full_url, headers): + lambda_backend = self.get_lambda_backend(full_url) + + path = request.path if hasattr(request, 'path') else request.path_url + function_name = path.split('/')[-2] + if lambda_backend.has_function(function_name): + 
return 200, {}, json.dumps(dict(Policy='test_policy')) + else: + return 404, {}, "{}" + def _invoke(self, request, full_url): response_headers = {} lambda_backend = self.get_lambda_backend(full_url) diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index 0fec24bab..005785f19 100644 --- a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -12,5 +12,6 @@ url_paths = { r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/?$': response.function, r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invocations/?$': response.invoke, r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invoke-async/?$': response.invoke_async, - r'{0}/(?P[^/]+)/tags/(?P.+)': response.tag + r'{0}/(?P[^/]+)/tags/(?P.+)': response.tag, + r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/policy/?$': response.policy } diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 6b67ce0f0..c7fee2745 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -645,3 +645,26 @@ def test_get_function_created_with_zipfile(): } }, ) + +@mock_lambda +def get_function_policy(): + conn = boto3.client('lambda', 'us-west-2') + zip_content = get_test_zip_file1() + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'ZipFile': zip_content, + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.get_policy( + FunctionName='testFunction' + ) + assert response['Policy'] == 'test_policy' \ No newline at end of file From b994cf5291888858c46609449ff0d5d940ea5a59 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Tue, 3 Oct 2017 13:54:37 +1300 Subject: [PATCH 346/412] add more realistic policy --- moto/awslambda/responses.py | 5 ++++- tests/test_awslambda/test_lambda.py | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 972cd7a4e..94c381f5e 100644 
--- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -67,7 +67,10 @@ class LambdaResponse(BaseResponse): path = request.path if hasattr(request, 'path') else request.path_url function_name = path.split('/')[-2] if lambda_backend.has_function(function_name): - return 200, {}, json.dumps(dict(Policy='test_policy')) + policy = ("{\"Statement\":[{\"Action\":[\"lambda:InvokeFunction\"]," + "\"Resource\":\"arn:aws:lambda:us-west-2:account-id:function:helloworld\"," + "\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"account-id\"},\"Sid\":\"3\"}]}") + return 200, {}, json.dumps(dict(Policy=policy)) else: return 404, {}, "{}" diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index c7fee2745..163fa306f 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -667,4 +667,8 @@ def get_function_policy(): response = conn.get_policy( FunctionName='testFunction' ) - assert response['Policy'] == 'test_policy' \ No newline at end of file + + assert 'Policy' in response + assert isinstance(response['Policy'], str) + res = json.loads(response['Policy']) + assert res['Statement'][0]['Action'] == ['lambda:InvokeFunction'] \ No newline at end of file From 9bb07e6b6e42811fa768849618a7c32d9f55d4d1 Mon Sep 17 00:00:00 2001 From: Chris Keogh Date: Tue, 3 Oct 2017 15:23:00 +1300 Subject: [PATCH 347/412] add awslambda.add_permission --- moto/awslambda/models.py | 4 +++ moto/awslambda/responses.py | 20 ++++++++++--- tests/test_awslambda/test_lambda.py | 46 ++++++++++++++++++++++++++++- 3 files changed, 65 insertions(+), 5 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index d22d1a7f4..935abbcd6 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -132,6 +132,7 @@ class LambdaFunction(BaseModel): self.logs_backend = logs_backends[self.region] self.environment_vars = spec.get('Environment', {}).get('Variables', {}) self.docker_client = docker.from_env() + 
self.policy = "" # Unfortunately mocking replaces this method w/o fallback enabled, so we # need to replace it if we detect it's been mocked @@ -527,6 +528,9 @@ class LambdaBackend(BaseBackend): pass # Don't care + def add_policy(self, function_name, policy): + self.get_function(function_name).policy = policy + def do_validate_s3(): return os.environ.get('VALIDATE_LAMBDA_S3', '') in ['', '1', 'true'] diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 94c381f5e..5215f63c5 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -60,6 +60,20 @@ class LambdaResponse(BaseResponse): def policy(self, request, full_url, headers): if request.method == 'GET': return self._get_policy(request, full_url, headers) + if request.method == 'POST': + return self._add_policy(request, full_url, headers) + + def _add_policy(self, request, full_url, headers): + lambda_backend = self.get_lambda_backend(full_url) + + path = request.path if hasattr(request, 'path') else request.path_url + function_name = path.split('/')[-2] + if lambda_backend.has_function(function_name): + policy = request.body.decode('utf8') + lambda_backend.add_policy(function_name, policy) + return 200, {}, json.dumps(dict(Statement=policy)) + else: + return 404, {}, "{}" def _get_policy(self, request, full_url, headers): lambda_backend = self.get_lambda_backend(full_url) @@ -67,10 +81,8 @@ class LambdaResponse(BaseResponse): path = request.path if hasattr(request, 'path') else request.path_url function_name = path.split('/')[-2] if lambda_backend.has_function(function_name): - policy = ("{\"Statement\":[{\"Action\":[\"lambda:InvokeFunction\"]," - "\"Resource\":\"arn:aws:lambda:us-west-2:account-id:function:helloworld\"," - "\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"account-id\"},\"Sid\":\"3\"}]}") - return 200, {}, json.dumps(dict(Policy=policy)) + function = lambda_backend.get_function(function_name) + return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + 
function.policy + "]}")) else: return 404, {}, "{}" diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 163fa306f..317e9f4a2 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -646,6 +646,39 @@ def test_get_function_created_with_zipfile(): }, ) +@mock_lambda +def add_function_permission(): + conn = boto3.client('lambda', 'us-west-2') + zip_content = get_test_zip_file1() + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'ZipFile': zip_content, + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.add_permission( + FunctionName='testFunction', + StatementId='1', + Action="lambda:InvokeFunction", + Principal='432143214321', + SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld", + SourceAccount='123412341234', + EventSourceToken='blah', + Qualifier='2' + ) + assert 'Statement' in response + res = json.loads(response['Statement']) + assert res['Action'] == "lambda:InvokeFunction" + + @mock_lambda def get_function_policy(): conn = boto3.client('lambda', 'us-west-2') @@ -664,6 +697,17 @@ def get_function_policy(): Publish=True, ) + response = conn.add_permission( + FunctionName='testFunction', + StatementId='1', + Action="lambda:InvokeFunction", + Principal='432143214321', + SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld", + SourceAccount='123412341234', + EventSourceToken='blah', + Qualifier='2' + ) + response = conn.get_policy( FunctionName='testFunction' ) @@ -671,4 +715,4 @@ def get_function_policy(): assert 'Policy' in response assert isinstance(response['Policy'], str) res = json.loads(response['Policy']) - assert res['Statement'][0]['Action'] == ['lambda:InvokeFunction'] \ No newline at end of file + assert res['Statement'][0]['Action'] == 'lambda:InvokeFunction' From 
acb1cac0b7da8f5f18638caa629ff59455a999c2 Mon Sep 17 00:00:00 2001 From: Alexander Mohr Date: Tue, 3 Oct 2017 13:56:04 -0700 Subject: [PATCH 348/412] add note that lambda mock requires docker (#1236) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 92ad5d9c0..7ced7b895 100644 --- a/README.md +++ b/README.md @@ -94,7 +94,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | IAM | @mock_iam | core endpoints done | |------------------------------------------------------------------------------| -| Lambda | @mock_lambda | basic endpoints done | +| Lambda | @mock_lambda | basic endpoints done, requires | +| | | docker | |------------------------------------------------------------------------------| | Logs | @mock_logs | basic endpoints done | |------------------------------------------------------------------------------| From 88a11b21aed178d849ff20523350099c83be5e3b Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 3 Oct 2017 22:35:30 +0100 Subject: [PATCH 349/412] Added DeleteComputeEnvironment and UpdateComputeEnvironment --- moto/batch/exceptions.py | 5 ++ moto/batch/models.py | 70 +++++++++++++++++++-- moto/batch/responses.py | 34 ++++++++++ moto/batch/urls.py | 2 + tests/test_batch/test_batch.py | 109 +++++++++++++++++++++++++++++++++ 5 files changed, 216 insertions(+), 4 deletions(-) diff --git a/moto/batch/exceptions.py b/moto/batch/exceptions.py index cd6031a95..a71e54ce3 100644 --- a/moto/batch/exceptions.py +++ b/moto/batch/exceptions.py @@ -30,3 +30,8 @@ class ValidationError(AWSError): class InternalFailure(AWSError): CODE = 'InternalFailure' STATUS = 500 + + +class ClientException(AWSError): + CODE = 'ClientException' + STATUS = 400 diff --git a/moto/batch/models.py b/moto/batch/models.py index 7ed75e749..8572a46c0 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ 
-9,7 +9,7 @@ from moto.iam import iam_backends from moto.ec2 import ec2_backends from moto.ecs import ecs_backends -from .exceptions import InvalidParameterValueException, InternalFailure +from .exceptions import InvalidParameterValueException, InternalFailure, ClientException from .utils import make_arn_for_compute_env from moto.ec2.exceptions import InvalidSubnetIdError from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES @@ -31,12 +31,14 @@ class ComputeEnvironment(BaseModel): self.instances = [] self.ecs_arn = None + self.ecs_name = None def add_instance(self, instance): self.instances.append(instance) - def set_ecs_arn(self, arn): + def set_ecs(self, arn, name): self.ecs_arn = arn + self.ecs_name = name class BatchBackend(BaseBackend): @@ -75,7 +77,7 @@ class BatchBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def get_compute_environment(self, arn): + def get_compute_environment_arn(self, arn): return self._compute_environments.get(arn) def get_compute_environment_by_name(self, name): @@ -84,6 +86,20 @@ class BatchBackend(BaseBackend): return comp_env return None + def get_compute_environment(self, identifier): + """ + Get compute environment by name or ARN + :param identifier: Name or ARN + :type identifier: str + + :return: Compute Environment or None + :rtype: ComputeEnvironment or None + """ + env = self.get_compute_environment_arn(identifier) + if env is None: + env = self.get_compute_environment_by_name(identifier) + return env + def describe_compute_environments(self, environments=None, max_results=None, next_token=None): envs = set() if environments is not None: @@ -173,7 +189,7 @@ class BatchBackend(BaseBackend): # Should be of format P2OnDemand_Batch_UUID cluster_name = 'OnDemand_Batch_' + str(uuid.uuid4()) ecs_cluster = self.ecs_backend.create_cluster(cluster_name) - new_comp_env.set_ecs_arn(ecs_cluster.arn) + new_comp_env.set_ecs(ecs_cluster.arn, cluster_name) return compute_environment_name, new_comp_env.arn @@ 
-271,6 +287,52 @@ class BatchBackend(BaseBackend): return instances + def delete_compute_environment(self, compute_environment_name): + if compute_environment_name is None: + raise InvalidParameterValueException('Missing computeEnvironment parameter') + + compute_env = self.get_compute_environment(compute_environment_name) + + if compute_env is not None: + # Pop ComputeEnvironment + self._compute_environments.pop(compute_env.arn) + + # Delete ECS cluster + self.ecs_backend.delete_cluster(compute_env.ecs_name) + + if compute_env.type == 'MANAGED': + # Delete compute envrionment + instance_ids = [instance.id for instance in compute_env.instances] + self.ec2_backend.terminate_instances(instance_ids) + + def update_compute_environment(self, compute_environment_name, state, compute_resources, service_role): + # Validate + compute_env = self.get_compute_environment(compute_environment_name) + if compute_env is None: + raise ClientException('Compute environment {0} does not exist') + + # Look for IAM role + if service_role is not None: + try: + role = self.iam_backend.get_role_by_arn(service_role) + except IAMNotFoundException: + raise InvalidParameterValueException('Could not find IAM role {0}'.format(service_role)) + + compute_env.service_role = role + + if state is not None: + if state not in ('ENABLED', 'DISABLED'): + raise InvalidParameterValueException('state {0} must be one of ENABLED | DISABLED'.format(state)) + + compute_env.state = state + + if compute_resources is not None: + # TODO Implement resizing of instances based on changing vCpus + # compute_resources CAN contain desiredvCpus, maxvCpus, minvCpus, and can contain none of them. 
+ pass + + return compute_env.name, compute_env.arn + available_regions = boto3.session.Session().get_available_regions("batch") batch_backends = {region: BatchBackend(region_name=region) for region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py index 590cc27a4..86ee4fdfe 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -76,3 +76,37 @@ class BatchResponse(BaseResponse): result = {'computeEnvironments': envs} return json.dumps(result) + + # DeleteComputeEnvironment + def deletecomputeenvironment(self): + compute_environment = self._get_param('computeEnvironment') + + try: + self.batch_backend.delete_compute_environment(compute_environment) + except AWSError as err: + return err.response() + + return '' + + def updatecomputeenvironment(self): + compute_env_name = self._get_param('computeEnvironment') + compute_resource = self._get_param('computeResources') + service_role = self._get_param('serviceRole') + state = self._get_param('state') + + try: + name, arn = self.batch_backend.update_compute_environment( + compute_environment_name=compute_env_name, + compute_resources=compute_resource, + service_role=service_role, + state=state + ) + except AWSError as err: + return err.response() + + result = { + 'computeEnvironmentArn': arn, + 'computeEnvironmentName': name + } + + return json.dumps(result) diff --git a/moto/batch/urls.py b/moto/batch/urls.py index 18de99199..ef8f7927a 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -8,4 +8,6 @@ url_bases = [ url_paths = { '{0}/v1/createcomputeenvironment$': BatchResponse.dispatch, '{0}/v1/describecomputeenvironments$': BatchResponse.dispatch, + '{0}/v1/deletecomputeenvironment': BatchResponse.dispatch, + '{0}/v1/updatecomputeenvironment': BatchResponse.dispatch, } diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index aceb95804..159f255c3 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -156,3 
+156,112 @@ def test_describe_compute_environment(): ) len(resp['computeEnvironments']).should.equal(0) + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_delete_unmanaged_compute_environment(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + + batch_client.delete_compute_environment( + computeEnvironment=compute_name, + ) + + resp = batch_client.describe_compute_environments() + len(resp['computeEnvironments']).should.equal(0) + + resp = ecs_client.list_clusters() + len(resp.get('clusterArns', [])).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_delete_managed_compute_environment(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='MANAGED', + state='ENABLED', + computeResources={ + 'type': 'EC2', + 'minvCpus': 5, + 'maxvCpus': 10, + 'desiredvCpus': 5, + 'instanceTypes': [ + 't2.small', + 't2.medium' + ], + 'imageId': 'some_image_id', + 'subnets': [ + subnet_id, + ], + 'securityGroupIds': [ + sg_id, + ], + 'ec2KeyPair': 'string', + 'instanceRole': iam_arn, + 'tags': { + 'string': 'string' + }, + 'bidPercentage': 123, + 'spotIamFleetRole': 'string' + }, + serviceRole=iam_arn + ) + + batch_client.delete_compute_environment( + computeEnvironment=compute_name, + ) + + resp = batch_client.describe_compute_environments() + len(resp['computeEnvironments']).should.equal(0) + + resp = ec2_client.describe_instances() + resp.should.contain('Reservations') + len(resp['Reservations']).should.equal(3) + for reservation in resp['Reservations']: + 
reservation['Instances'][0]['State']['Name'].should.equal('terminated') + + resp = ecs_client.list_clusters() + len(resp.get('clusterArns', [])).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_update_unmanaged_compute_environment_state(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + + batch_client.update_compute_environment( + computeEnvironment=compute_name, + state='DISABLED' + ) + + resp = batch_client.describe_compute_environments() + len(resp['computeEnvironments']).should.equal(1) + resp['computeEnvironments'][0]['state'].should.equal('DISABLED') From 15218df12fafd451ba03317696f7a71323a1e0dc Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 3 Oct 2017 23:21:06 +0100 Subject: [PATCH 350/412] Added CreateJobQueue and DescribeJobQueue --- moto/batch/models.py | 127 ++++++++++++++++++++++++++++++++- moto/batch/responses.py | 36 ++++++++++ moto/batch/urls.py | 2 + moto/batch/utils.py | 4 ++ tests/test_batch/test_batch.py | 71 ++++++++++++++++++ 5 files changed, 237 insertions(+), 3 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index 8572a46c0..e336a60d7 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -10,7 +10,7 @@ from moto.ec2 import ec2_backends from moto.ecs import ecs_backends from .exceptions import InvalidParameterValueException, InternalFailure, ClientException -from .utils import make_arn_for_compute_env +from .utils import make_arn_for_compute_env, make_arn_for_job_queue from moto.ec2.exceptions import InvalidSubnetIdError from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES from moto.iam.exceptions import IAMNotFoundException @@ -41,12 +41,50 @@ class ComputeEnvironment(BaseModel): self.ecs_name = 
name +class JobQueue(BaseModel): + def __init__(self, name, priority, state, environments, env_order_json, region_name): + """ + :param name: Job queue name + :type name: str + :param priority: Job queue priority + :type priority: int + :param state: Either ENABLED or DISABLED + :type state: str + :param environments: Compute Environments + :type environments: list of ComputeEnvironment + :param env_order_json: Compute Environments JSON for use when describing + :type env_order_json: list of dict + :param region_name: Region name + :type region_name: str + """ + self.name = name + self.priority = priority + self.state = state + self.environments = environments + self.env_order_json = env_order_json + self.arn = make_arn_for_job_queue(DEFAULT_ACCOUNT_ID, name, region_name) + self.status = 'VALID' + + def describe(self): + result = { + 'computeEnvironmentOrder': self.env_order_json, + 'jobQueueArn': self.arn, + 'jobQueueName': self.name, + 'priority': self.priority, + 'state': self.state, + 'status': self.status + } + + return result + + class BatchBackend(BaseBackend): def __init__(self, region_name=None): super(BatchBackend, self).__init__() self.region_name = region_name self._compute_environments = {} + self._job_queues = {} @property def iam_backend(self): @@ -77,7 +115,7 @@ class BatchBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def get_compute_environment_arn(self, arn): + def get_compute_environment_by_arn(self, arn): return self._compute_environments.get(arn) def get_compute_environment_by_name(self, name): @@ -95,11 +133,34 @@ class BatchBackend(BaseBackend): :return: Compute Environment or None :rtype: ComputeEnvironment or None """ - env = self.get_compute_environment_arn(identifier) + env = self.get_compute_environment_by_arn(identifier) if env is None: env = self.get_compute_environment_by_name(identifier) return env + def get_job_queue_by_arn(self, arn): + return self._job_queues.get(arn) + + def get_job_queue_by_name(self, 
name): + for comp_env in self._job_queues.values(): + if comp_env.name == name: + return comp_env + return None + + def get_job_queue(self, identifier): + """ + Get job queue by name or ARN + :param identifier: Name or ARN + :type identifier: str + + :return: Job Queue or None + :rtype: JobQueue or None + """ + env = self.get_job_queue_by_arn(identifier) + if env is None: + env = self.get_job_queue_by_name(identifier) + return env + def describe_compute_environments(self, environments=None, max_results=None, next_token=None): envs = set() if environments is not None: @@ -333,6 +394,66 @@ class BatchBackend(BaseBackend): return compute_env.name, compute_env.arn + def create_job_queue(self, queue_name, priority, state, compute_env_order): + """ + Create a job queue + + :param queue_name: Queue name + :type queue_name: str + :param priority: Queue priority + :type priority: int + :param state: Queue state + :type state: string + :param compute_env_order: Compute environment list + :type compute_env_order: list of dict + :return: Tuple of Name, ARN + :rtype: tuple of str + """ + for variable, var_name in ((queue_name, 'jobQueueName'), (priority, 'priority'), (state, 'state'), (compute_env_order, 'computeEnvironmentOrder')): + if variable is None: + raise ClientException('{0} must be provided'.format(var_name)) + + if state not in ('ENABLED', 'DISABLED'): + raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state)) + if self.get_job_queue_by_name(queue_name) is not None: + raise ClientException('Job queue {0} already exists'.format(queue_name)) + + if len(compute_env_order) == 0: + raise ClientException('At least 1 compute environment must be provided') + try: + # orders and extracts computeEnvironment names + ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])] + env_objects = [] + # Check each ARN exists, then make a list of compute env's + for arn in 
ordered_compute_environments: + env = self.get_compute_environment_by_arn(arn) + if env is None: + raise ClientException('Compute environment {0} does not exist'.format(arn)) + env_objects.append(env) + except Exception: + raise ClientException('computeEnvironmentOrder is malformed') + + # Create new Job Queue + queue = JobQueue(queue_name, priority, state, env_objects, compute_env_order, self.region_name) + self._job_queues[queue.arn] = queue + + return queue_name, queue.arn + + def describe_job_queues(self, job_queues=None, max_results=None, next_token=None): + envs = set() + if job_queues is not None: + envs = set(job_queues) + + result = [] + for arn, job_queue in self._job_queues.items(): + # Filter shortcut + if len(envs) > 0 and arn not in envs and job_queue.name not in envs: + continue + + result.append(job_queue.describe()) + + return result + available_regions = boto3.session.Session().get_available_regions("batch") batch_backends = {region: BatchBackend(region_name=region) for region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py index 86ee4fdfe..661b9c7c2 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -88,6 +88,7 @@ class BatchResponse(BaseResponse): return '' + # UpdateComputeEnvironment def updatecomputeenvironment(self): compute_env_name = self._get_param('computeEnvironment') compute_resource = self._get_param('computeResources') @@ -110,3 +111,38 @@ class BatchResponse(BaseResponse): } return json.dumps(result) + + # CreateJobQueue + def createjobqueue(self): + compute_env_order = self._get_param('computeEnvironmentOrder') + queue_name = self._get_param('jobQueueName') + priority = self._get_param('priority') + state = self._get_param('state') + + try: + name, arn = self.batch_backend.create_job_queue( + queue_name=queue_name, + priority=priority, + state=state, + compute_env_order=compute_env_order + ) + except AWSError as err: + return err.response() + + result = { + 'jobQueueArn': arn, 
+ 'jobQueueName': name + } + + return json.dumps(result) + + # DescribeJobQueues + def describejobqueues(self): + job_queues = self._get_param('jobQueues') + max_results = self._get_param('maxResults') # Ignored, should be int + next_token = self._get_param('nextToken') # Ignored + + queues = self.batch_backend.describe_job_queues(job_queues, max_results=max_results, next_token=next_token) + + result = {'jobQueues': queues} + return json.dumps(result) diff --git a/moto/batch/urls.py b/moto/batch/urls.py index ef8f7927a..227e78ecf 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -10,4 +10,6 @@ url_paths = { '{0}/v1/describecomputeenvironments$': BatchResponse.dispatch, '{0}/v1/deletecomputeenvironment': BatchResponse.dispatch, '{0}/v1/updatecomputeenvironment': BatchResponse.dispatch, + '{0}/v1/createjobqueue': BatchResponse.dispatch, + '{0}/v1/describejobqueues': BatchResponse.dispatch, } diff --git a/moto/batch/utils.py b/moto/batch/utils.py index d323a9bf7..68c6a3581 100644 --- a/moto/batch/utils.py +++ b/moto/batch/utils.py @@ -3,3 +3,7 @@ from __future__ import unicode_literals def make_arn_for_compute_env(account_id, name, region_name): return "arn:aws:batch:{0}:{1}:compute-environment/{2}".format(region_name, account_id, name) + + +def make_arn_for_job_queue(account_id, name, region_name): + return "arn:aws:batch:{0}:{1}:job-queue/{2}".format(region_name, account_id, name) diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 159f255c3..6bf68a6fc 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import boto3 +from botocore.exceptions import ClientError import sure # noqa from moto import mock_batch, mock_iam, mock_ec2, mock_ecs @@ -265,3 +266,73 @@ def test_update_unmanaged_compute_environment_state(): resp = batch_client.describe_compute_environments() len(resp['computeEnvironments']).should.equal(1) 
resp['computeEnvironments'][0]['state'].should.equal('DISABLED') + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_job_queue(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + resp.should.contain('jobQueueArn') + resp.should.contain('jobQueueName') + queue_arn = resp['jobQueueArn'] + + resp = batch_client.describe_job_queues() + resp.should.contain('jobQueues') + resp['jobQueues'][0]['jobQueueArn'].should.equal(queue_arn) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_job_queue_bad_arn(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + try: + batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + 'LALALA' + }, + ] + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ClientException') From b8f24298fda5458b1de889c52591c1bd287e4e5b Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 3 Oct 2017 23:28:10 +0100 Subject: [PATCH 351/412] Added filtering test part --- tests/test_batch/test_batch.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git 
a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 6bf68a6fc..b082d656e 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -302,8 +302,13 @@ def test_create_job_queue(): resp = batch_client.describe_job_queues() resp.should.contain('jobQueues') + len(resp['jobQueues']).should.equal(1) resp['jobQueues'][0]['jobQueueArn'].should.equal(queue_arn) + resp = batch_client.describe_job_queues(jobQueues=['test_invalid_queue']) + resp.should.contain('jobQueues') + len(resp['jobQueues']).should.equal(0) + @mock_ec2 @mock_ecs From 8cb381f7252bc9aeff3a5ac52fb35ee2d4e166e5 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 4 Oct 2017 00:20:39 +0100 Subject: [PATCH 352/412] Possible import order fix --- moto/__init__.py | 2 +- moto/backends.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/__init__.py b/moto/__init__.py index f7a74211e..3a3d6f0ac 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -9,7 +9,6 @@ from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8: noqa from .awslambda import mock_lambda, mock_lambda_deprecated # flake8: noqa -from .batch import mock_batch # flake8: noqa from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated # flake8: noqa from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa @@ -41,6 +40,7 @@ from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa from .swf import mock_swf, mock_swf_deprecated # flake8: noqa from .xray import mock_xray # flake8: noqa from .logs import mock_logs, mock_logs_deprecated # flake8: noqa +from .batch import mock_batch # flake8: noqa try: diff --git a/moto/backends.py b/moto/backends.py index 1a401ca06..d1ce0730e 100644 --- 
a/moto/backends.py +++ b/moto/backends.py @@ -4,7 +4,6 @@ from moto.acm import acm_backends from moto.apigateway import apigateway_backends from moto.autoscaling import autoscaling_backends from moto.awslambda import lambda_backends -from moto.batch import batch_backends from moto.cloudformation import cloudformation_backends from moto.cloudwatch import cloudwatch_backends from moto.core import moto_api_backends @@ -36,6 +35,7 @@ from moto.sqs import sqs_backends from moto.ssm import ssm_backends from moto.sts import sts_backends from moto.xray import xray_backends +from moto.batch import batch_backends BACKENDS = { 'acm': acm_backends, From 8441e44e802426cbd9aa82852708a01b6b2b7fe2 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 4 Oct 2017 01:09:28 +0100 Subject: [PATCH 353/412] Possible fix V2 --- moto/elbv2/urls.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/elbv2/urls.py b/moto/elbv2/urls.py index 13a8e056f..b7d8adb58 100644 --- a/moto/elbv2/urls.py +++ b/moto/elbv2/urls.py @@ -1,10 +1,11 @@ from __future__ import unicode_literals from .responses import ELBV2Response +from ..elb.urls import api_version_elb_backend url_bases = [ "https?://elasticloadbalancing.(.+).amazonaws.com", ] url_paths = { - '{0}/$': ELBV2Response.dispatch, + '{0}/$': api_version_elb_backend, } From 2249eee49df9d4c7edbbea96013c8bbddb06a072 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 4 Oct 2017 01:13:34 +0100 Subject: [PATCH 354/412] Potential fix V3 --- moto/elbv2/urls.py | 1 - 1 file changed, 1 deletion(-) diff --git a/moto/elbv2/urls.py b/moto/elbv2/urls.py index b7d8adb58..af51f7d3a 100644 --- a/moto/elbv2/urls.py +++ b/moto/elbv2/urls.py @@ -1,5 +1,4 @@ from __future__ import unicode_literals -from .responses import ELBV2Response from ..elb.urls import api_version_elb_backend url_bases = [ From 93b8da04376d147703812fd39880096531672d1f Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 4 Oct 2017 01:21:30 +0100 Subject: [PATCH 355/412] Use 
long format for EC2 instance ID --- moto/ec2/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index ab54ea3a8..32122c763 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -51,7 +51,7 @@ def random_ami_id(): def random_instance_id(): - return random_id(prefix=EC2_RESOURCE_TO_PREFIX['instance']) + return random_id(prefix=EC2_RESOURCE_TO_PREFIX['instance'], size=17) def random_reservation_id(): From 4a45acc216a6d4f66d194ea83f3cc7f3c7e93c26 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 4 Oct 2017 18:52:12 +0100 Subject: [PATCH 356/412] Implemented Update and Delete job queue --- moto/batch/models.py | 58 ++++++++++++++++++++++++ moto/batch/responses.py | 32 ++++++++++++++ moto/batch/urls.py | 2 + tests/test_batch/test_batch.py | 80 ++++++++++++++++++++++++++++++++++ 4 files changed, 172 insertions(+) diff --git a/moto/batch/models.py b/moto/batch/models.py index e336a60d7..6eb02c39c 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -454,6 +454,64 @@ class BatchBackend(BaseBackend): return result + def update_job_queue(self, queue_name, priority, state, compute_env_order): + """ + Update a job queue + + :param queue_name: Queue name + :type queue_name: str + :param priority: Queue priority + :type priority: int + :param state: Queue state + :type state: string + :param compute_env_order: Compute environment list + :type compute_env_order: list of dict + :return: Tuple of Name, ARN + :rtype: tuple of str + """ + if queue_name is None: + raise ClientException('jobQueueName must be provided') + + job_queue = self.get_job_queue(queue_name) + if job_queue is None: + raise ClientException('Job queue {0} does not exist'.format(queue_name)) + + if state is not None: + if state not in ('ENABLED', 'DISABLED'): + raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state)) + + job_queue.state = state + + if compute_env_order is not None: + if len(compute_env_order) == 
0: + raise ClientException('At least 1 compute environment must be provided') + try: + # orders and extracts computeEnvironment names + ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])] + env_objects = [] + # Check each ARN exists, then make a list of compute env's + for arn in ordered_compute_environments: + env = self.get_compute_environment_by_arn(arn) + if env is None: + raise ClientException('Compute environment {0} does not exist'.format(arn)) + env_objects.append(env) + except Exception: + raise ClientException('computeEnvironmentOrder is malformed') + + job_queue.env_order_json = compute_env_order + job_queue.environments = env_objects + + if priority is not None: + job_queue.priority = priority + + return queue_name, job_queue.arn + + def delete_job_queue(self, queue_name): + job_queue = self.get_job_queue(queue_name) + + if job_queue is not None: + del self._job_queues[job_queue.arn] + available_regions = boto3.session.Session().get_available_regions("batch") batch_backends = {region: BatchBackend(region_name=region) for region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py index 661b9c7c2..7c870382e 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -146,3 +146,35 @@ class BatchResponse(BaseResponse): result = {'jobQueues': queues} return json.dumps(result) + + # UpdateJobQueue + def updatejobqueue(self): + compute_env_order = self._get_param('computeEnvironmentOrder') + queue_name = self._get_param('jobQueue') + priority = self._get_param('priority') + state = self._get_param('state') + + try: + name, arn = self.batch_backend.update_job_queue( + queue_name=queue_name, + priority=priority, + state=state, + compute_env_order=compute_env_order + ) + except AWSError as err: + return err.response() + + result = { + 'jobQueueArn': arn, + 'jobQueueName': name + } + + return json.dumps(result) + + # DeleteJobQueue + def 
deletejobqueue(self): + queue_name = self._get_param('jobQueue') + + self.batch_backend.delete_job_queue(queue_name) + + return '' diff --git a/moto/batch/urls.py b/moto/batch/urls.py index 227e78ecf..bc186bd29 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -12,4 +12,6 @@ url_paths = { '{0}/v1/updatecomputeenvironment': BatchResponse.dispatch, '{0}/v1/createjobqueue': BatchResponse.dispatch, '{0}/v1/describejobqueues': BatchResponse.dispatch, + '{0}/v1/updatejobqueue': BatchResponse.dispatch, + '{0}/v1/deletejobqueue': BatchResponse.dispatch } diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index b082d656e..e7c4cf629 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -341,3 +341,83 @@ def test_job_queue_bad_arn(): ) except ClientError as err: err.response['Error']['Code'].should.equal('ClientException') + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_update_job_queue(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + batch_client.update_job_queue( + jobQueue=queue_arn, + priority=5 + ) + + resp = batch_client.describe_job_queues() + resp.should.contain('jobQueues') + len(resp['jobQueues']).should.equal(1) + resp['jobQueues'][0]['priority'].should.equal(5) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_update_job_queue(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, 
iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + batch_client.delete_job_queue( + jobQueue=queue_arn + ) + + resp = batch_client.describe_job_queues() + resp.should.contain('jobQueues') + len(resp['jobQueues']).should.equal(0) From 558f246115784cb59cc6a17b1fd67f6fac16894f Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 4 Oct 2017 20:17:29 +0100 Subject: [PATCH 357/412] Added RegisterJobDefinition --- moto/batch/models.py | 90 +++++++++++++++++++++++++++++++++- moto/batch/responses.py | 27 ++++++++++ moto/batch/urls.py | 3 +- moto/batch/utils.py | 4 ++ tests/test_batch/test_batch.py | 26 ++++++++++ 5 files changed, 148 insertions(+), 2 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index 6eb02c39c..2129320e7 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -10,7 +10,7 @@ from moto.ec2 import ec2_backends from moto.ecs import ecs_backends from .exceptions import InvalidParameterValueException, InternalFailure, ClientException -from .utils import make_arn_for_compute_env, make_arn_for_job_queue +from .utils import make_arn_for_compute_env, make_arn_for_job_queue, make_arn_for_task_def from moto.ec2.exceptions import InvalidSubnetIdError from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES from moto.iam.exceptions import IAMNotFoundException @@ -78,6 +78,52 @@ class JobQueue(BaseModel): return result +class JobDefinition(BaseModel): + def __init__(self, name, parameters, _type, container_properties, region_name, revision=0, retry_strategy=0): + self.name = name + 
self.retries = retry_strategy + self.type = _type + self.revision = revision + self._region = region_name + self.container_properties = container_properties + self.arn = None + + self.parameters = {} + if parameters is not None: + if not isinstance(parameters, dict): + raise ClientException('parameters must be a string to string map') + self.parameters = parameters + + if _type not in ('container',): + raise ClientException('type must be one of "container"') + + self._update_arn() + + # For future use when containers arnt the only thing in batch + if _type != 'container': + raise NotImplementedError() + + self._validate_container_properties() + + def _update_arn(self): + self.revision += 1 + self.arn = make_arn_for_task_def(DEFAULT_ACCOUNT_ID, self.name, self.revision, self._region) + + def _validate_container_properties(self): + if 'image' not in self.container_properties: + raise ClientException('containerProperties must contain image') + + if 'memory' not in self.container_properties: + raise ClientException('containerProperties must contain memory') + if self.container_properties['memory'] < 4: + raise ClientException('container memory limit must be greater than 4') + + if 'vcpus' not in self.container_properties: + raise ClientException('containerProperties must contain vcpus') + if self.container_properties['vcpus'] < 1: + raise ClientException('container vcpus limit must be greater than 0') + + class BatchBackend(BaseBackend): def __init__(self, region_name=None): super(BatchBackend, self).__init__() @@ -85,6 +131,7 @@ class BatchBackend(BaseBackend): self._compute_environments = {} self._job_queues = {} + self._job_definitions = {} @property def iam_backend(self): @@ -161,6 +208,29 @@ class BatchBackend(BaseBackend): env = self.get_job_queue_by_name(identifier) return env + def get_job_definition_by_arn(self, arn): + return self._job_definitions.get(arn) + + def get_job_definition_by_name(self, name): + for comp_env in self._job_definitions.values(): + if 
comp_env.name == name: + return comp_env + return None + + def get_job_definition(self, identifier): + """ + Get job queue by name or ARN + :param identifier: Name or ARN + :type identifier: str + + :return: Job Queue or None + :rtype: JobQueue or None + """ + env = self.get_job_definition_by_arn(identifier) + if env is None: + env = self.get_job_definition_by_name(identifier) + return env + def describe_compute_environments(self, environments=None, max_results=None, next_token=None): envs = set() if environments is not None: @@ -512,6 +582,24 @@ class BatchBackend(BaseBackend): if job_queue is not None: del self._job_queues[job_queue.arn] + def register_job_definition(self, def_name, parameters, _type, retry_strategy, container_properties): + if def_name is None: + raise ClientException('jobDefinitionName must be provided') + + if self.get_job_definition_by_name(def_name) is not None: + raise ClientException('A job definition called {0} already exists'.format(def_name)) + + if retry_strategy is not None: + try: + retry_strategy = retry_strategy['attempts'] + except Exception: + raise ClientException('retryStrategy is malformed') + + job_def = JobDefinition(def_name, parameters, _type, container_properties, region_name=self.region_name, retry_strategy=retry_strategy) + self._job_definitions[job_def.arn] = job_def + + return def_name, job_def.arn, job_def.revision + available_regions = boto3.session.Session().get_available_regions("batch") batch_backends = {region: BatchBackend(region_name=region) for region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py index 7c870382e..dec740221 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -178,3 +178,30 @@ class BatchResponse(BaseResponse): self.batch_backend.delete_job_queue(queue_name) return '' + + # RegisterJobDefinition + def registerjobdefinition(self): + container_properties = self._get_param('containerProperties') + def_name = 
self._get_param('jobDefinitionName') + parameters = self._get_param('parameters') + retry_strategy = self._get_param('retryStrategy') + _type = self._get_param('type') + + try: + name, arn, revision = self.batch_backend.register_job_definition( + def_name=def_name, + parameters=parameters, + _type=_type, + retry_strategy=retry_strategy, + container_properties=container_properties + ) + except AWSError as err: + return err.response() + + result = { + 'jobDefinitionArn': arn, + 'jobDefinitionName': name, + 'revision': revision + } + + return json.dumps(result) diff --git a/moto/batch/urls.py b/moto/batch/urls.py index bc186bd29..cd5ccb00c 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -13,5 +13,6 @@ url_paths = { '{0}/v1/createjobqueue': BatchResponse.dispatch, '{0}/v1/describejobqueues': BatchResponse.dispatch, '{0}/v1/updatejobqueue': BatchResponse.dispatch, - '{0}/v1/deletejobqueue': BatchResponse.dispatch + '{0}/v1/deletejobqueue': BatchResponse.dispatch, + '{0}/v1/registerjobdefinition': BatchResponse.dispatch } diff --git a/moto/batch/utils.py b/moto/batch/utils.py index 68c6a3581..6cdd381f7 100644 --- a/moto/batch/utils.py +++ b/moto/batch/utils.py @@ -7,3 +7,7 @@ def make_arn_for_compute_env(account_id, name, region_name): def make_arn_for_job_queue(account_id, name, region_name): return "arn:aws:batch:{0}:{1}:job-queue/{2}".format(region_name, account_id, name) + + +def make_arn_for_task_def(account_id, name, revision, region_name): + return "arn:aws:batch:{0}:{1}:job-definition/{2}:{3}".format(region_name, account_id, name, revision) diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index e7c4cf629..6eba45d27 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -421,3 +421,29 @@ def test_update_job_queue(): resp = batch_client.describe_job_queues() resp.should.contain('jobQueues') len(resp['jobQueues']).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def 
test_register_task_definition(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + + resp.should.contain('jobDefinitionArn') + resp.should.contain('jobDefinitionName') + resp.should.contain('revision') + + assert resp['jobDefinitionArn'].endswith('{0}:{1}'.format(resp['jobDefinitionName'], resp['revision'])) From 0ca3fcc7a23c453a6c2c675af01b416f2962a91b Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Thu, 5 Oct 2017 00:00:40 +0100 Subject: [PATCH 358/412] Added DescribeJobDefinitions --- moto/batch/models.py | 137 +++++++++++++++++++++++++++------ moto/batch/responses.py | 21 +++++ moto/batch/urls.py | 4 +- tests/test_batch/test_batch.py | 124 +++++++++++++++++++++++++++++ 4 files changed, 262 insertions(+), 24 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index 2129320e7..bfbdcf4a5 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -87,29 +87,30 @@ class JobDefinition(BaseModel): self._region = region_name self.container_properties = container_properties self.arn = None + self.status = 'INACTIVE' - self.parameters = {} - if parameters is not None: - if not isinstance(parameters, dict): - raise ClientException('parameters must be a string to string map') - self.parameters = parameters - - if _type not in ('container',): - raise ClientException('type must be one of "container"') + if parameters is None: + parameters = {} + self.parameters = parameters + self._validate() self._update_arn() - # For future use when containers arnt the only thing in batch - if _type != 'container': - raise NotImplementedError() - - self._validate_container_properties() - def _update_arn(self): self.revision += 1 self.arn = 
make_arn_for_task_def(DEFAULT_ACCOUNT_ID, self.name, self.revision, self._region) - def _validate_container_properties(self): + def _validate(self): + if self.type not in ('container',): + raise ClientException('type must be one of "container"') + + # For future use when containers arnt the only thing in batch + if self.type != 'container': + raise NotImplementedError() + + if not isinstance(self.parameters, dict): + raise ClientException('parameters must be a string to string map') + if 'image' not in self.container_properties: raise ClientException('containerProperties must contain image') @@ -123,6 +124,37 @@ class JobDefinition(BaseModel): if self.container_properties['vcpus'] < 1: raise ClientException('container vcpus limit must be greater than 0') + def update(self, parameters, _type, container_properties, retry_strategy): + if parameters is None: + parameters = self.parameters + + if _type is None: + _type = self.type + + if container_properties is None: + container_properties = self.container_properties + + if retry_strategy is None: + retry_strategy = self.retries + + return JobDefinition(self.name, parameters, _type, container_properties, region_name=self._region, revision=self.revision, retry_strategy=retry_strategy) + + def describe(self): + result = { + 'jobDefinitionArn': self.arn, + 'jobDefinitionName': self.name, + 'parameters': self.parameters, + 'revision': self.revision, + 'status': self.status, + 'type': self.type + } + if self.container_properties is not None: + result['containerProperties'] = self.container_properties + if self.retries is not None and self.retries > 0: + result['retryStrategy'] = {'attempts': self.retries} + + return result + class BatchBackend(BaseBackend): def __init__(self, region_name=None): @@ -211,26 +243,52 @@ class BatchBackend(BaseBackend): def get_job_definition_by_arn(self, arn): return self._job_definitions.get(arn) - def get_job_definition_by_name(self, name): + def get_job_definition_by_name(self, name):# for 
comp_env in self._job_definitions.values(): if comp_env.name == name: return comp_env return None + def get_job_definition_by_name_revision(self, name, revision): + for job_def in self._job_definitions.values(): + if job_def.name == name and job_def.revision == revision: + return job_def + return None + def get_job_definition(self, identifier): """ - Get job queue by name or ARN + Get job defintiion by name or ARN :param identifier: Name or ARN :type identifier: str - :return: Job Queue or None - :rtype: JobQueue or None + :return: Job definition or None + :rtype: JobDefinition or None """ env = self.get_job_definition_by_arn(identifier) if env is None: env = self.get_job_definition_by_name(identifier) return env + def get_job_definitions(self, identifier): + """ + Get job defintiion by name or ARN + :param identifier: Name or ARN + :type identifier: str + + :return: Job definition or None + :rtype: list of JobDefinition + """ + result = [] + env = self.get_job_definition_by_arn(identifier) + if env is not None: + result.append(env) + else: + for value in self._job_definitions.values(): + if value.name == identifier: + result.append(value) + + return result + def describe_compute_environments(self, environments=None, max_results=None, next_token=None): envs = set() if environments is not None: @@ -586,20 +644,53 @@ class BatchBackend(BaseBackend): if def_name is None: raise ClientException('jobDefinitionName must be provided') - if self.get_job_definition_by_name(def_name) is not None: - raise ClientException('A job definition called {0} already exists'.format(def_name)) - + job_def = self.get_job_definition_by_name(def_name) if retry_strategy is not None: try: retry_strategy = retry_strategy['attempts'] except Exception: raise ClientException('retryStrategy is malformed') - job_def = JobDefinition(def_name, parameters, _type, container_properties, region_name=self.region_name, retry_strategy=retry_strategy) + if job_def is None: + job_def = JobDefinition(def_name, 
parameters, _type, container_properties, region_name=self.region_name, retry_strategy=retry_strategy) + else: + # Make new jobdef + job_def = job_def.update(parameters, _type, container_properties, retry_strategy) + self._job_definitions[job_def.arn] = job_def return def_name, job_def.arn, job_def.revision + def deregister_job_definition(self, def_name): + job_def = self.get_job_definition_by_arn(def_name) + if job_def is None and ':' in def_name: + name, revision = def_name.split(':', 1) + job_def = self.get_job_definition_by_name_revision(name, revision) + + if job_def is not None: + del self._job_definitions[job_def.arn] + + def describe_job_definitions(self, job_def_name=None, job_def_list=None, status=None, max_results=None, next_token=None): + jobs = [] + + # As a job name can reference multiple revisions, we get a list of them + if job_def_name is not None: + job_def = self.get_job_definitions(job_def_name) + if job_def is not None: + jobs.extend(job_def) + elif job_def_list is not None: + for job in job_def_list: + job_def = self.get_job_definitions(job) + if job_def is not None: + jobs.extend(job_def) + else: + jobs.extend(self._job_definitions.values()) + + # Got all the job defs were after, filter then by status + if status is not None: + return [job for job in jobs if job.status == status] + return jobs + available_regions = boto3.session.Session().get_available_regions("batch") batch_backends = {region: BatchBackend(region_name=region) for region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py index dec740221..0d3900d1d 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -205,3 +205,24 @@ class BatchResponse(BaseResponse): } return json.dumps(result) + + # DeregisterJobDefinition + def deregisterjobdefinition(self): + queue_name = self._get_param('jobDefinition') + + self.batch_backend.deregister_job_definition(queue_name) + + return '' + + # DescribeJobDefinitions + def 
describejobdefinitions(self): + job_def_name = self._get_param('jobDefinitionName') + job_def_list = self._get_param('jobDefinitions') + max_results = self._get_param('maxResults') + next_token = self._get_param('nextToken') + status = self._get_param('status') + + job_defs = self.batch_backend.describe_job_definitions(job_def_name, job_def_list, status, max_results, next_token) + + result = {'jobDefinitions': [job.describe() for job in job_defs]} + return json.dumps(result) diff --git a/moto/batch/urls.py b/moto/batch/urls.py index cd5ccb00c..3265bb535 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -14,5 +14,7 @@ url_paths = { '{0}/v1/describejobqueues': BatchResponse.dispatch, '{0}/v1/updatejobqueue': BatchResponse.dispatch, '{0}/v1/deletejobqueue': BatchResponse.dispatch, - '{0}/v1/registerjobdefinition': BatchResponse.dispatch + '{0}/v1/registerjobdefinition': BatchResponse.dispatch, + '{0}/v1/deregisterjobdefinition': BatchResponse.dispatch, + '{0}/v1/describejobdefinitions': BatchResponse.dispatch } diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 6eba45d27..ebe710760 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -447,3 +447,127 @@ def test_register_task_definition(): resp.should.contain('revision') assert resp['jobDefinitionArn'].endswith('{0}:{1}'.format(resp['jobDefinitionName'], resp['revision'])) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_reregister_task_definition(): + # Reregistering task with the same name bumps the revision number + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + resp1 = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + + resp1.should.contain('jobDefinitionArn') + 
resp1.should.contain('jobDefinitionName') + resp1.should.contain('revision') + + assert resp1['jobDefinitionArn'].endswith('{0}:{1}'.format(resp1['jobDefinitionName'], resp1['revision'])) + resp1['revision'].should.equal(1) + + resp2 = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 68, + 'command': ['sleep', '10'] + } + ) + resp2['revision'].should.equal(2) + + resp2['jobDefinitionArn'].should_not.equal(resp1['jobDefinitionArn']) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_delete_task_definition(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + + batch_client.deregister_job_definition(jobDefinition=resp['jobDefinitionArn']) + + resp = batch_client.describe_job_definitions() + len(resp['jobDefinitions']).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_describe_task_definition(): + ec2_client, iam_client, ecs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + arn1 = resp['jobDefinitionArn'] + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 64, + 'command': ['sleep', '10'] + } + ) + arn2 = resp['jobDefinitionArn'] + resp = batch_client.register_job_definition( + jobDefinitionName='test1', + type='container', + 
containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 64, + 'command': ['sleep', '10'] + } + ) + arn3 = resp['jobDefinitionArn'] + + resp = batch_client.describe_job_definitions( + jobDefinitionName='sleep10' + ) + len(resp['jobDefinitions']).should.equal(2) + + resp = batch_client.describe_job_definitions() + len(resp['jobDefinitions']).should.equal(3) + + resp = batch_client.describe_job_definitions( + jobDefinitions=['sleep10', 'test1'] + ) + len(resp['jobDefinitions']).should.equal(3) + From 6eb755029cf77309c978f953e77ca033a9a8b3db Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Thu, 5 Oct 2017 00:09:10 +0100 Subject: [PATCH 359/412] fix flake8 --- moto/batch/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index bfbdcf4a5..05137296b 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -243,7 +243,7 @@ class BatchBackend(BaseBackend): def get_job_definition_by_arn(self, arn): return self._job_definitions.get(arn) - def get_job_definition_by_name(self, name):# + def get_job_definition_by_name(self, name): for comp_env in self._job_definitions.values(): if comp_env.name == name: return comp_env From 8ca7ccfcb57a2298d9b7dcde892283d096ea09de Mon Sep 17 00:00:00 2001 From: David Morrison Date: Thu, 5 Oct 2017 12:50:42 -0700 Subject: [PATCH 360/412] add support for the modify_spot_fleet_request operation --- moto/ec2/models.py | 70 +++++++++++++---- moto/ec2/responses/spot_fleets.py | 14 ++++ tests/test_ec2/test_spot_fleet.py | 126 ++++++++++++++++++++++++++++++ 3 files changed, 195 insertions(+), 15 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 10fec7fd7..f6f53683a 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -374,6 +374,7 @@ class Instance(TaggedEC2Resource, BotoInstance): self.source_dest_check = "true" self.launch_time = utc_date_and_time() self.disable_api_termination = kwargs.get("disable_api_termination", False) + 
self._spot_fleet_id = kwargs.get("spot_fleet_id", None) associate_public_ip = kwargs.get("associate_public_ip", False) if in_ec2_classic: # If we are in EC2-Classic, autoassign a public IP @@ -511,6 +512,14 @@ class Instance(TaggedEC2Resource, BotoInstance): self.teardown_defaults() + if self._spot_fleet_id: + spot_fleet = self.ec2_backend.get_spot_fleet_request(self._spot_fleet_id) + for spec in spot_fleet.launch_specs: + if spec.instance_type == self.instance_type and spec.subnet_id == self.subnet_id: + break + spot_fleet.fulfilled_capacity -= spec.weighted_capacity + spot_fleet.spot_requests = [req for req in spot_fleet.spot_requests if req.instance != self] + self._state.name = "terminated" self._state.code = 48 @@ -2623,7 +2632,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): def __init__(self, ec2_backend, spot_request_id, price, image_id, type, valid_from, valid_until, launch_group, availability_zone_group, key_name, security_groups, user_data, instance_type, placement, - kernel_id, ramdisk_id, monitoring_enabled, subnet_id, + kernel_id, ramdisk_id, monitoring_enabled, subnet_id, spot_fleet_id, **kwargs): super(SpotInstanceRequest, self).__init__(**kwargs) ls = LaunchSpecification() @@ -2646,6 +2655,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): ls.placement = placement ls.monitored = monitoring_enabled ls.subnet_id = subnet_id + self.spot_fleet_id = spot_fleet_id if security_groups: for group_name in security_groups: @@ -2678,6 +2688,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): key_name=self.launch_specification.key_name, security_group_names=[], security_group_ids=self.launch_specification.groups, + spot_fleet_id=self.spot_fleet_id, ) instance = reservation.instances[0] return instance @@ -2693,7 +2704,7 @@ class SpotRequestBackend(object): valid_until, launch_group, availability_zone_group, key_name, security_groups, user_data, instance_type, placement, kernel_id, ramdisk_id, - 
monitoring_enabled, subnet_id): + monitoring_enabled, subnet_id, spot_fleet_id=None): requests = [] for _ in range(count): spot_request_id = random_spot_request_id() @@ -2701,7 +2712,7 @@ class SpotRequestBackend(object): spot_request_id, price, image_id, type, valid_from, valid_until, launch_group, availability_zone_group, key_name, security_groups, user_data, instance_type, placement, kernel_id, ramdisk_id, - monitoring_enabled, subnet_id) + monitoring_enabled, subnet_id, spot_fleet_id) self.spot_instance_requests[spot_request_id] = request requests.append(request) return requests @@ -2747,7 +2758,7 @@ class SpotFleetRequest(TaggedEC2Resource): self.iam_fleet_role = iam_fleet_role self.allocation_strategy = allocation_strategy self.state = "active" - self.fulfilled_capacity = self.target_capacity + self.fulfilled_capacity = 0.0 self.launch_specs = [] for spec in launch_specs: @@ -2768,7 +2779,7 @@ class SpotFleetRequest(TaggedEC2Resource): ) self.spot_requests = [] - self.create_spot_requests() + self.create_spot_requests(self.target_capacity) @property def physical_resource_id(self): @@ -2798,31 +2809,32 @@ class SpotFleetRequest(TaggedEC2Resource): return spot_fleet_request - def get_launch_spec_counts(self): + def get_launch_spec_counts(self, weight_to_add): weight_map = defaultdict(int) + weight_so_far = 0 if self.allocation_strategy == 'diversified': - weight_so_far = 0 launch_spec_index = 0 while True: launch_spec = self.launch_specs[ launch_spec_index % len(self.launch_specs)] weight_map[launch_spec] += 1 weight_so_far += launch_spec.weighted_capacity - if weight_so_far >= self.target_capacity: + if weight_so_far >= weight_to_add: break launch_spec_index += 1 else: # lowestPrice cheapest_spec = sorted( self.launch_specs, key=lambda spec: float(spec.spot_price))[0] - extra = 1 if self.target_capacity % cheapest_spec.weighted_capacity else 0 + weight_so_far = weight_to_add + (weight_to_add % cheapest_spec.weighted_capacity) weight_map[cheapest_spec] = int( - 
self.target_capacity // cheapest_spec.weighted_capacity) + extra + weight_so_far // cheapest_spec.weighted_capacity) - return weight_map.items() + return weight_map, weight_so_far - def create_spot_requests(self): - for launch_spec, count in self.get_launch_spec_counts(): + def create_spot_requests(self, weight_to_add): + weight_map, added_weight = self.get_launch_spec_counts(weight_to_add) + for launch_spec, count in weight_map.items(): requests = self.ec2_backend.request_spot_instances( price=launch_spec.spot_price, image_id=launch_spec.image_id, @@ -2841,12 +2853,28 @@ class SpotFleetRequest(TaggedEC2Resource): ramdisk_id=None, monitoring_enabled=launch_spec.monitoring, subnet_id=launch_spec.subnet_id, + spot_fleet_id=self.id, ) self.spot_requests.extend(requests) + self.fulfilled_capacity += added_weight return self.spot_requests def terminate_instances(self): - pass + instance_ids = [] + new_fulfilled_capacity = self.fulfilled_capacity + for req in self.spot_requests: + instance = req.instance + for spec in self.launch_specs: + if spec.instance_type == instance.instance_type and spec.subnet_id == instance.subnet_id: + break + + if new_fulfilled_capacity - spec.weighted_capacity < self.target_capacity: + continue + new_fulfilled_capacity -= spec.weighted_capacity + instance_ids.append(instance.id) + + self.spot_requests = [req for req in self.spot_requests if req.instance.id not in instance_ids] + self.ec2_backend.terminate_instances(instance_ids) class SpotFleetBackend(object): @@ -2882,12 +2910,24 @@ class SpotFleetBackend(object): def cancel_spot_fleet_requests(self, spot_fleet_request_ids, terminate_instances): spot_requests = [] for spot_fleet_request_id in spot_fleet_request_ids: - spot_fleet = self.spot_fleet_requests.pop(spot_fleet_request_id) + spot_fleet = self.spot_fleet_requests[spot_fleet_request_id] if terminate_instances: + spot_fleet.target_capacity = 0 spot_fleet.terminate_instances() spot_requests.append(spot_fleet) + del 
self.spot_fleet_requests[spot_fleet_request_id] return spot_requests + def modify_spot_fleet_request(self, spot_fleet_request_id, target_capacity, terminate_instances): + spot_fleet_request = self.spot_fleet_requests[spot_fleet_request_id] + delta = target_capacity - spot_fleet_request.target_capacity + spot_fleet_request.target_capacity = target_capacity + if delta > 0: + spot_fleet_request.create_spot_requests(delta) + elif delta < 0 and terminate_instances == 'default': + spot_fleet_request.terminate_instances() + return True + class ElasticAddress(object): def __init__(self, domain): diff --git a/moto/ec2/responses/spot_fleets.py b/moto/ec2/responses/spot_fleets.py index e39d9b178..167618215 100644 --- a/moto/ec2/responses/spot_fleets.py +++ b/moto/ec2/responses/spot_fleets.py @@ -29,6 +29,15 @@ class SpotFleets(BaseResponse): template = self.response_template(DESCRIBE_SPOT_FLEET_TEMPLATE) return template.render(requests=requests) + def modify_spot_fleet_request(self): + spot_fleet_request_id = self._get_param("SpotFleetRequestId") + target_capacity = self._get_int_param("TargetCapacity") + terminate_instances = self._get_param("ExcessCapacityTerminationPolicy", if_none="default") + successful = self.ec2_backend.modify_spot_fleet_request( + spot_fleet_request_id, target_capacity, terminate_instances) + template = self.response_template(MODIFY_SPOT_FLEET_REQUEST_TEMPLATE) + return template.render(successful=successful) + def request_spot_fleet(self): spot_config = self._get_dict_param("SpotFleetRequestConfig.") spot_price = spot_config['spot_price'] @@ -56,6 +65,11 @@ REQUEST_SPOT_FLEET_TEMPLATE = """ + 21681fea-9987-aef3-2121-example + {{ successful }} +""" + DESCRIBE_SPOT_FLEET_TEMPLATE = """ 4d68a6cc-8f2e-4be1-b425-example diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index 8ac91c57b..a8737a17c 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -164,3 +164,129 @@ def 
test_cancel_spot_fleet_request(): spot_fleet_requests = conn.describe_spot_fleet_requests( SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] len(spot_fleet_requests).should.equal(0) + + +@mock_ec2 +def test_modify_spot_fleet_request_up(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=20) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(10) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(20) + spot_fleet_config['FulfilledCapacity'].should.equal(20) + + +@mock_ec2 +def test_modify_spot_fleet_request_up_diversified(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config( + subnet_id, allocation_strategy='diversified'), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=19) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(7) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(19) + spot_fleet_config['FulfilledCapacity'].should.equal(20.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_down_no_terminate(): + conn = 
boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(3) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(1) + spot_fleet_config['FulfilledCapacity'].should.equal(6) + + +@mock_ec2 +def test_modify_spot_fleet_request_down(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=1) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(1) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(1) + spot_fleet_config['FulfilledCapacity'].should.equal(2) + + +@mock_ec2 +def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + instance_res = 
conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + conn.terminate_instances(InstanceIds=[i['InstanceId'] for i in instances[1:]]) + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(1) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(1) + spot_fleet_config['FulfilledCapacity'].should.equal(2) From e135344f0c339771f629ef9e1b1e364663b57fca Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 6 Oct 2017 01:21:29 +0100 Subject: [PATCH 361/412] Added simple SubmitJob and DescribeJobs --- moto/batch/models.py | 225 +++++++++++++++++++++++++++++++++ moto/batch/responses.py | 37 ++++++ moto/batch/urls.py | 4 +- moto/logs/models.py | 24 +++- moto/logs/responses.py | 4 +- tests/test_batch/test_batch.py | 126 +++++++++++++++--- 6 files changed, 391 insertions(+), 29 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index 05137296b..be8fca9d1 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -1,13 +1,22 @@ from __future__ import unicode_literals import boto3 import re +import requests.adapters from itertools import cycle import six +import datetime +import time import uuid +import logging +import docker +import functools +import threading +import dateutil.parser from moto.core import BaseBackend, BaseModel from moto.iam import iam_backends from moto.ec2 import ec2_backends from moto.ecs import ecs_backends +from moto.logs import logs_backends from .exceptions import InvalidParameterValueException, InternalFailure, ClientException from .utils import make_arn_for_compute_env, 
make_arn_for_job_queue, make_arn_for_task_def @@ -16,10 +25,16 @@ from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES from moto.iam.exceptions import IAMNotFoundException +_orig_adapter_send = requests.adapters.HTTPAdapter.send +logger = logging.getLogger(__name__) DEFAULT_ACCOUNT_ID = 123456789012 COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile(r'^[A-Za-z0-9_]{1,128}$') +def datetime2int(date): + return int(time.mktime(date.timetuple())) + + class ComputeEnvironment(BaseModel): def __init__(self, compute_environment_name, _type, state, compute_resources, service_role, region_name): self.name = compute_environment_name @@ -65,6 +80,8 @@ class JobQueue(BaseModel): self.arn = make_arn_for_job_queue(DEFAULT_ACCOUNT_ID, name, region_name) self.status = 'VALID' + self.jobs = [] + def describe(self): result = { 'computeEnvironmentOrder': self.env_order_json, @@ -156,6 +173,162 @@ class JobDefinition(BaseModel): return result +class Job(threading.Thread, BaseModel): + def __init__(self, name, job_def, job_queue, log_backend): + """ + Docker Job + + :param name: Job Name + :param job_def: Job definition + :type: job_def: JobDefinition + :param job_queue: Job Queue + :param log_backend: Log backend + :type log_backend: moto.logs.models.LogsBackend + """ + threading.Thread.__init__(self) + + self.job_name = name + self.job_id = str(uuid.uuid4()) + self.job_definition = job_def + self.job_queue = job_queue + self.job_state = 'SUBMITTED' # One of SUBMITTED | PENDING | RUNNABLE | STARTING | RUNNING | SUCCEEDED | FAILED + self.job_queue.jobs.append(self) + self.job_started_at = datetime.datetime(1970, 1, 1) + self.job_stopped_at = datetime.datetime(1970, 1, 1) + self.job_stopped = False + + self.stop = False + + self.daemon = True + self.name = 'MOTO-BATCH-' + self.job_id + + self.docker_client = docker.from_env() + self._log_backend = log_backend + + # Unfortunately mocking replaces this method w/o fallback enabled, so we + # need to replace it if we detect it's been 
mocked + if requests.adapters.HTTPAdapter.send != _orig_adapter_send: + _orig_get_adapter = self.docker_client.api.get_adapter + + def replace_adapter_send(*args, **kwargs): + adapter = _orig_get_adapter(*args, **kwargs) + + if isinstance(adapter, requests.adapters.HTTPAdapter): + adapter.send = functools.partial(_orig_adapter_send, adapter) + return adapter + self.docker_client.api.get_adapter = replace_adapter_send + + def describe(self): + result = { + 'jobDefinition': self.job_definition.arn, + 'jobId': self.job_id, + 'jobName': self.job_name, + 'jobQueue': self.job_queue.arn, + 'startedAt': datetime2int(self.job_started_at), + 'status': self.job_state, + 'dependsOn': [] + } + if self.job_stopped: + result['stoppedAt'] = datetime2int(self.job_stopped_at) + return result + + def run(self): + """ + Run the container. + + Logic is as follows: + Generate container info (eventually from task definition) + Start container + Loop whilst not asked to stop and the container is running. + Get all logs from container between the last time I checked and now. 
+ Convert logs into cloudwatch format + Put logs into cloudwatch + + :return: + """ + try: + self.job_state = 'PENDING' + time.sleep(1) + + image = 'alpine:latest' + cmd = '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"' + name = '{0}-{1}'.format(self.job_name, self.job_id) + + self.job_state = 'RUNNABLE' + # TODO setup ecs container instance + time.sleep(1) + + self.job_state = 'STARTING' + container = self.docker_client.containers.run( + image, cmd, + detach=True, + name=name + ) + self.job_state = 'RUNNING' + self.job_started_at = datetime.datetime.now() + try: + # Log collection + logs_stdout = [] + logs_stderr = [] + container.reload() + + # Dodgy hack, we can only check docker logs once a second, but we want to loop more + # so we can stop if asked to in a quick manner, should all go away if we go async + # There also be some dodgyness when sending an integer to docker logs and some + # events seem to be duplicated. + now = datetime.datetime.now() + i = 1 + while container.status == 'running' and not self.stop: + time.sleep(0.15) + if i % 10 == 0: + logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n')) + logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n')) + now = datetime.datetime.now() + container.reload() + i += 1 + + # Container should be stopped by this point... 
unless asked to stop + if container.status == 'running': + container.kill() + + self.job_stopped_at = datetime.datetime.now() + # Get final logs + logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n')) + logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n')) + + self.job_state = 'SUCCEEDED' if not self.stop else 'FAILED' + + # Process logs + logs_stdout = [x for x in logs_stdout if len(x) > 0] + logs_stderr = [x for x in logs_stderr if len(x) > 0] + logs = [] + for line in logs_stdout + logs_stderr: + date, line = line.split(' ', 1) + date = dateutil.parser.parse(date) + date = int(date.timestamp()) + logs.append({'timestamp': date, 'message': line.strip()}) + + # Send to cloudwatch + log_group = '/aws/batch/job' + stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id) + self._log_backend.ensure_log_group(log_group, None) + self._log_backend.create_log_stream(log_group, stream_name) + self._log_backend.put_log_events(log_group, stream_name, logs, None) + + except Exception as err: + logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err)) + self.job_state = 'FAILED' + container.kill() + finally: + container.remove() + except Exception as err: + logger.error('Failed to run AWS Batch container {0}. 
Error {1}'.format(self.name, err)) + self.job_state = 'FAILED' + + self.job_stopped = True + self.job_stopped_at = datetime.datetime.now() + + class BatchBackend(BaseBackend): def __init__(self, region_name=None): super(BatchBackend, self).__init__() @@ -164,6 +337,7 @@ class BatchBackend(BaseBackend): self._compute_environments = {} self._job_queues = {} self._job_definitions = {} + self._jobs = {} @property def iam_backend(self): @@ -189,8 +363,23 @@ class BatchBackend(BaseBackend): """ return ecs_backends[self.region_name] + @property + def logs_backend(self): + """ + :return: ECS Backend + :rtype: moto.logs.models.LogsBackend + """ + return logs_backends[self.region_name] + def reset(self): region_name = self.region_name + + for job in self._jobs.values(): + if job.job_state not in ('FAILED', 'SUCCEEDED'): + job.stop = True + # Try to join + job.join(0.2) + self.__dict__ = {} self.__init__(region_name) @@ -691,6 +880,42 @@ class BatchBackend(BaseBackend): return [job for job in jobs if job.status == status] return jobs + def submit_job(self, job_name, job_def_id, job_queue, parameters=None, retries=None, depends_on=None, container_overrides=None): + # TODO parameters, retries (which is a dict raw from request), job dependancies and container overrides are ignored for now + + # Look for job definition + job_def = self.get_job_definition_by_arn(job_def_id) + if job_def is None and ':' in job_def_id: + job_def = self.get_job_definition_by_name_revision(*job_def_id.split(':', 1)) + if job_def is None: + raise ClientException('Job definition {0} does not exist'.format(job_def_id)) + + queue = self.get_job_queue(job_queue) + if queue is None: + raise ClientException('Job queue {0} does not exist'.format(job_queue)) + + job = Job(job_name, job_def, queue, log_backend=self.logs_backend) + self._jobs[job.job_id] = job + + # Here comes the fun + job.start() + + return job_name, job.job_id + + def describe_jobs(self, jobs): + job_filter = set() + if jobs is not None: + 
job_filter = set(jobs) + + result = [] + for key, job in self._jobs.items(): + if len(job_filter) > 0 and key not in job_filter: + continue + + result.append(job.describe()) + + return result + available_regions = boto3.session.Session().get_available_regions("batch") batch_backends = {region: BatchBackend(region_name=region) for region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py index 0d3900d1d..2bec7ddf1 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -226,3 +226,40 @@ class BatchResponse(BaseResponse): result = {'jobDefinitions': [job.describe() for job in job_defs]} return json.dumps(result) + + # SubmitJob + def submitjob(self): + container_overrides = self._get_param('containerOverrides') + depends_on = self._get_param('dependsOn') + job_def = self._get_param('jobDefinition') + job_name = self._get_param('jobName') + job_queue = self._get_param('jobQueue') + parameters = self._get_param('parameters') + retries = self._get_param('retryStrategy') + + try: + name, job_id = self.batch_backend.submit_job( + job_name, job_def, job_queue, + parameters=parameters, + retries=retries, + depends_on=depends_on, + container_overrides=container_overrides + ) + except AWSError as err: + return err.response() + + result = { + 'jobId': job_id, + 'jobName': name, + } + + return json.dumps(result) + + # DescribeJobs + def describejobs(self): + jobs = self._get_param('jobs') + + try: + return json.dumps({'jobs': self.batch_backend.describe_jobs(jobs)}) + except AWSError as err: + return err.response() diff --git a/moto/batch/urls.py b/moto/batch/urls.py index 3265bb535..924e55e6d 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -16,5 +16,7 @@ url_paths = { '{0}/v1/deletejobqueue': BatchResponse.dispatch, '{0}/v1/registerjobdefinition': BatchResponse.dispatch, '{0}/v1/deregisterjobdefinition': BatchResponse.dispatch, - '{0}/v1/describejobdefinitions': BatchResponse.dispatch + 
'{0}/v1/describejobdefinitions': BatchResponse.dispatch, + '{0}/v1/submitjob': BatchResponse.dispatch, + '{0}/v1/describejobs': BatchResponse.dispatch } diff --git a/moto/logs/models.py b/moto/logs/models.py index 14f511932..09dcb3645 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -22,6 +22,13 @@ class LogEvent: "timestamp": self.timestamp } + def to_response_dict(self): + return { + "ingestionTime": self.ingestionTime, + "message": self.message, + "timestamp": self.timestamp + } + class LogStream: _log_ids = 0 @@ -41,7 +48,14 @@ class LogStream: self.__class__._log_ids += 1 + def _update(self): + self.firstEventTimestamp = min([x.timestamp for x in self.events]) + self.lastEventTimestamp = max([x.timestamp for x in self.events]) + def to_describe_dict(self): + # Compute start and end times + self._update() + return { "arn": self.arn, "creationTime": self.creationTime, @@ -79,7 +93,7 @@ class LogStream: if next_token is None: next_token = 0 - events_page = events[next_token: next_token + limit] + events_page = [event.to_response_dict() for event in events[next_token: next_token + limit]] next_token += limit if next_token >= len(self.events): next_token = None @@ -120,17 +134,17 @@ class LogGroup: del self.streams[log_stream_name] def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): - log_streams = [stream.to_describe_dict() for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)] + log_streams = [(name, stream.to_describe_dict()) for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)] - def sorter(stream): - return stream.name if order_by == 'logStreamName' else stream.lastEventTimestamp + def sorter(item): + return item[0] if order_by == 'logStreamName' else item[1]['lastEventTimestamp'] if next_token is None: next_token = 0 log_streams = sorted(log_streams, key=sorter, reverse=descending) new_token = next_token + limit - 
log_streams_page = log_streams[next_token: new_token] + log_streams_page = [x[1] for x in log_streams[next_token: new_token]] if new_token >= len(log_streams): new_token = None diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 4cb9caa6a..53b2390f4 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -47,7 +47,7 @@ class LogsResponse(BaseResponse): def describe_log_streams(self): log_group_name = self._get_param('logGroupName') - log_stream_name_prefix = self._get_param('logStreamNamePrefix') + log_stream_name_prefix = self._get_param('logStreamNamePrefix', '') descending = self._get_param('descending', False) limit = self._get_param('limit', 50) assert limit <= 50 @@ -83,7 +83,7 @@ class LogsResponse(BaseResponse): limit = self._get_param('limit', 10000) assert limit <= 10000 next_token = self._get_param('nextToken') - start_from_head = self._get_param('startFromHead') + start_from_head = self._get_param('startFromHead', False) events, next_backward_token, next_foward_token = \ self.logs_backend.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index ebe710760..acbe75e94 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -1,11 +1,24 @@ from __future__ import unicode_literals +import time +import datetime import boto3 from botocore.exceptions import ClientError import sure # noqa -from moto import mock_batch, mock_iam, mock_ec2, mock_ecs +from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs +import functools +import nose +def expected_failure(test): + @functools.wraps(test) + def inner(*args, **kwargs): + try: + test(*args, **kwargs) + except Exception as err: + raise nose.SkipTest + return inner + DEFAULT_REGION = 'eu-central-1' @@ -13,6 +26,7 @@ def _get_clients(): return boto3.client('ec2', region_name=DEFAULT_REGION), \ boto3.client('iam', 
region_name=DEFAULT_REGION), \ boto3.client('ecs', region_name=DEFAULT_REGION), \ + boto3.client('logs', region_name=DEFAULT_REGION), \ boto3.client('batch', region_name=DEFAULT_REGION) @@ -52,7 +66,7 @@ def _setup(ec2_client, iam_client): @mock_iam @mock_batch def test_create_managed_compute_environment(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -105,7 +119,7 @@ def test_create_managed_compute_environment(): @mock_iam @mock_batch def test_create_unmanaged_compute_environment(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -136,7 +150,7 @@ def test_create_unmanaged_compute_environment(): @mock_iam @mock_batch def test_describe_compute_environment(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -163,7 +177,7 @@ def test_describe_compute_environment(): @mock_iam @mock_batch def test_delete_unmanaged_compute_environment(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -190,7 +204,7 @@ def test_delete_unmanaged_compute_environment(): @mock_iam @mock_batch def test_delete_managed_compute_environment(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() 
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -247,7 +261,7 @@ def test_delete_managed_compute_environment(): @mock_iam @mock_batch def test_update_unmanaged_compute_environment_state(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -273,7 +287,7 @@ def test_update_unmanaged_compute_environment_state(): @mock_iam @mock_batch def test_create_job_queue(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -315,7 +329,7 @@ def test_create_job_queue(): @mock_iam @mock_batch def test_job_queue_bad_arn(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -348,7 +362,7 @@ def test_job_queue_bad_arn(): @mock_iam @mock_batch def test_update_job_queue(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -389,7 +403,7 @@ def test_update_job_queue(): @mock_iam @mock_batch def test_update_job_queue(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) compute_name = 'test_compute_env' @@ -428,7 +442,7 @@ def test_update_job_queue(): @mock_iam @mock_batch def 
test_register_task_definition(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) resp = batch_client.register_job_definition( @@ -455,7 +469,7 @@ def test_register_task_definition(): @mock_batch def test_reregister_task_definition(): # Reregistering task with the same name bumps the revision number - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) resp1 = batch_client.register_job_definition( @@ -496,7 +510,7 @@ def test_reregister_task_definition(): @mock_iam @mock_batch def test_delete_task_definition(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) resp = batch_client.register_job_definition( @@ -521,10 +535,10 @@ def test_delete_task_definition(): @mock_iam @mock_batch def test_describe_task_definition(): - ec2_client, iam_client, ecs_client, batch_client = _get_clients() + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) - resp = batch_client.register_job_definition( + batch_client.register_job_definition( jobDefinitionName='sleep10', type='container', containerProperties={ @@ -534,8 +548,7 @@ def test_describe_task_definition(): 'command': ['sleep', '10'] } ) - arn1 = resp['jobDefinitionArn'] - resp = batch_client.register_job_definition( + batch_client.register_job_definition( jobDefinitionName='sleep10', type='container', containerProperties={ @@ -545,8 +558,7 @@ def test_describe_task_definition(): 'command': ['sleep', '10'] } ) - arn2 = resp['jobDefinitionArn'] - 
resp = batch_client.register_job_definition( + batch_client.register_job_definition( jobDefinitionName='test1', type='container', containerProperties={ @@ -556,7 +568,6 @@ def test_describe_task_definition(): 'command': ['sleep', '10'] } ) - arn3 = resp['jobDefinitionArn'] resp = batch_client.describe_job_definitions( jobDefinitionName='sleep10' @@ -571,3 +582,76 @@ def test_describe_task_definition(): ) len(resp['jobDefinitions']).should.equal(3) + +# SLOW TEST +@expected_failure +@mock_logs +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_submit_job(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + job_def_arn = resp['jobDefinitionArn'] + + resp = batch_client.submit_job( + jobName='test1', + jobQueue=queue_arn, + jobDefinition=job_def_arn + ) + job_id = resp['jobId'] + + future = datetime.datetime.now() + datetime.timedelta(seconds=30) + + while datetime.datetime.now() < future: + resp = batch_client.describe_jobs(jobs=[job_id]) + print("{0}:{1} {2}".format(resp['jobs'][0]['jobName'], resp['jobs'][0]['jobId'], resp['jobs'][0]['status'])) + + if resp['jobs'][0]['status'] == 'FAILED': + raise RuntimeError('Batch job failed') + if resp['jobs'][0]['status'] == 'SUCCEEDED': + break + 
time.sleep(0.5) + else: + raise RuntimeError('Batch job timed out') + + resp = logs_client.describe_log_streams(logGroupName='/aws/batch/job') + len(resp['logStreams']).should.equal(1) + ls_name = resp['logStreams'][0]['logStreamName'] + + resp = logs_client.get_log_events(logGroupName='/aws/batch/job', logStreamName=ls_name) + len(resp['events']).should.be.greater_than(5) \ No newline at end of file From 3d3d0e916e96abef3ee651e77fad0d932dd5e448 Mon Sep 17 00:00:00 2001 From: David Morrison Date: Thu, 5 Oct 2017 18:46:58 -0700 Subject: [PATCH 362/412] minor bugfixes and added tests --- moto/ec2/models.py | 6 ++++-- moto/ec2/responses/spot_fleets.py | 8 ++++---- tests/test_ec2/test_spot_fleet.py | 26 ++++++++++++++++++++++++++ 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index f6f53683a..05224a45d 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2919,12 +2919,14 @@ class SpotFleetBackend(object): return spot_requests def modify_spot_fleet_request(self, spot_fleet_request_id, target_capacity, terminate_instances): + if target_capacity < 0: + raise ValueError('Cannot reduce spot fleet capacity below 0') spot_fleet_request = self.spot_fleet_requests[spot_fleet_request_id] - delta = target_capacity - spot_fleet_request.target_capacity + delta = target_capacity - spot_fleet_request.fulfilled_capacity spot_fleet_request.target_capacity = target_capacity if delta > 0: spot_fleet_request.create_spot_requests(delta) - elif delta < 0 and terminate_instances == 'default': + elif delta < 0 and terminate_instances == 'Default': spot_fleet_request.terminate_instances() return True diff --git a/moto/ec2/responses/spot_fleets.py b/moto/ec2/responses/spot_fleets.py index 167618215..81d1e0146 100644 --- a/moto/ec2/responses/spot_fleets.py +++ b/moto/ec2/responses/spot_fleets.py @@ -32,7 +32,7 @@ class SpotFleets(BaseResponse): def modify_spot_fleet_request(self): spot_fleet_request_id = 
self._get_param("SpotFleetRequestId") target_capacity = self._get_int_param("TargetCapacity") - terminate_instances = self._get_param("ExcessCapacityTerminationPolicy", if_none="default") + terminate_instances = self._get_param("ExcessCapacityTerminationPolicy", if_none="Default") successful = self.ec2_backend.modify_spot_fleet_request( spot_fleet_request_id, target_capacity, terminate_instances) template = self.response_template(MODIFY_SPOT_FLEET_REQUEST_TEMPLATE) @@ -65,10 +65,10 @@ REQUEST_SPOT_FLEET_TEMPLATE = """ +MODIFY_SPOT_FLEET_REQUEST_TEMPLATE = """ 21681fea-9987-aef3-2121-example - {{ successful }} -""" + {{ 'true' if successful else 'false' }} +""" DESCRIBE_SPOT_FLEET_TEMPLATE = """ 4d68a6cc-8f2e-4be1-b425-example diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index a8737a17c..442f77a94 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -239,6 +239,32 @@ def test_modify_spot_fleet_request_down_no_terminate(): spot_fleet_config['FulfilledCapacity'].should.equal(6) +@mock_ec2 +def test_modify_spot_fleet_request_down_odd(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=7) + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=5) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(3) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(5) + spot_fleet_config['FulfilledCapacity'].should.equal(6) + + @mock_ec2 def 
test_modify_spot_fleet_request_down(): conn = boto3.client("ec2", region_name='us-west-2') From fa3268b7b7d51932bd3b38ed42d02d0deb8dafb0 Mon Sep 17 00:00:00 2001 From: David Morrison Date: Fri, 6 Oct 2017 08:07:21 -0700 Subject: [PATCH 363/412] fix tests --- tests/test_ec2/test_spot_fleet.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index 442f77a94..a8d33c299 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -187,7 +187,7 @@ def test_modify_spot_fleet_request_up(): spot_fleet_config = conn.describe_spot_fleet_requests( SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] spot_fleet_config['TargetCapacity'].should.equal(20) - spot_fleet_config['FulfilledCapacity'].should.equal(20) + spot_fleet_config['FulfilledCapacity'].should.equal(20.0) @mock_ec2 @@ -236,7 +236,7 @@ def test_modify_spot_fleet_request_down_no_terminate(): spot_fleet_config = conn.describe_spot_fleet_requests( SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] spot_fleet_config['TargetCapacity'].should.equal(1) - spot_fleet_config['FulfilledCapacity'].should.equal(6) + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) @mock_ec2 @@ -262,7 +262,7 @@ def test_modify_spot_fleet_request_down_odd(): spot_fleet_config = conn.describe_spot_fleet_requests( SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] spot_fleet_config['TargetCapacity'].should.equal(5) - spot_fleet_config['FulfilledCapacity'].should.equal(6) + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) @mock_ec2 @@ -286,7 +286,7 @@ def test_modify_spot_fleet_request_down(): spot_fleet_config = conn.describe_spot_fleet_requests( SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] 
spot_fleet_config['TargetCapacity'].should.equal(1) - spot_fleet_config['FulfilledCapacity'].should.equal(2) + spot_fleet_config['FulfilledCapacity'].should.equal(2.0) @mock_ec2 @@ -315,4 +315,4 @@ def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate(): spot_fleet_config = conn.describe_spot_fleet_requests( SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] spot_fleet_config['TargetCapacity'].should.equal(1) - spot_fleet_config['FulfilledCapacity'].should.equal(2) + spot_fleet_config['FulfilledCapacity'].should.equal(2.0) From 88fb732302d62454ff7713185d596fe3dfd6d01c Mon Sep 17 00:00:00 2001 From: William Johansson Date: Fri, 6 Oct 2017 21:55:01 +0200 Subject: [PATCH 364/412] Support wildcard tag filters on SecurityGroups --- moto/ec2/models.py | 3 ++- tests/test_ec2/test_security_groups.py | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 05224a45d..f8090e783 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -109,6 +109,7 @@ from .utils import ( random_vpn_connection_id, random_customer_gateway_id, is_tag_filter, + tag_filter_matches, ) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources') @@ -1309,7 +1310,7 @@ class SecurityGroup(TaggedEC2Resource): elif is_tag_filter(key): tag_value = self.get_filter_value(key) if isinstance(filter_value, list): - return any(v in tag_value for v in filter_value) + return tag_filter_matches(self, key, filter_value) return tag_value in filter_value else: attr_name = to_attr(key) diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 45e6e327d..0d7565a31 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -613,6 +613,20 @@ def test_security_group_tagging_boto3(): tag['Key'].should.equal("Test") +@mock_ec2 +def test_security_group_wildcard_tag_filter_boto3(): + conn = 
boto3.client('ec2', region_name='us-east-1') + sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") + conn.create_tags(Resources=[sg['GroupId']], Tags=[ + {'Key': 'Test', 'Value': 'Tag'}]) + describe = conn.describe_security_groups( + Filters=[{'Name': 'tag-value', 'Values': ['*']}]) + + tag = describe["SecurityGroups"][0]['Tags'][0] + tag['Value'].should.equal("Tag") + tag['Key'].should.equal("Test") + + @mock_ec2 def test_authorize_and_revoke_in_bulk(): ec2 = boto3.resource('ec2', region_name='us-west-1') From c86bece382b50b24a629fd431c6c5a640f546a74 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sat, 7 Oct 2017 21:57:14 +0100 Subject: [PATCH 365/412] Added FilterExpression to dynamodb scan --- moto/dynamodb2/comparisons.py | 421 ++++++++++++++++++++++++++ moto/dynamodb2/models.py | 22 +- moto/dynamodb2/responses.py | 9 +- tests/test_dynamodb2/test_dynamodb.py | 69 +++++ 4 files changed, 513 insertions(+), 8 deletions(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 0b323ecd5..5ac230d00 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals +import re +import six # TODO add tests for all of these EQ_FUNCTION = lambda item_value, test_value: item_value == test_value # flake8: noqa @@ -39,3 +41,422 @@ COMPARISON_FUNCS = { def get_comparison_func(range_comparison): return COMPARISON_FUNCS.get(range_comparison) + + +# +def get_filter_expression(expr, names, values): + # Examples + # expr = 'Id > 5 AND attribute_exists(test) AND Id BETWEEN 5 AND 6 OR length < 6 AND contains(test, 1) AND 5 IN (4,5, 6) OR (Id < 5 AND 5 > Id)' + # expr = 'Id > 5 AND Subs < 7' + + # Need to do some dodgyness for NOT i think. 
+ if 'NOT' in expr: + raise NotImplementedError('NOT not supported yet') + + if names is None: + names = {} + if values is None: + values = {} + + # Do substitutions + for key, value in names.items(): + expr = expr.replace(key, value) + for key, value in values.items(): + if 'N' in value: + expr.replace(key, float(value['N'])) + else: + expr = expr.replace(key, value['S']) + + # Remove all spaces, tbf we could just skip them in the next step. + # The number of known options is really small so we can do a fair bit of cheating + expr = list(re.sub('\s', '', expr)) # 'Id>5ANDattribute_exists(test)ORNOTlength<6' + + # DodgyTokenisation stage 1 + def is_value(val): + return val not in ('<', '>', '=', '(', ')') + + def contains_keyword(val): + for kw in ('BETWEEN', 'IN', 'AND', 'OR', 'NOT'): + if kw in val: + return kw + return None + + def is_function(val): + return val in ('attribute_exists', 'attribute_not_exists', 'attribute_type', 'begins_with', 'contains', 'size') + + # Does the main part of splitting between sections of characters + tokens = [] + stack = '' + while len(expr) > 0: + current_char = expr.pop(0) + + if current_char == ',': # Split params , + if len(stack) > 0: + tokens.append(stack) + stack = '' + elif is_value(current_char): + stack += current_char + + kw = contains_keyword(stack) + if kw is not None: + # We have a kw in the stack, could be AND or something like 5AND + tmp = stack.replace(kw, '') + if len(tmp) > 0: + tokens.append(tmp) + tokens.append(kw) + stack = '' + else: + if len(stack) > 0: + tokens.append(stack) + tokens.append(current_char) + stack = '' + if len(stack) > 0: + tokens.append(stack) + + # DodgyTokenisation stage 2, it groups together some elements to make RPN'ing it later easier. 
+ tokens2 = [] + token_iterator = iter(tokens) + for token in token_iterator: + if token == '(': + tuple_list = [] + + next_token = six.next(token_iterator) + while next_token != ')': + tuple_list.append(next_token) + next_token = six.next(token_iterator) + + tokens2.append(tuple(tuple_list)) + elif token == 'BETWEEN': + op1 = six.next(token_iterator) + and_op = six.next(token_iterator) + assert and_op == 'AND' + op2 = six.next(token_iterator) + tokens2.append('BETWEEN') + tokens2.append((op1, op2)) + + elif is_function(token): + function_list = [token] + + lbracket = six.next(token_iterator) + assert lbracket == '(' + + next_token = six.next(token_iterator) + while next_token != ')': + function_list.append(next_token) + next_token = six.next(token_iterator) + + tokens2.append(function_list) + + else: + try: + token = int(token) + except ValueError: + try: + token = float(token) + except ValueError: + pass + tokens2.append(token) + + # Start of the Shunting-Yard algorigth. <-- Proper beast algorithm! + def is_number(val): + return val not in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') + + def is_op(val): + return val in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') + + OPS = {'<': 5, '>': 5, '=': 5, '>=': 5, '<=': 5, '<>': 5, 'IN': 8, 'AND': 11, 'OR': 12, 'NOT': 10, 'BETWEEN': 9, '(': 1, ')': 1} + + output = [] + op_stack = [] + # Basically takes in an infix notation calculation, converts it to a reverse polish notation where there is no + # ambiguaty on which order operators are applied. 
+ while len(tokens2) > 0: + token = tokens2.pop(0) + + if token == '(': + op_stack.append(token) + elif token == ')': + while len(op_stack) > 0 and op_stack[-1] != '(': + output.append(op_stack.pop()) + if len(op_stack) == 0: + # No left paren on the stack, error + raise Exception('Missing left paren') + + # Pop off the left paren + op_stack.pop() + + elif is_number(token): + output.append(token) + else: + # Must be operator kw + while len(op_stack) > 0 and OPS[op_stack[-1]] <= OPS[token]: + output.append(op_stack.pop()) + op_stack.append(token) + while len(op_stack) > 0: + output.append(op_stack.pop()) + + # Hacky funcition to convert dynamo functions (which are represented as lists) to their Class equivelent + def to_func(val): + if isinstance(val, list): + func_name = val.pop(0) + # Expand rest of the list to arguments + val = FUNC_CLASS[func_name](*val) + + return val + + # Simple reverse polish notation execution. Builts up a nested filter object. + # The filter object then takes a dynamo item and returns true/false + stack = [] + for token in output: + if is_op(token): + op2 = stack.pop() + op1 = stack.pop() + + op_cls = OP_CLASS[token] + stack.append(op_cls(op1, op2)) + else: + stack.append(to_func(token)) + + return stack[0] + + +class Op(object): + """ + Base class for a FilterExpression operator + """ + OP = '' + + def __init__(self, lhs, rhs): + self.lhs = lhs + self.rhs = rhs + + def _lhs(self, item): + """ + :type item: moto.dynamodb2.models.Item + """ + lhs = self.lhs + if isinstance(self.lhs, (Op, Func)): + lhs = self.lhs.expr(item) + elif isinstance(self.lhs, str): + try: + lhs = item.attrs[self.lhs].cast_value + except Exception: + pass + + return lhs + + def _rhs(self, item): + rhs = self.rhs + if isinstance(self.rhs, (Op, Func)): + rhs = self.rhs.expr(item) + elif isinstance(self.lhs, str): + try: + rhs = item.attrs[self.rhs].cast_value + except Exception: + pass + return rhs + + def expr(self, item): + return True + + def __repr__(self): + 
return '({0} {1} {2})'.format(self.lhs, self.OP, self.rhs) + + +class Func(object): + """ + Base class for a FilterExpression function + """ + FUNC = 'Unknown' + + def expr(self, item): + return True + + def __repr__(self): + return 'Func(...)'.format(self.FUNC) + + +class OpAnd(Op): + OP = 'AND' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return lhs and rhs + + +class OpLessThan(Op): + OP = '<' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return lhs < rhs + + +class OpGreaterThan(Op): + OP = '>' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return lhs > rhs + + +class OpEqual(Op): + OP = '=' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return lhs == rhs + + +class OpNotEqual(Op): + OP = '<>' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return lhs == rhs + + +class OpLessThanOrEqual(Op): + OP = '<=' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return lhs <= rhs + + +class OpGreaterThanOrEqual(Op): + OP = '>=' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return lhs >= rhs + + +class OpOr(Op): + OP = 'OR' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return lhs or rhs + + +class OpIn(Op): + OP = 'IN' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return lhs in rhs + + +class OpBetween(Op): + OP = 'BETWEEN' + + def expr(self, item): + lhs = self._lhs(item) + rhs = self._rhs(item) + return rhs[0] <= lhs <= rhs[1] + + +class FuncAttrExists(Func): + FUNC = 'attribute_exists' + + def __init__(self, attribute): + self.attr = attribute + + def expr(self, item): + return self.attr in item.attrs + + +class FuncAttrNotExists(Func): + FUNC = 'attribute_not_exists' + + def __init__(self, attribute): + self.attr = attribute + + def expr(self, item): + return self.attr not in item.attrs + + +class 
FuncAttrType(Func): + FUNC = 'attribute_type' + + def __init__(self, attribute, _type): + self.attr = attribute + self.type = _type + + def expr(self, item): + return self.attr in item.attrs and item.attrs[self.attr].type == self.type + + +class FuncBeginsWith(Func): + FUNC = 'begins_with' + + def __init__(self, attribute, substr): + self.attr = attribute + self.substr = substr + + def expr(self, item): + return self.attr in item.attrs and item.attrs[self.attr].type == 'S' and item.attrs[self.attr].value.startswith(self.substr) + + +class FuncContains(Func): + FUNC = 'contains' + + def __init__(self, attribute, operand): + self.attr = attribute + self.operand = operand + + def expr(self, item): + if self.attr not in item.attrs: + return False + + if item.attrs[self.attr].type in ('S', 'SS', 'NS', 'BS', 'L', 'M'): + return self.operand in item.attrs[self.attr].value + return False + + +class FuncSize(Func): + FUNC = 'contains' + + def __init__(self, attribute): + self.attr = attribute + + def expr(self, item): + if self.attr not in item.attrs: + raise ValueError('Invalid option') + + if item.attrs[self.attr].type in ('S', 'SS', 'NS', 'B', 'BS', 'L', 'M'): + return len(item.attrs[self.attr].value) + raise ValueError('Invalid option') + + +OP_CLASS = { + 'AND': OpAnd, + 'OR': OpOr, + 'IN': OpIn, + 'BETWEEN': OpBetween, + '<': OpLessThan, + '>': OpGreaterThan, + '<=': OpLessThanOrEqual, + '>=': OpGreaterThanOrEqual, + '=': OpEqual, + '<>': OpNotEqual +} + +FUNC_CLASS = { + 'attribute_exists': FuncAttrExists, + 'attribute_not_exists': FuncAttrNotExists, + 'attribute_type': FuncAttrType, + 'begins_with': FuncBeginsWith, + 'contains': FuncContains, + 'size': FuncSize +} diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index fde269726..bec72d327 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -8,7 +8,7 @@ import re from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time 
-from .comparisons import get_comparison_func +from .comparisons import get_comparison_func, get_filter_expression, Op class DynamoJsonEncoder(json.JSONEncoder): @@ -508,15 +508,15 @@ class Table(BaseModel): else: yield hash_set - def scan(self, filters, limit, exclusive_start_key): + def scan(self, filters, limit, exclusive_start_key, filter_expression=None): results = [] scanned_count = 0 - for result in self.all_items(): + for item in self.all_items(): scanned_count += 1 passes_all_conditions = True for attribute_name, (comparison_operator, comparison_objs) in filters.items(): - attribute = result.attrs.get(attribute_name) + attribute = item.attrs.get(attribute_name) if attribute: # Attribute found @@ -532,8 +532,11 @@ class Table(BaseModel): passes_all_conditions = False break + if filter_expression is not None: + passes_all_conditions &= filter_expression.expr(item) + if passes_all_conditions: - results.append(result) + results.append(item) results, last_evaluated_key = self._trim_results(results, limit, exclusive_start_key) @@ -698,7 +701,7 @@ class DynamoDBBackend(BaseBackend): return table.query(hash_key, range_comparison, range_values, limit, exclusive_start_key, scan_index_forward, projection_expression, index_name, **filter_kwargs) - def scan(self, table_name, filters, limit, exclusive_start_key): + def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values): table = self.tables.get(table_name) if not table: return None, None, None @@ -708,7 +711,12 @@ class DynamoDBBackend(BaseBackend): dynamo_types = [DynamoType(value) for value in comparison_values] scan_filters[key] = (comparison_operator, dynamo_types) - return table.scan(scan_filters, limit, exclusive_start_key) + if filter_expression is not None: + filter_expression = get_filter_expression(filter_expression, expr_names, expr_values) + else: + filter_expression = Op(None, None) # Will always eval to true + + return table.scan(scan_filters, limit, 
exclusive_start_key, filter_expression) def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, expected=None): diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 437850713..32af47df1 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -432,12 +432,19 @@ class DynamoHandler(BaseResponse): comparison_values = scan_filter.get("AttributeValueList", []) filters[attribute_name] = (comparison_operator, comparison_values) + filter_expression = self.body.get('FilterExpression') + expression_attribute_values = self.body.get('ExpressionAttributeValues', {}) + expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) + exclusive_start_key = self.body.get('ExclusiveStartKey') limit = self.body.get("Limit") items, scanned_count, last_evaluated_key = dynamodb_backend2.scan(name, filters, limit, - exclusive_start_key) + exclusive_start_key, + filter_expression, + expression_attribute_names, + expression_attribute_values) if items is None: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 35c14f396..994c64e7c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals, print_function import six import boto import boto3 +from boto3.dynamodb.conditions import Attr import sure # noqa import requests from moto import mock_dynamodb2, mock_dynamodb2_deprecated @@ -12,6 +13,10 @@ from botocore.exceptions import ClientError from boto3.dynamodb.conditions import Key from tests.helpers import requires_boto_gte import tests.backport_assert_raises + +import moto.dynamodb2.comparisons +import moto.dynamodb2.models + from nose.tools import assert_raises try: import boto.dynamodb2 @@ -230,6 +235,7 @@ def test_scan_returns_consumed_capacity(): assert 
'CapacityUnits' in response['ConsumedCapacity'] assert response['ConsumedCapacity']['TableName'] == name + @requires_boto_gte("2.9") @mock_dynamodb2 def test_query_returns_consumed_capacity(): @@ -280,6 +286,7 @@ def test_query_returns_consumed_capacity(): assert 'CapacityUnits' in results['ConsumedCapacity'] assert results['ConsumedCapacity']['CapacityUnits'] == 1 + @mock_dynamodb2 def test_basic_projection_expressions(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ -353,6 +360,7 @@ def test_basic_projection_expressions(): assert 'body' in results['Items'][1] assert results['Items'][1]['body'] == 'yet another test message' + @mock_dynamodb2 def test_basic_projection_expressions_with_attr_expression_names(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ -419,6 +427,7 @@ def test_basic_projection_expressions_with_attr_expression_names(): assert 'attachment' in results['Items'][0] assert results['Items'][0]['attachment'] == 'something' + @mock_dynamodb2 def test_put_item_returns_consumed_capacity(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ -461,6 +470,7 @@ def test_put_item_returns_consumed_capacity(): assert 'ConsumedCapacity' in response + @mock_dynamodb2 def test_update_item_returns_consumed_capacity(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ -514,6 +524,7 @@ def test_update_item_returns_consumed_capacity(): assert 'CapacityUnits' in response['ConsumedCapacity'] assert 'TableName' in response['ConsumedCapacity'] + @mock_dynamodb2 def test_get_item_returns_consumed_capacity(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ -562,3 +573,61 @@ def test_get_item_returns_consumed_capacity(): assert 'ConsumedCapacity' in response assert 'CapacityUnits' in response['ConsumedCapacity'] assert 'TableName' in response['ConsumedCapacity'] + + +def test_filter_expression(): + row1 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': 
'5'}, 'Desc': {'S': 'Some description'}, 'KV': {'SS': ['test1', 'test2']}}) + row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}}) + + filter1 = moto.dynamodb2.comparisons.get_filter_expression('Id > 5 AND Subs < 7', {}, {}) + filter1.expr(row1).should.be(True) + filter1.expr(row2).should.be(False) + + filter2 = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists(User)', {}, {}) + filter2.expr(row1).should.be(True) + + filter3 = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {}) + filter3.expr(row1).should.be(True) + + filter4 = moto.dynamodb2.comparisons.get_filter_expression('begins_with(Desc, Some)', {}, {}) + filter4.expr(row1).should.be(True) + filter4.expr(row2).should.be(False) + + filter5 = moto.dynamodb2.comparisons.get_filter_expression('contains(KV, test1)', {}, {}) + filter5.expr(row1).should.be(True) + filter5.expr(row2).should.be(False) + + filter6 = moto.dynamodb2.comparisons.get_filter_expression('size(Desc) > size(KV)', {}, {}) + filter6.expr(row1).should.be(True) + + +@mock_dynamodb2 +def test_scan_filter(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'} + } + ) + + table = dynamodb.Table('test1') + response = table.scan( + FilterExpression=Attr('app').eq('app2') + ) + assert response['Count'] == 0 + + response = table.scan( + FilterExpression=Attr('app').eq('app1') + ) + assert response['Count'] == 1 From a5895db4f8c42e30fc965a305a1cc43a668ae23c Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sat, 7 Oct 2017 22:20:16 +0100 Subject: [PATCH 366/412] Python27 string type fix --- moto/dynamodb2/comparisons.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 5ac230d00..57bfe6a39 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -237,7 +237,7 @@ class Op(object): lhs = self.lhs if isinstance(self.lhs, (Op, Func)): lhs = self.lhs.expr(item) - elif isinstance(self.lhs, str): + elif isinstance(self.lhs, six.string_types): try: lhs = item.attrs[self.lhs].cast_value except Exception: @@ -249,7 +249,7 @@ class Op(object): rhs = self.rhs if isinstance(self.rhs, (Op, Func)): rhs = self.rhs.expr(item) - elif isinstance(self.lhs, str): + elif isinstance(self.lhs, six.string_types): try: rhs = item.attrs[self.rhs].cast_value except Exception: From d5fdb837d2825c56d42fa545ee994a0765366cdf Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sat, 7 Oct 2017 16:22:48 -0700 Subject: [PATCH 367/412] trying codecov --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f1b7ac40d..a744f42cf 100644 --- 
a/.travis.yml +++ b/.travis.yml @@ -25,7 +25,8 @@ install: travis_retry pip install boto==2.45.0 travis_retry pip install boto3 travis_retry pip install dist/moto*.gz - travis_retry pip install coveralls==1.1 + travis_retry pip install coveralls + travis_retry pip install codecov travis_retry pip install -r requirements-dev.txt if [ "$TEST_SERVER_MODE" = "true" ]; then @@ -35,3 +36,4 @@ script: - make test after_success: - coveralls + - codecov From 77fcafca18bd95764d6e0f727dc3122517e06491 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sun, 8 Oct 2017 04:18:25 +0100 Subject: [PATCH 368/412] Cleaned up code --- moto/dynamodb2/comparisons.py | 130 +++++++++++-------- moto/dynamodb2/responses.py | 21 +++- tests/test_dynamodb2/test_dynamodb.py | 175 +++++++++++++++++++++++--- 3 files changed, 255 insertions(+), 71 deletions(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 57bfe6a39..faaaaf638 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -69,7 +69,8 @@ def get_filter_expression(expr, names, values): # Remove all spaces, tbf we could just skip them in the next step. 
# The number of known options is really small so we can do a fair bit of cheating - expr = list(re.sub('\s', '', expr)) # 'Id>5ANDattribute_exists(test)ORNOTlength<6' + #expr = list(re.sub('\s', '', expr)) # 'Id>5ANDattribute_exists(test)ORNOTlength<6' + expr = list(expr) # DodgyTokenisation stage 1 def is_value(val): @@ -90,7 +91,11 @@ def get_filter_expression(expr, names, values): while len(expr) > 0: current_char = expr.pop(0) - if current_char == ',': # Split params , + if current_char == ' ': + if len(stack) > 0: + tokens.append(stack) + stack = '' + elif current_char == ',': # Split params , if len(stack) > 0: tokens.append(stack) stack = '' @@ -113,6 +118,9 @@ def get_filter_expression(expr, names, values): if len(stack) > 0: tokens.append(stack) + def is_op(val): + return val in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') + # DodgyTokenisation stage 2, it groups together some elements to make RPN'ing it later easier. tokens2 = [] token_iterator = iter(tokens) @@ -122,17 +130,30 @@ def get_filter_expression(expr, names, values): next_token = six.next(token_iterator) while next_token != ')': + try: + next_token = int(next_token) + except ValueError: + try: + next_token = float(next_token) + except ValueError: + pass tuple_list.append(next_token) next_token = six.next(token_iterator) - tokens2.append(tuple(tuple_list)) + # Sigh, we only want to group a tuple if it doesnt contain operators + if any([is_op(item) for item in tuple_list]): + tokens2.append('(') + tokens2.extend(tuple_list) + tokens2.append(')') + else: + tokens2.append(tuple(tuple_list)) elif token == 'BETWEEN': - op1 = six.next(token_iterator) + field = tokens2.pop() + op1 = int(six.next(token_iterator)) and_op = six.next(token_iterator) assert and_op == 'AND' - op2 = six.next(token_iterator) - tokens2.append('BETWEEN') - tokens2.append((op1, op2)) + op2 = int(six.next(token_iterator)) + tokens2.append(['between', field, op1, op2]) elif is_function(token): 
function_list = [token] @@ -161,39 +182,38 @@ def get_filter_expression(expr, names, values): def is_number(val): return val not in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') - def is_op(val): - return val in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') + OPS = {'<': 5, '>': 5, '=': 5, '>=': 5, '<=': 5, '<>': 5, 'IN': 8, 'AND': 11, 'OR': 12, 'NOT': 10, 'BETWEEN': 9, '(': 100, ')': 100} - OPS = {'<': 5, '>': 5, '=': 5, '>=': 5, '<=': 5, '<>': 5, 'IN': 8, 'AND': 11, 'OR': 12, 'NOT': 10, 'BETWEEN': 9, '(': 1, ')': 1} + def shunting_yard(token_list): + output = [] + op_stack = [] - output = [] - op_stack = [] - # Basically takes in an infix notation calculation, converts it to a reverse polish notation where there is no - # ambiguaty on which order operators are applied. - while len(tokens2) > 0: - token = tokens2.pop(0) + # Basically takes in an infix notation calculation, converts it to a reverse polish notation where there is no + # ambiguaty on which order operators are applied. 
+ while len(token_list) > 0: + token = token_list.pop(0) - if token == '(': - op_stack.append(token) - elif token == ')': - while len(op_stack) > 0 and op_stack[-1] != '(': - output.append(op_stack.pop()) - if len(op_stack) == 0: - # No left paren on the stack, error - raise Exception('Missing left paren') + if token == '(': + op_stack.append(token) + elif token == ')': + while len(op_stack) > 0 and op_stack[-1] != '(': + output.append(op_stack.pop()) + lbracket = op_stack.pop() + assert lbracket == '(' - # Pop off the left paren - op_stack.pop() + elif is_number(token): + output.append(token) + else: + # Must be operator kw + while len(op_stack) > 0 and OPS[op_stack[-1]] <= OPS[token]: + output.append(op_stack.pop()) + op_stack.append(token) + while len(op_stack) > 0: + output.append(op_stack.pop()) - elif is_number(token): - output.append(token) - else: - # Must be operator kw - while len(op_stack) > 0 and OPS[op_stack[-1]] <= OPS[token]: - output.append(op_stack.pop()) - op_stack.append(token) - while len(op_stack) > 0: - output.append(op_stack.pop()) + return output + + output = shunting_yard(tokens2) # Hacky funcition to convert dynamo functions (which are represented as lists) to their Class equivelent def to_func(val): @@ -217,7 +237,11 @@ def get_filter_expression(expr, names, values): else: stack.append(to_func(token)) - return stack[0] + result = stack.pop(0) + if len(stack) > 0: + raise ValueError('Malformed filter expression') + + return result class Op(object): @@ -249,7 +273,7 @@ class Op(object): rhs = self.rhs if isinstance(self.rhs, (Op, Func)): rhs = self.rhs.expr(item) - elif isinstance(self.lhs, six.string_types): + elif isinstance(self.rhs, six.string_types): try: rhs = item.attrs[self.rhs].cast_value except Exception: @@ -357,15 +381,6 @@ class OpIn(Op): return lhs in rhs -class OpBetween(Op): - OP = 'BETWEEN' - - def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) - return rhs[0] <= lhs <= rhs[1] - - class 
FuncAttrExists(Func): FUNC = 'attribute_exists' @@ -432,18 +447,32 @@ class FuncSize(Func): def expr(self, item): if self.attr not in item.attrs: - raise ValueError('Invalid option') + raise ValueError('Invalid attribute name {0}'.format(self.attr)) if item.attrs[self.attr].type in ('S', 'SS', 'NS', 'B', 'BS', 'L', 'M'): return len(item.attrs[self.attr].value) - raise ValueError('Invalid option') + raise ValueError('Invalid filter expression') + + +class FuncBetween(Func): + FUNC = 'between' + + def __init__(self, attribute, start, end): + self.attr = attribute + self.start = start + self.end = end + + def expr(self, item): + if self.attr not in item.attrs: + raise ValueError('Invalid attribute name {0}'.format(self.attr)) + + return self.start <= item.attrs[self.attr].cast_value <= self.end OP_CLASS = { 'AND': OpAnd, 'OR': OpOr, 'IN': OpIn, - 'BETWEEN': OpBetween, '<': OpLessThan, '>': OpGreaterThan, '<=': OpLessThanOrEqual, @@ -458,5 +487,6 @@ FUNC_CLASS = { 'attribute_type': FuncAttrType, 'begins_with': FuncBeginsWith, 'contains': FuncContains, - 'size': FuncSize + 'size': FuncSize, + 'between': FuncBetween } diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 32af47df1..75e625c73 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -439,13 +439,22 @@ class DynamoHandler(BaseResponse): exclusive_start_key = self.body.get('ExclusiveStartKey') limit = self.body.get("Limit") - items, scanned_count, last_evaluated_key = dynamodb_backend2.scan(name, filters, - limit, - exclusive_start_key, - filter_expression, - expression_attribute_names, - expression_attribute_values) + try: + items, scanned_count, last_evaluated_key = dynamodb_backend2.scan(name, filters, + limit, + exclusive_start_key, + filter_expression, + expression_attribute_names, + expression_attribute_values) + except ValueError as err: + er = 'com.amazonaws.dynamodb.v20111205#ValidationError' + return self.error(er, 'Bad Filter Expression: 
{0}'.format(err)) + except Exception as err: + er = 'com.amazonaws.dynamodb.v20111205#InternalFailure' + return self.error(er, 'Internal error. {0}'.format(err)) + # Items should be a list, at least an empty one. Is None if table does not exist. + # Should really check this at the beginning if items is None: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er, 'Requested resource not found') diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 994c64e7c..85d8feb34 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -576,29 +576,51 @@ def test_get_item_returns_consumed_capacity(): def test_filter_expression(): + # TODO NOT not yet supported row1 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '5'}, 'Desc': {'S': 'Some description'}, 'KV': {'SS': ['test1', 'test2']}}) row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}}) - filter1 = moto.dynamodb2.comparisons.get_filter_expression('Id > 5 AND Subs < 7', {}, {}) - filter1.expr(row1).should.be(True) - filter1.expr(row2).should.be(False) + # AND test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > 5 AND Subs < 7', {}, {}) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) - filter2 = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists(User)', {}, {}) - filter2.expr(row1).should.be(True) + # OR test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 5 OR Id=8', {}, {}) + filter_expr.expr(row1).should.be(True) - filter3 = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {}) - filter3.expr(row1).should.be(True) + # BETWEEN test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN 5 AND 
10', {}, {}) + filter_expr.expr(row1).should.be(True) - filter4 = moto.dynamodb2.comparisons.get_filter_expression('begins_with(Desc, Some)', {}, {}) - filter4.expr(row1).should.be(True) - filter4.expr(row2).should.be(False) + # PAREN test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 8 AND (Subs = 8 OR Subs = 5)', {}, {}) + filter_expr.expr(row1).should.be(True) - filter5 = moto.dynamodb2.comparisons.get_filter_expression('contains(KV, test1)', {}, {}) - filter5.expr(row1).should.be(True) - filter5.expr(row2).should.be(False) + # IN test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN (7,8, 9)', {}, {}) + filter_expr.expr(row1).should.be(True) - filter6 = moto.dynamodb2.comparisons.get_filter_expression('size(Desc) > size(KV)', {}, {}) - filter6.expr(row1).should.be(True) + # attribute function tests + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists(User)', {}, {}) + filter_expr.expr(row1).should.be(True) + + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {}) + filter_expr.expr(row1).should.be(True) + + # beginswith function test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('begins_with(Desc, Some)', {}, {}) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) + + # contains function test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('contains(KV, test1)', {}, {}) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) + + # size function test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('size(Desc) > size(KV)', {}, {}) + filter_expr.expr(row1).should.be(True) @mock_dynamodb2 @@ -631,3 +653,126 @@ def test_scan_filter(): FilterExpression=Attr('app').eq('app1') ) assert response['Count'] == 1 + + +@mock_dynamodb2 +def test_bad_scan_filter(): + client = boto3.client('dynamodb', 
region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + table = dynamodb.Table('test1') + + # Bad expression + try: + table.scan( + FilterExpression='client test' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ValidationError') + else: + raise RuntimeError('Should of raised ResourceInUseException') + + + +@mock_dynamodb2 +def test_duplicate_create(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + + try: + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceInUseException') + else: + raise RuntimeError('Should of raised ResourceInUseException') + + +@mock_dynamodb2 +def test_delete_table(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + + client.delete_table(TableName='test1') + + resp = client.list_tables() + len(resp['TableNames']).should.equal(0) + + try: + client.delete_table(TableName='test1') + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFoundException') + else: + raise RuntimeError('Should of raised ResourceNotFoundException') + + +@mock_dynamodb2 +def test_delete_item(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'} + } + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app2'} + } + ) + + table = dynamodb.Table('test1') + response = table.scan() + assert response['Count'] == 2 + + # Test deletion and returning old value + response = table.delete_item(Key={'client': 'client1', 'app': 'app1'}, ReturnValues='ALL_OLD') + response['Attributes'].should.contain('client') + response['Attributes'].should.contain('app') + + response = table.scan() + assert response['Count'] == 1 + + # Test deletion returning nothing + response = table.delete_item(Key={'client': 'client1', 'app': 'app2'}) + 
len(response['Attributes']).should.equal(0) + + response = table.scan() + assert response['Count'] == 0 From 10b0937de34f5c7bd59b84a0e0b9c36e56045b96 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Sat, 7 Oct 2017 20:46:26 -0700 Subject: [PATCH 369/412] Revert "Merge pull request #1246 from JackDanger/jack/trying-codecov" This reverts commit bd6108dae21ceaca46162c3ecb905918efcb0ff8, reversing changes made to c23c5057f252d97f7873715c008b859edd683675. --- .travis.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index a744f42cf..f1b7ac40d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,8 +25,7 @@ install: travis_retry pip install boto==2.45.0 travis_retry pip install boto3 travis_retry pip install dist/moto*.gz - travis_retry pip install coveralls - travis_retry pip install codecov + travis_retry pip install coveralls==1.1 travis_retry pip install -r requirements-dev.txt if [ "$TEST_SERVER_MODE" = "true" ]; then @@ -36,4 +35,3 @@ script: - make test after_success: - coveralls - - codecov From d145b5dc1820cb6e125858c02084e27f3a74a01a Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sun, 8 Oct 2017 04:57:40 +0100 Subject: [PATCH 370/412] Possible fix --- moto/packages/httpretty/core.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py index 5a8d01798..e0f3a7e69 100644 --- a/moto/packages/httpretty/core.py +++ b/moto/packages/httpretty/core.py @@ -103,6 +103,12 @@ try: # pragma: no cover except ImportError: # pragma: no cover ssl = None +try: # pragma: no cover + from requests.packages.urllib3.contrib.pyopenssl import inject_into_urllib3, extract_from_urllib3 + pyopenssl_override = True +except: + pyopenssl_override = False + DEFAULT_HTTP_PORTS = frozenset([80]) POTENTIAL_HTTP_PORTS = set(DEFAULT_HTTP_PORTS) @@ -1013,6 +1019,9 @@ class httpretty(HttpBaseClass): ssl.sslwrap_simple = old_sslwrap_simple ssl.__dict__['sslwrap_simple'] = 
old_sslwrap_simple + if pyopenssl_override: + inject_into_urllib3() + @classmethod def is_enabled(cls): return cls._is_enabled @@ -1056,6 +1065,9 @@ class httpretty(HttpBaseClass): ssl.sslwrap_simple = fake_wrap_socket ssl.__dict__['sslwrap_simple'] = fake_wrap_socket + if pyopenssl_override: + extract_from_urllib3() + def httprettified(test): "A decorator tests that use HTTPretty" From 9f59f1f7ca2ff94d9ad34dfe9889f757bba2373a Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sun, 8 Oct 2017 10:34:30 +0100 Subject: [PATCH 371/412] Spelling fix ;-) --- moto/dynamodb2/comparisons.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index faaaaf638..c0983a296 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -189,7 +189,7 @@ def get_filter_expression(expr, names, values): op_stack = [] # Basically takes in an infix notation calculation, converts it to a reverse polish notation where there is no - # ambiguaty on which order operators are applied. + # ambiguity on which order operators are applied. 
while len(token_list) > 0: token = token_list.pop(0) @@ -215,7 +215,7 @@ def get_filter_expression(expr, names, values): output = shunting_yard(tokens2) - # Hacky funcition to convert dynamo functions (which are represented as lists) to their Class equivelent + # Hacky function to convert dynamo functions (which are represented as lists) to their Class equivalent def to_func(val): if isinstance(val, list): func_name = val.pop(0) From 9a6ded32ea2477d01a30f8e8dde825d04f3c72c8 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sun, 8 Oct 2017 10:36:02 +0100 Subject: [PATCH 372/412] More spelling --- moto/dynamodb2/comparisons.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index c0983a296..8462c2de5 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -178,7 +178,7 @@ def get_filter_expression(expr, names, values): pass tokens2.append(token) - # Start of the Shunting-Yard algorigth. <-- Proper beast algorithm! + # Start of the Shunting-Yard algorithm. <-- Proper beast algorithm! 
def is_number(val): return val not in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') From dc40fce146d8bf22db4d68f4eb6bc655a58c9197 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 10 Oct 2017 12:51:48 -0700 Subject: [PATCH 373/412] implement SQS QueueDoesNotExist error --- moto/sqs/exceptions.py | 5 +++++ moto/sqs/models.py | 12 ++++++++++-- moto/sqs/responses.py | 22 ++++++++++++++++++---- tests/test_sqs/test_sqs.py | 19 +++++++++++++++---- 4 files changed, 48 insertions(+), 10 deletions(-) diff --git a/moto/sqs/exceptions.py b/moto/sqs/exceptions.py index d72cfdffc..baf721b53 100644 --- a/moto/sqs/exceptions.py +++ b/moto/sqs/exceptions.py @@ -16,3 +16,8 @@ class MessageAttributesInvalid(Exception): def __init__(self, description): self.description = description + + +class QueueDoesNotExist(Exception): + status_code = 404 + description = "The specified queue does not exist for this wsdl version." diff --git a/moto/sqs/models.py b/moto/sqs/models.py index e9d889453..22f310228 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -12,7 +12,12 @@ import boto.sqs from moto.core import BaseBackend, BaseModel from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis from .utils import generate_receipt_handle -from .exceptions import ReceiptHandleIsInvalid, MessageNotInflight, MessageAttributesInvalid +from .exceptions import ( + MessageAttributesInvalid, + MessageNotInflight, + QueueDoesNotExist, + ReceiptHandleIsInvalid, +) DEFAULT_ACCOUNT_ID = 123456789012 DEFAULT_SENDER_ID = "AIDAIT2UOQQY3AUEKVGXU" @@ -304,7 +309,10 @@ class SQSBackend(BaseBackend): return qs def get_queue(self, queue_name): - return self.queues.get(queue_name, None) + queue = self.queues.get(queue_name) + if queue is None: + raise QueueDoesNotExist() + return queue def delete_queue(self, queue_name): if queue_name in self.queues: diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index e0e493ad8..540bd4e41 
100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -8,7 +8,8 @@ from .models import sqs_backends from .exceptions import ( MessageAttributesInvalid, MessageNotInflight, - ReceiptHandleIsInvalid + QueueDoesNotExist, + ReceiptHandleIsInvalid, ) MAXIMUM_VISIBILTY_TIMEOUT = 43200 @@ -76,7 +77,12 @@ class SQSResponse(BaseResponse): def get_queue_url(self): request_url = urlparse(self.uri) queue_name = self._get_param("QueueName") - queue = self.sqs_backend.get_queue(queue_name) + + try: + queue = self.sqs_backend.get_queue(queue_name) + except QueueDoesNotExist as e: + return self._error('QueueDoesNotExist', e.description) + if queue: template = self.response_template(GET_QUEUE_URL_RESPONSE) return template.render(queue=queue, request_url=request_url) @@ -113,7 +119,11 @@ class SQSResponse(BaseResponse): def get_queue_attributes(self): queue_name = self._get_queue_name() - queue = self.sqs_backend.get_queue(queue_name) + try: + queue = self.sqs_backend.get_queue(queue_name) + except QueueDoesNotExist as e: + return self._error('QueueDoesNotExist', e.description) + template = self.response_template(GET_QUEUE_ATTRIBUTES_RESPONSE) return template.render(queue=queue) @@ -250,7 +260,11 @@ class SQSResponse(BaseResponse): def receive_message(self): queue_name = self._get_queue_name() - queue = self.sqs_backend.get_queue(queue_name) + + try: + queue = self.sqs_backend.get_queue(queue_name) + except QueueDoesNotExist as e: + return self._error('QueueDoesNotExist', e.description) try: message_count = int(self.querystring.get("MaxNumberOfMessages")[0]) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 9c439eb68..536261504 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -4,6 +4,7 @@ from __future__ import unicode_literals import boto import boto3 import botocore.exceptions +from botocore.exceptions import ClientError from boto.exception import SQSError from boto.sqs.message import RawMessage, Message @@ -33,6 
+34,7 @@ def test_create_fifo_queue_fail(): else: raise RuntimeError('Should of raised InvalidParameterValue Exception') + @mock_sqs def test_create_fifo_queue(): sqs = boto3.client('sqs', region_name='us-east-1') @@ -49,10 +51,10 @@ def test_create_fifo_queue(): response['Attributes']['FifoQueue'].should.equal('true') - @mock_sqs def test_create_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') + new_queue = sqs.create_queue(QueueName='test-queue') new_queue.should_not.be.none new_queue.should.have.property('url').should.contain('test-queue') @@ -66,10 +68,19 @@ def test_create_queue(): @mock_sqs -def test_get_inexistent_queue(): +def test_get_nonexistent_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') - sqs.get_queue_by_name.when.called_with( - QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) + with assert_raises(ClientError) as err: + sqs.get_queue_by_name(QueueName='nonexisting-queue') + ex = err.exception + ex.operation_name.should.equal('GetQueueUrl') + ex.response['Error']['Code'].should.equal('QueueDoesNotExist') + + with assert_raises(ClientError) as err: + sqs.Queue('http://whatever-incorrect-queue-address').load() + ex = err.exception + ex.operation_name.should.equal('GetQueueAttributes') + ex.response['Error']['Code'].should.equal('QueueDoesNotExist') @mock_sqs From 94fd0ad9f8f983a139df2fe5f5dde7ceb965a8a1 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 10 Oct 2017 13:36:50 -0700 Subject: [PATCH 374/412] bumping to version 1.1.22 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 3f6804ce0..207c5dd2e 100755 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ else: setup( name='moto', - version='1.1.21', + version='1.1.22', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 9a55b0951b1d6620e63824728b61e463f5e007b0 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 10 Oct 
2017 13:39:42 -0700 Subject: [PATCH 375/412] changelog for 1.1.22 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bbce6c343..94819aa8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ Moto Changelog Latest ------ +1.1.22 +----- + + * Lambda policies + * Dynamodb filter expressions + * EC2 Spot fleet improvements + 1.1.21 ----- From 80210988154d93c305f8e313dbc10938039fd5d9 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 11 Oct 2017 10:57:15 -0700 Subject: [PATCH 376/412] Remove newlines from create-access-key response --- moto/iam/responses.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 6ca49b830..df32732a0 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -1159,9 +1159,7 @@ CREATE_ACCESS_KEY_TEMPLATE = """ {{ key.user_name }} {{ key.access_key_id }} {{ key.status }} - - {{ key.secret_access_key }} - + {{ key.secret_access_key }} From e3024ae1bad51c13cc18849d1d502670410f2940 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Wed, 11 Oct 2017 23:46:27 +0100 Subject: [PATCH 377/412] Implemented Terminate, Cancel and List jobs --- moto/batch/models.py | 52 +++++++++++ moto/batch/responses.py | 39 +++++++++ moto/batch/urls.py | 5 +- tests/test_batch/test_batch.py | 156 ++++++++++++++++++++++++++++++++- 4 files changed, 249 insertions(+), 3 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index be8fca9d1..7f75225f7 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -196,6 +196,7 @@ class Job(threading.Thread, BaseModel): self.job_started_at = datetime.datetime(1970, 1, 1) self.job_stopped_at = datetime.datetime(1970, 1, 1) self.job_stopped = False + self.job_stopped_reason = None self.stop = False @@ -230,6 +231,8 @@ class Job(threading.Thread, BaseModel): } if self.job_stopped: result['stoppedAt'] = datetime2int(self.job_stopped_at) + if self.job_stopped_reason is not 
None: + result['statusReason'] = self.job_stopped_reason return result def run(self): @@ -328,6 +331,11 @@ class Job(threading.Thread, BaseModel): self.job_stopped = True self.job_stopped_at = datetime.datetime.now() + def terminate(self, reason): + if not self.stop: + self.stop = True + self.job_stopped_reason = reason + class BatchBackend(BaseBackend): def __init__(self, region_name=None): @@ -478,6 +486,20 @@ class BatchBackend(BaseBackend): return result + def get_job_by_id(self, identifier): + """ + Get job by id + :param identifier: Job ID + :type identifier: str + + :return: Job + :rtype: Job + """ + try: + return self._jobs[identifier] + except KeyError: + return None + def describe_compute_environments(self, environments=None, max_results=None, next_token=None): envs = set() if environments is not None: @@ -916,6 +938,36 @@ class BatchBackend(BaseBackend): return result + def list_jobs(self, job_queue, job_status=None, max_results=None, next_token=None): + jobs = [] + + job_queue = self.get_job_queue(job_queue) + if job_queue is None: + raise ClientException('Job queue {0} does not exist'.format(job_queue)) + + if job_status is not None and job_status not in ('SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING', 'RUNNING', 'SUCCEEDED', 'FAILED'): + raise ClientException('Job status is not one of SUBMITTED | PENDING | RUNNABLE | STARTING | RUNNING | SUCCEEDED | FAILED') + + for job in job_queue.jobs: + if job_status is not None and job.job_state != job_status: + continue + + jobs.append(job) + + return jobs + + def terminate_job(self, job_id, reason): + if job_id is None: + raise ClientException('Job ID does not exist') + if reason is None: + raise ClientException('Reason does not exist') + + job = self.get_job_by_id(job_id) + if job is None: + raise ClientException('Job not found') + + job.terminate(reason) + available_regions = boto3.session.Session().get_available_regions("batch") batch_backends = {region: BatchBackend(region_name=region) for region in 
available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py index 2bec7ddf1..96094068d 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -263,3 +263,42 @@ class BatchResponse(BaseResponse): return json.dumps({'jobs': self.batch_backend.describe_jobs(jobs)}) except AWSError as err: return err.response() + + # ListJobs + def listjobs(self): + job_queue = self._get_param('jobQueue') + job_status = self._get_param('jobStatus') + max_results = self._get_param('maxResults') + next_token = self._get_param('nextToken') + + try: + jobs = self.batch_backend.list_jobs(job_queue, job_status, max_results, next_token) + except AWSError as err: + return err.response() + + result = {'jobSummaryList': [{'jobId': job.job_id, 'jobName': job.job_name} for job in jobs]} + return json.dumps(result) + + # TerminateJob + def terminatejob(self): + job_id = self._get_param('jobId') + reason = self._get_param('reason') + + try: + self.batch_backend.terminate_job(job_id, reason) + except AWSError as err: + return err.response() + + return '' + + # CancelJob + def canceljob(self): # Theres some AWS semantics on the differences but for us they're identical ;-) + job_id = self._get_param('jobId') + reason = self._get_param('reason') + + try: + self.batch_backend.terminate_job(job_id, reason) + except AWSError as err: + return err.response() + + return '' diff --git a/moto/batch/urls.py b/moto/batch/urls.py index 924e55e6d..c64086ef2 100644 --- a/moto/batch/urls.py +++ b/moto/batch/urls.py @@ -18,5 +18,8 @@ url_paths = { '{0}/v1/deregisterjobdefinition': BatchResponse.dispatch, '{0}/v1/describejobdefinitions': BatchResponse.dispatch, '{0}/v1/submitjob': BatchResponse.dispatch, - '{0}/v1/describejobs': BatchResponse.dispatch + '{0}/v1/describejobs': BatchResponse.dispatch, + '{0}/v1/listjobs': BatchResponse.dispatch, + '{0}/v1/terminatejob': BatchResponse.dispatch, + '{0}/v1/canceljob': BatchResponse.dispatch, } diff --git a/tests/test_batch/test_batch.py 
b/tests/test_batch/test_batch.py index acbe75e94..ec24cd911 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -583,7 +583,7 @@ def test_describe_task_definition(): len(resp['jobDefinitions']).should.equal(3) -# SLOW TEST +# SLOW TESTS @expected_failure @mock_logs @mock_ec2 @@ -654,4 +654,156 @@ def test_submit_job(): ls_name = resp['logStreams'][0]['logStreamName'] resp = logs_client.get_log_events(logGroupName='/aws/batch/job', logStreamName=ls_name) - len(resp['events']).should.be.greater_than(5) \ No newline at end of file + len(resp['events']).should.be.greater_than(5) + + +@expected_failure +@mock_logs +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_list_jobs(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + job_def_arn = resp['jobDefinitionArn'] + + resp = batch_client.submit_job( + jobName='test1', + jobQueue=queue_arn, + jobDefinition=job_def_arn + ) + job_id1 = resp['jobId'] + resp = batch_client.submit_job( + jobName='test2', + jobQueue=queue_arn, + jobDefinition=job_def_arn + ) + job_id2 = resp['jobId'] + + future = datetime.datetime.now() + datetime.timedelta(seconds=30) + + resp_finished_jobs = batch_client.list_jobs( + jobQueue=queue_arn, + 
jobStatus='SUCCEEDED' + ) + + # Wait only as long as it takes to run the jobs + while datetime.datetime.now() < future: + resp = batch_client.describe_jobs(jobs=[job_id1, job_id2]) + + any_failed_jobs = any([job['status'] == 'FAILED' for job in resp['jobs']]) + succeeded_jobs = all([job['status'] == 'SUCCEEDED' for job in resp['jobs']]) + + if any_failed_jobs: + raise RuntimeError('A Batch job failed') + if succeeded_jobs: + break + time.sleep(0.5) + else: + raise RuntimeError('Batch jobs timed out') + + resp_finished_jobs2 = batch_client.list_jobs( + jobQueue=queue_arn, + jobStatus='SUCCEEDED' + ) + + len(resp_finished_jobs['jobSummaryList']).should.equal(0) + len(resp_finished_jobs2['jobSummaryList']).should.equal(2) + + +@expected_failure +@mock_logs +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_terminate_job(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + job_def_arn = resp['jobDefinitionArn'] + + resp = batch_client.submit_job( + jobName='test1', + jobQueue=queue_arn, + jobDefinition=job_def_arn + ) + job_id = resp['jobId'] + + time.sleep(2) + + batch_client.terminate_job(jobId=job_id, reason='test_terminate') + + time.sleep(1) + + resp = batch_client.describe_jobs(jobs=[job_id]) + 
resp['jobs'][0]['jobName'].should.equal('test1') + resp['jobs'][0]['status'].should.equal('FAILED') + resp['jobs'][0]['statusReason'].should.equal('test_terminate') + From 51afd54229c0ccde8b12a5cd671ae5b2d053bf6a Mon Sep 17 00:00:00 2001 From: Tom Elliff Date: Thu, 12 Oct 2017 13:26:19 +0100 Subject: [PATCH 378/412] Fix runningTasksCount ECS container instance attribute ECS container instances have attributes of 'runningTasksCount' and 'pendingTasksCount'. See Boto3 docs here: http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_container_instances REST API docs here: http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeContainerInstances.html#API_DescribeContainerInstances_ResponseSyntax --- moto/ecs/models.py | 10 +++++----- tests/test_ecs/test_ecs_boto3.py | 2 ++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index bc847b32e..f5a928791 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -289,7 +289,7 @@ class ContainerInstance(BaseObject): 'type': 'STRINGSET'}] self.container_instance_arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format( str(uuid.uuid1())) - self.pending_task_count = 0 + self.pending_tasks_count = 0 self.remaining_resources = [ {'doubleValue': 0.0, 'integerValue': 4096, @@ -314,7 +314,7 @@ class ContainerInstance(BaseObject): 'stringSetValue': [], 'type': 'STRINGSET'} ] - self.running_task_count = 0 + self.running_tasks_count = 0 self.version_info = { 'agentVersion': "1.0.0", 'agentHash': '4023248', @@ -737,7 +737,7 @@ class EC2ContainerServiceBackend(BaseBackend): resource["stringSetValue"].remove(str(port)) else: resource["stringSetValue"].append(str(port)) - container_instance.running_task_count += resource_multiplier * 1 + container_instance.running_tasks_count += resource_multiplier * 1 def deregister_container_instance(self, cluster_str, container_instance_str, force): failures = [] @@ -748,11 +748,11 @@ class 
EC2ContainerServiceBackend(BaseBackend): container_instance = self.container_instances[cluster_name].get(container_instance_id) if container_instance is None: raise Exception("{0} is not a container id in the cluster") - if not force and container_instance.running_task_count > 0: + if not force and container_instance.running_tasks_count > 0: raise Exception("Found running tasks on the instance.") # Currently assume that people might want to do something based around deregistered instances # with tasks left running on them - but nothing if no tasks were running already - elif force and container_instance.running_task_count > 0: + elif force and container_instance.running_tasks_count > 0: if not self.container_instances.get('orphaned'): self.container_instances['orphaned'] = {} self.container_instances['orphaned'][container_instance_id] = container_instance diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 1cc147fc5..990057749 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -1210,6 +1210,7 @@ def test_resource_reservation_and_release(): remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) registered_resources['PORTS'].append('80') remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(1) client.stop_task( cluster='test_ecs_cluster', task=run_response['tasks'][0].get('taskArn'), @@ -1223,6 +1224,7 @@ def test_resource_reservation_and_release(): remaining_resources['CPU'].should.equal(registered_resources['CPU']) remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(0) @mock_ecs From f3623e3cd3845411a2ff31a71b83f412e86f6a41 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Thu, 12 Oct 2017 21:59:02 +0100 Subject: [PATCH 
379/412] Fix for #1258 (#1260) * Fix for #1258 * Updated doc link --- moto/ec2/responses/instances.py | 9 +++++++-- tests/test_ec2/test_instances.py | 17 +++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 532d703c9..1550fddeb 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -30,7 +30,7 @@ class InstanceResponse(BaseResponse): if max_results and len(reservations) > (start + max_results): next_token = reservations_resp[-1].id template = self.response_template(EC2_DESCRIBE_INSTANCES) - return template.render(reservations=reservations_resp, next_token=next_token) + return template.render(reservations=reservations_resp, next_token=next_token).replace('True', 'true').replace('False', 'false') def run_instances(self): min_count = int(self._get_param('MinCount', if_none='1')) @@ -144,7 +144,12 @@ class InstanceResponse(BaseResponse): """ Handles requests which are generated by code similar to: - instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}) + instance.modify_attribute( + BlockDeviceMappings=[{ + 'DeviceName': '/dev/sda1', + 'Ebs': {'DeleteOnTermination': True} + }] + ) The querystring contains information similar to: diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 04e6a6daa..46bb34d57 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1113,3 +1113,20 @@ def test_get_instance_by_security_group(): assert len(security_group_instances) == 1 assert security_group_instances[0].id == instance.id + + +@mock_ec2 +def test_modify_delete_on_termination(): + ec2_client = boto3.resource('ec2', region_name='us-west-1') + result = ec2_client.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1) + instance = result[0] + instance.load() + instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(False) + 
instance.modify_attribute( + BlockDeviceMappings=[{ + 'DeviceName': '/dev/sda1', + 'Ebs': {'DeleteOnTermination': True} + }] + ) + instance.load() + instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(True) From a6c38913a7a8576c4192b3501620c062f17745cf Mon Sep 17 00:00:00 2001 From: Tom Elliff Date: Fri, 13 Oct 2017 09:37:39 +0100 Subject: [PATCH 380/412] Add more tests for task count of container instance --- tests/test_ecs/test_ecs_boto3.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 990057749..9b6e99b57 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -714,6 +714,9 @@ def test_describe_container_instances(): for ci in response['containerInstances']] for arn in test_instance_arns: response_arns.should.contain(arn) + for instance in response['containerInstances']: + instance.keys().should.contain('runningTasksCount') + instance.keys().should.contain('pendingTasksCount') @mock_ec2 From 2bb3e841d12c5f614971492d241629794f35fe85 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Mon, 16 Oct 2017 21:56:03 +0100 Subject: [PATCH 381/412] Fixed #1261 dynamodb FilterExpression bugs (#1262) * Fixed #1261 dynamodb FilterExpression bugs FilterExpression was incorrectly handling numbers, stupid typo there. Also >= <= and <> was not being parsed correctly. * Switched up logic a bit for better end result. 
Fixes #1263 * Fixed another bug --- moto/dynamodb2/comparisons.py | 53 +++++++++++++-------- tests/test_dynamodb2/test_dynamodb.py | 68 ++++++++++++++++++++++++--- 2 files changed, 95 insertions(+), 26 deletions(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 8462c2de5..faeffbaa5 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -61,15 +61,27 @@ def get_filter_expression(expr, names, values): # Do substitutions for key, value in names.items(): expr = expr.replace(key, value) + + # Store correct types of values for use later + values_map = {} for key, value in values.items(): if 'N' in value: - expr.replace(key, float(value['N'])) + values_map[key] = float(value['N']) + elif 'BOOL' in value: + values_map[key] = value['BOOL'] + elif 'S' in value: + values_map[key] = value['S'] + elif 'NS' in value: + values_map[key] = tuple(value['NS']) + elif 'SS' in value: + values_map[key] = tuple(value['SS']) + elif 'L' in value: + values_map[key] = tuple(value['L']) else: - expr = expr.replace(key, value['S']) + raise NotImplementedError() # Remove all spaces, tbf we could just skip them in the next step. 
# The number of known options is really small so we can do a fair bit of cheating - #expr = list(re.sub('\s', '', expr)) # 'Id>5ANDattribute_exists(test)ORNOTlength<6' expr = list(expr) # DodgyTokenisation stage 1 @@ -130,13 +142,9 @@ def get_filter_expression(expr, names, values): next_token = six.next(token_iterator) while next_token != ')': - try: - next_token = int(next_token) - except ValueError: - try: - next_token = float(next_token) - except ValueError: - pass + if next_token in values_map: + next_token = values_map[next_token] + tuple_list.append(next_token) next_token = six.next(token_iterator) @@ -149,10 +157,14 @@ def get_filter_expression(expr, names, values): tokens2.append(tuple(tuple_list)) elif token == 'BETWEEN': field = tokens2.pop() - op1 = int(six.next(token_iterator)) + # if values map contains a number, it would be a float + # so we need to int() it anyway + op1 = six.next(token_iterator) + op1 = int(values_map.get(op1, op1)) and_op = six.next(token_iterator) assert and_op == 'AND' - op2 = int(six.next(token_iterator)) + op2 = six.next(token_iterator) + op2 = int(values_map.get(op2, op2)) tokens2.append(['between', field, op1, op2]) elif is_function(token): @@ -169,14 +181,15 @@ def get_filter_expression(expr, names, values): tokens2.append(function_list) else: - try: - token = int(token) - except ValueError: - try: - token = float(token) - except ValueError: - pass - tokens2.append(token) + # Convert tokens back to real types + if token in values_map: + token = values_map[token] + + # Need to join >= <= <> + if len(tokens2) > 0 and ((tokens2[-1] == '>' and token == '=') or (tokens2[-1] == '<' and token == '=') or (tokens2[-1] == '<' and token == '>')): + tokens2.append(tokens2.pop() + token) + else: + tokens2.append(token) # Start of the Shunting-Yard algorithm. <-- Proper beast algorithm! 
def is_number(val): diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 85d8feb34..26d380628 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -581,24 +581,24 @@ def test_filter_expression(): row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}}) # AND test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > 5 AND Subs < 7', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}}) filter_expr.expr(row1).should.be(True) filter_expr.expr(row2).should.be(False) # OR test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 5 OR Id=8', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': 5}, ':v1': {'N': 8}}) filter_expr.expr(row1).should.be(True) # BETWEEN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN 5 AND 10', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 10}}) filter_expr.expr(row1).should.be(True) # PAREN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 8 AND (Subs = 8 OR Subs = 5)', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': 8}, ':v1': {'N': 5}}) filter_expr.expr(row1).should.be(True) # IN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN (7,8, 9)', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}}) filter_expr.expr(row1).should.be(True) # attribute function tests @@ -655,6 +655,63 @@ def test_scan_filter(): assert response['Count'] == 1 +@mock_dynamodb2 
+def test_scan_filter2(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'N': '1'} + } + ) + + response = client.scan( + TableName='test1', + Select='ALL_ATTRIBUTES', + FilterExpression='#tb >= :dt', + ExpressionAttributeNames={"#tb": "app"}, + ExpressionAttributeValues={":dt": {"N": str(1)}} + ) + assert response['Count'] == 1 + + +@mock_dynamodb2 +def test_scan_filter3(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'N': '1'}, + 'active': {'BOOL': True} + } + ) + + table = dynamodb.Table('test1') + response = table.scan( + FilterExpression=Attr('active').eq(True) + ) + assert response['Count'] == 1 + + @mock_dynamodb2 def test_bad_scan_filter(): client = boto3.client('dynamodb', region_name='us-east-1') @@ -680,7 +737,6 @@ def test_bad_scan_filter(): raise RuntimeError('Should of raised ResourceInUseException') - @mock_dynamodb2 def test_duplicate_create(): client = boto3.client('dynamodb', region_name='us-east-1') From 49ddb500a839daaee8b916fb0af365825d66467d Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 17 Oct 2017 01:06:22 +0100 Subject: [PATCH 382/412] AWS X-Ray client mock. 
(#1255) * X-Ray Client SDK patched Fixes #1250 * Fixed flake8 * Fixed some issues * Fixed flake8 * Fixed more typos * Fixed python2 string * Fixed aws-sdk patch order * Added more test cases to test the patching --- moto/__init__.py | 2 +- moto/awslambda/responses.py | 5 ++ moto/core/responses.py | 6 +- moto/core/utils.py | 93 +++++++++++++++++++++++++++++ moto/dynamodb2/responses.py | 4 +- moto/sqs/responses.py | 6 +- moto/xray/__init__.py | 1 + moto/xray/mock_client.py | 83 +++++++++++++++++++++++++ setup.py | 3 +- tests/test_ecr/test_ecr_boto3.py | 6 +- tests/test_xray/test_xray_client.py | 72 ++++++++++++++++++++++ 11 files changed, 273 insertions(+), 8 deletions(-) create mode 100644 moto/xray/mock_client.py create mode 100644 tests/test_xray/test_xray_client.py diff --git a/moto/__init__.py b/moto/__init__.py index 64baa52ac..79efac862 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -38,7 +38,7 @@ from .sts import mock_sts, mock_sts_deprecated # flake8: noqa from .ssm import mock_ssm # flake8: noqa from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa from .swf import mock_swf, mock_swf_deprecated # flake8: noqa -from .xray import mock_xray # flake8: noqa +from .xray import mock_xray, mock_xray_client, XRaySegment # flake8: noqa from .logs import mock_logs, mock_logs_deprecated # flake8: noqa diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 5215f63c5..4ba837ea2 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -9,6 +9,7 @@ try: except: from urllib.parse import unquote, urlparse, parse_qs +from moto.core.utils import amz_crc32, amzn_request_id from moto.core.responses import BaseResponse @@ -32,6 +33,8 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + @amz_crc32 + @amzn_request_id def invoke(self, request, full_url, headers): self.setup_class(request, full_url, headers) if request.method == 'POST': @@ -39,6 +42,8 @@ class 
LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + @amz_crc32 + @amzn_request_id def invoke_async(self, request, full_url, headers): self.setup_class(request, full_url, headers) if request.method == 'POST': diff --git a/moto/core/responses.py b/moto/core/responses.py index e85054802..572a45229 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -199,10 +199,14 @@ class BaseResponse(_TemplateEnvironmentMixin): response = method() except HTTPException as http_error: response = http_error.description, dict(status=http_error.code) + if isinstance(response, six.string_types): return 200, headers, response else: - body, new_headers = response + if len(response) == 2: + body, new_headers = response + else: + status, new_headers, body = response status = new_headers.get('status', 200) headers.update(new_headers) # Cast status to string diff --git a/moto/core/utils.py b/moto/core/utils.py index 9ee0c1814..2ea4dc4a8 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -1,10 +1,16 @@ from __future__ import unicode_literals +from functools import wraps +import binascii import datetime import inspect import random import re import six +import string + + +REQUEST_ID_LONG = string.digits + string.ascii_uppercase def camelcase_to_underscores(argument): @@ -194,3 +200,90 @@ def unix_time(dt=None): def unix_time_millis(dt=None): return unix_time(dt) * 1000.0 + + +def gen_amz_crc32(response, headerdict=None): + if not isinstance(response, bytes): + response = response.encode() + + crc = str(binascii.crc32(response)) + + if headerdict is not None and isinstance(headerdict, dict): + headerdict.update({'x-amz-crc32': crc}) + + return crc + + +def gen_amzn_requestid_long(headerdict=None): + req_id = ''.join([random.choice(REQUEST_ID_LONG) for _ in range(0, 52)]) + + if headerdict is not None and isinstance(headerdict, dict): + headerdict.update({'x-amzn-requestid': req_id}) + + return req_id + + +def amz_crc32(f): + @wraps(f) + def 
_wrapper(*args, **kwargs): + response = f(*args, **kwargs) + + headers = {} + status = 200 + + if isinstance(response, six.string_types): + body = response + else: + if len(response) == 2: + body, new_headers = response + status = new_headers.get('status', 200) + else: + status, new_headers, body = response + headers.update(new_headers) + # Cast status to string + if "status" in headers: + headers['status'] = str(headers['status']) + + try: + # Doesnt work on python2 for some odd unicode strings + gen_amz_crc32(body, headers) + except Exception: + pass + + return status, headers, body + + return _wrapper + + +def amzn_request_id(f): + @wraps(f) + def _wrapper(*args, **kwargs): + response = f(*args, **kwargs) + + headers = {} + status = 200 + + if isinstance(response, six.string_types): + body = response + else: + if len(response) == 2: + body, new_headers = response + status = new_headers.get('status', 200) + else: + status, new_headers, body = response + headers.update(new_headers) + # Cast status to string + if "status" in headers: + headers['status'] = str(headers['status']) + + request_id = gen_amzn_requestid_long(headers) + + # Update request ID in XML + try: + body = body.replace('{{ requestid }}', request_id) + except Exception: # Will just ignore if it cant work on bytes (which are str's on python2) + pass + + return status, headers, body + + return _wrapper diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 75e625c73..218cfc21d 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -4,7 +4,7 @@ import six import re from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores +from moto.core.utils import camelcase_to_underscores, amzn_request_id from .models import dynamodb_backend2, dynamo_json_dump @@ -24,6 +24,7 @@ class DynamoHandler(BaseResponse): def error(self, type_, message, status=400): return status, self.response_headers, dynamo_json_dump({'__type': type_, 
'message': message}) + @amzn_request_id def call_action(self): self.body = json.loads(self.body or '{}') endpoint = self.get_endpoint_name(self.headers) @@ -56,6 +57,7 @@ class DynamoHandler(BaseResponse): response = {"TableNames": tables} if limit and len(all_tables) > start + limit: response["LastEvaluatedTableName"] = tables[-1] + return dynamo_json_dump(response) def create_table(self): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 540bd4e41..63a5036d6 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals from six.moves.urllib.parse import urlparse from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores +from moto.core.utils import camelcase_to_underscores, amz_crc32, amzn_request_id from .utils import parse_message_attributes from .models import sqs_backends from .exceptions import ( @@ -52,6 +52,8 @@ class SQSResponse(BaseResponse): return visibility_timeout + @amz_crc32 # crc last as request_id can edit XML + @amzn_request_id def call_action(self): status_code, headers, body = super(SQSResponse, self).call_action() if status_code == 404: @@ -296,7 +298,7 @@ CREATE_QUEUE_RESPONSE = """ {{ queue.visibility_timeout }} - 7a62c49f-347e-4fc4-9331-6e8e7a96aa73 + {{ requestid }} """ diff --git a/moto/xray/__init__.py b/moto/xray/__init__.py index 7b32ca0b0..41f00af58 100644 --- a/moto/xray/__init__.py +++ b/moto/xray/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import xray_backends from ..core.models import base_decorator +from .mock_client import mock_xray_client, XRaySegment # noqa xray_backend = xray_backends['us-east-1'] mock_xray = base_decorator(xray_backends) diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py new file mode 100644 index 000000000..6e2164d63 --- /dev/null +++ b/moto/xray/mock_client.py @@ -0,0 +1,83 @@ +from functools import wraps +import os +from moto.xray import 
xray_backends +import aws_xray_sdk.core +from aws_xray_sdk.core.context import Context as AWSContext +from aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter + + +class MockEmitter(UDPEmitter): + """ + Replaces the code that sends UDP to local X-Ray daemon + """ + def __init__(self, daemon_address='127.0.0.1:2000'): + address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address) + self._ip, self._port = self._parse_address(address) + + def _xray_backend(self, region): + return xray_backends[region] + + def send_entity(self, entity): + # Hack to get region + # region = entity.subsegments[0].aws['region'] + # xray = self._xray_backend(region) + + # TODO store X-Ray data, pretty sure X-Ray needs refactor for this + pass + + def _send_data(self, data): + raise RuntimeError('Should not be running this') + + +def mock_xray_client(f): + """ + Mocks the X-Ray sdk by pwning its evil singleton with our methods + + The X-Ray SDK has normally been imported and `patched()` called long before we start mocking. + This means the Context() will be very unhappy if an env var isnt present, so we set that, save + the old context, then supply our new context. + We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing + that itno the recorder instance. 
+ """ + @wraps(f) + def _wrapped(*args, **kwargs): + print("Starting X-Ray Patch") + + old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING') + os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR' + old_xray_context = aws_xray_sdk.core.xray_recorder._context + old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter + aws_xray_sdk.core.xray_recorder._context = AWSContext() + aws_xray_sdk.core.xray_recorder._emitter = MockEmitter() + + try: + f(*args, **kwargs) + finally: + + if old_xray_context_var is None: + del os.environ['AWS_XRAY_CONTEXT_MISSING'] + else: + os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var + + aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter + aws_xray_sdk.core.xray_recorder._context = old_xray_context + + return _wrapped + + +class XRaySegment(object): + """ + XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark + the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated + by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop + the segment, thus causing it to be emitted via UDP. + + During testing we're going to have to control the start and end of a segment via context managers. 
+ """ + def __enter__(self): + aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1) + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + aws_xray_sdk.core.xray_recorder.end_segment() diff --git a/setup.py b/setup.py index 207c5dd2e..9e423bdd4 100755 --- a/setup.py +++ b/setup.py @@ -19,7 +19,8 @@ install_requires = [ "pytz", "python-dateutil<3.0.0,>=2.1", "mock", - "docker>=2.5.1" + "docker>=2.5.1", + "aws-xray-sdk==0.92.2" ] extras_require = { diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 67d1a2cab..00628e22f 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -414,7 +414,8 @@ def test_get_authorization_token_assume_region(): client = boto3.client('ecr', region_name='us-east-1') auth_token_response = client.get_authorization_token() - list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) + auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') auth_token_response['authorizationData'].should.equal([ { 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', @@ -429,7 +430,8 @@ def test_get_authorization_token_explicit_regions(): client = boto3.client('ecr', region_name='us-east-1') auth_token_response = client.get_authorization_token(registryIds=['us-east-1', 'us-west-1']) - list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) + auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') auth_token_response['authorizationData'].should.equal([ { 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', diff --git a/tests/test_xray/test_xray_client.py b/tests/test_xray/test_xray_client.py new file mode 100644 index 000000000..0cd948950 --- /dev/null +++ b/tests/test_xray/test_xray_client.py @@ -0,0 +1,72 @@ +from __future__ import 
unicode_literals +from moto import mock_xray_client, XRaySegment, mock_dynamodb2 +import sure # noqa +import boto3 + +from moto.xray.mock_client import MockEmitter +import aws_xray_sdk.core as xray_core +import aws_xray_sdk.core.patcher as xray_core_patcher + +import botocore.client +import botocore.endpoint +original_make_api_call = botocore.client.BaseClient._make_api_call +original_encode_headers = botocore.endpoint.Endpoint._encode_headers + +import requests +original_session_request = requests.Session.request +original_session_prep_request = requests.Session.prepare_request + + +@mock_xray_client +@mock_dynamodb2 +def test_xray_dynamo_request_id(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + client = boto3.client('dynamodb', region_name='us-east-1') + + with XRaySegment(): + resp = client.list_tables() + resp['ResponseMetadata'].should.contain('RequestId') + id1 = resp['ResponseMetadata']['RequestId'] + + with XRaySegment(): + client.list_tables() + resp = client.list_tables() + id2 = resp['ResponseMetadata']['RequestId'] + + id1.should_not.equal(id2) + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) + + +@mock_xray_client +def test_xray_udp_emitter_patched(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + assert isinstance(xray_core.xray_recorder._emitter, MockEmitter) + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + 
setattr(requests.Session, 'prepare_request', original_session_prep_request) + + +@mock_xray_client +def test_xray_context_patched(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + xray_core.xray_recorder._context.context_missing.should.equal('LOG_ERROR') + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) From 5ef236e96666debfac0cf8374afc798a525c8141 Mon Sep 17 00:00:00 2001 From: John Kerkstra Date: Mon, 16 Oct 2017 19:09:51 -0500 Subject: [PATCH 383/412] Add attach_ and detach_instances methods to autoscaling service (#1264) * add detach_instances functionality to autoscaling service * use ASG_NAME_TAG constant * cleanup models method a bit, add mocked DetachInstancesResult to response template * add attach_instances method --- moto/autoscaling/exceptions.py | 14 ++ moto/autoscaling/models.py | 85 +++++++++--- moto/autoscaling/responses.py | 55 ++++++++ tests/test_autoscaling/test_autoscaling.py | 144 +++++++++++++++++++++ 4 files changed, 277 insertions(+), 21 deletions(-) create mode 100644 moto/autoscaling/exceptions.py diff --git a/moto/autoscaling/exceptions.py b/moto/autoscaling/exceptions.py new file mode 100644 index 000000000..15b2e4f4a --- /dev/null +++ b/moto/autoscaling/exceptions.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + +class AutoscalingClientError(RESTError): + code = 500 + + +class ResourceContentionError(AutoscalingClientError): + + def __init__(self): + super(ResourceContentionError, self).__init__( + "ResourceContentionError", + "You already have a pending update to an Auto Scaling resource (for example, a group, instance, or 
load balancer).") diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 9df9fea12..4bdebf955 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -5,6 +5,9 @@ from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends from moto.elb import elb_backends from moto.elb.exceptions import LoadBalancerNotFoundError +from .exceptions import ( + ResourceContentionError, +) # http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown DEFAULT_COOLDOWN = 300 @@ -259,27 +262,8 @@ class FakeAutoScalingGroup(BaseModel): # Need more instances count_needed = int(self.desired_capacity) - int(curr_instance_count) - propagated_tags = {} - for tag in self.tags: - # boto uses 'propagate_at_launch - # boto3 and cloudformation use PropagateAtLaunch - if 'propagate_at_launch' in tag and tag['propagate_at_launch'] == 'true': - propagated_tags[tag['key']] = tag['value'] - if 'PropagateAtLaunch' in tag and tag['PropagateAtLaunch']: - propagated_tags[tag['Key']] = tag['Value'] - - propagated_tags[ASG_NAME_TAG] = self.name - reservation = self.autoscaling_backend.ec2_backend.add_instances( - self.launch_config.image_id, - count_needed, - self.launch_config.user_data, - self.launch_config.security_groups, - instance_type=self.launch_config.instance_type, - tags={'instance': propagated_tags} - ) - for instance in reservation.instances: - instance.autoscaling_group = self - self.instance_states.append(InstanceState(instance)) + propagated_tags = self.get_propagated_tags() + self.replace_autoscaling_group_instances(count_needed, propagated_tags) else: # Need to remove some instances count_to_remove = curr_instance_count - self.desired_capacity @@ -290,6 +274,31 @@ class FakeAutoScalingGroup(BaseModel): instance_ids_to_remove) self.instance_states = self.instance_states[count_to_remove:] + def get_propagated_tags(self): + propagated_tags = {} + for tag in self.tags: + # boto uses 'propagate_at_launch + # 
boto3 and cloudformation use PropagateAtLaunch + if 'propagate_at_launch' in tag and tag['propagate_at_launch'] == 'true': + propagated_tags[tag['key']] = tag['value'] + if 'PropagateAtLaunch' in tag and tag['PropagateAtLaunch']: + propagated_tags[tag['Key']] = tag['Value'] + return propagated_tags + + def replace_autoscaling_group_instances(self, count_needed, propagated_tags): + propagated_tags[ASG_NAME_TAG] = self.name + reservation = self.autoscaling_backend.ec2_backend.add_instances( + self.launch_config.image_id, + count_needed, + self.launch_config.user_data, + self.launch_config.security_groups, + instance_type=self.launch_config.instance_type, + tags={'instance': propagated_tags} + ) + for instance in reservation.instances: + instance.autoscaling_group = self + self.instance_states.append(InstanceState(instance)) + class AutoScalingBackend(BaseBackend): def __init__(self, ec2_backend, elb_backend): @@ -409,6 +418,40 @@ class AutoScalingBackend(BaseBackend): instance_states.extend(group.instance_states) return instance_states + def attach_instances(self, group_name, instance_ids): + group = self.autoscaling_groups[group_name] + original_size = len(group.instance_states) + + if (original_size + len(instance_ids)) > group.max_size: + raise ResourceContentionError + else: + group.desired_capacity = original_size + len(instance_ids) + new_instances = [InstanceState(self.ec2_backend.get_instance(x)) for x in instance_ids] + for instance in new_instances: + self.ec2_backend.create_tags([instance.instance.id], {ASG_NAME_TAG: group.name}) + group.instance_states.extend(new_instances) + self.update_attached_elbs(group.name) + + def detach_instances(self, group_name, instance_ids, should_decrement): + group = self.autoscaling_groups[group_name] + original_size = len(group.instance_states) + + detached_instances = [x for x in group.instance_states if x.instance.id in instance_ids] + for instance in detached_instances: + 
self.ec2_backend.delete_tags([instance.instance.id], {ASG_NAME_TAG: group.name}) + + new_instance_state = [x for x in group.instance_states if x.instance.id not in instance_ids] + group.instance_states = new_instance_state + + if should_decrement: + group.desired_capacity = original_size - len(instance_ids) + else: + count_needed = len(instance_ids) + group.replace_autoscaling_group_instances(count_needed, group.get_propagated_tags()) + + self.update_attached_elbs(group_name) + return detached_instances + def set_desired_capacity(self, group_name, desired_capacity): group = self.autoscaling_groups[group_name] group.set_desired_capacity(desired_capacity) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 2c3bddd79..cba660139 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -87,6 +87,27 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() + def attach_instances(self): + group_name = self._get_param('AutoScalingGroupName') + instance_ids = self._get_multi_param("InstanceIds.member") + self.autoscaling_backend.attach_instances( + group_name, instance_ids) + template = self.response_template(ATTACH_INSTANCES_TEMPLATE) + return template.render() + + def detach_instances(self): + group_name = self._get_param('AutoScalingGroupName') + instance_ids = self._get_multi_param("InstanceIds.member") + should_decrement_string = self._get_param('ShouldDecrementDesiredCapacity') + if should_decrement_string == 'true': + should_decrement = True + else: + should_decrement = False + detached_instances = self.autoscaling_backend.detach_instances( + group_name, instance_ids, should_decrement) + template = self.response_template(DETACH_INSTANCES_TEMPLATE) + return template.render(detached_instances=detached_instances) + def describe_auto_scaling_groups(self): names = self._get_multi_param("AutoScalingGroupNames.member") token = 
self._get_param("NextToken") @@ -284,6 +305,40 @@ CREATE_AUTOSCALING_GROUP_TEMPLATE = """ + + + +8d798a29-f083-11e1-bdfb-cb223EXAMPLE + +""" + +DETACH_INSTANCES_TEMPLATE = """ + + + {% for instance in detached_instances %} + + 5091cb52-547a-47ce-a236-c9ccbc2cb2c9EXAMPLE + {{ group_name }} + + At 2017-10-15T15:55:21Z instance {{ instance.instance.id }} was detached in response to a user request. + + Detaching EC2 instance: {{ instance.instance.id }} + 2017-10-15T15:55:21Z + 2017-10-15T15:55:21Z + InProgress + InProgress + 50 +
details
+
+ {% endfor %} +
+
+ +8d798a29-f083-11e1-bdfb-cb223EXAMPLE + +
""" + DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index b919eb71c..d2f890c4d 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -653,3 +653,147 @@ def test_autoscaling_describe_policies_boto3(): response['ScalingPolicies'].should.have.length_of(1) response['ScalingPolicies'][0][ 'PolicyName'].should.equal('test_policy_down') + +@mock_autoscaling +@mock_ec2 +def test_detach_one_instance_decrement(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }] + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + instance_to_detach = response['AutoScalingGroups'][0]['Instances'][0]['InstanceId'] + instance_to_keep = response['AutoScalingGroups'][0]['Instances'][1]['InstanceId'] + + ec2_client = boto3.client('ec2', region_name='us-east-1') + + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + + response = client.detach_instances( + AutoScalingGroupName='test_asg', + InstanceIds=[instance_to_detach], + ShouldDecrementDesiredCapacity=True + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + response['AutoScalingGroups'][0]['Instances'].should.have.length_of(1) + + # test to ensure tag has been removed + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + tags = 
response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(1) + + # test to ensure tag is present on other instance + response = ec2_client.describe_instances(InstanceIds=[instance_to_keep]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(2) + +@mock_autoscaling +@mock_ec2 +def test_detach_one_instance(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }] + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + instance_to_detach = response['AutoScalingGroups'][0]['Instances'][0]['InstanceId'] + instance_to_keep = response['AutoScalingGroups'][0]['Instances'][1]['InstanceId'] + + ec2_client = boto3.client('ec2', region_name='us-east-1') + + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + + response = client.detach_instances( + AutoScalingGroupName='test_asg', + InstanceIds=[instance_to_detach], + ShouldDecrementDesiredCapacity=False + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + # test to ensure instance was replaced + response['AutoScalingGroups'][0]['Instances'].should.have.length_of(2) + + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(1) + + response = ec2_client.describe_instances(InstanceIds=[instance_to_keep]) + tags = 
response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(2) + +@mock_autoscaling +@mock_ec2 +def test_attach_one_instance(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=4, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }] + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + + ec2 = boto3.resource('ec2', 'us-east-1') + instances_to_add = [x.id for x in ec2.create_instances(ImageId='', MinCount=1, MaxCount=1)] + + response = client.attach_instances( + AutoScalingGroupName='test_asg', + InstanceIds=instances_to_add + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + response['AutoScalingGroups'][0]['Instances'].should.have.length_of(3) From 0af3427c15bf7e9f61cc2503849f676c684251f4 Mon Sep 17 00:00:00 2001 From: John Kerkstra Date: Mon, 16 Oct 2017 21:07:00 -0500 Subject: [PATCH 384/412] Add autoscaling load balancer methods (#1265) * add attach_load_balancers, detach_load_balancers, describe_load_balancers methods * prefer using amzn_request_id decorator to generate unique request ids --- moto/autoscaling/models.py | 18 +++ moto/autoscaling/responses.py | 67 ++++++++++- tests/test_autoscaling/test_autoscaling.py | 127 ++++++++++++++++++++- 3 files changed, 209 insertions(+), 3 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 4bdebf955..377890c40 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -539,6 +539,24 @@ class 
AutoScalingBackend(BaseBackend): group.tags = new_tags + def attach_load_balancers(self, group_name, load_balancer_names): + group = self.autoscaling_groups[group_name] + group.load_balancers.extend(load_balancer_names) + self.update_attached_elbs(group_name) + + def describe_load_balancers(self, group_name): + return self.autoscaling_groups[group_name].load_balancers + + def detach_load_balancers(self, group_name, load_balancer_names): + group = self.autoscaling_groups[group_name] + group_instance_ids = set( + state.instance.id for state in group.instance_states) + elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers) + for elb in elbs: + self.elb_backend.deregister_instances( + elb.name, group_instance_ids) + group.load_balancers = [x for x in group.load_balancers if x not in load_balancer_names] + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index cba660139..832103775 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse +from moto.core.utils import amz_crc32, amzn_request_id from .models import autoscaling_backends @@ -87,6 +88,8 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id def attach_instances(self): group_name = self._get_param('AutoScalingGroupName') instance_ids = self._get_multi_param("InstanceIds.member") @@ -95,6 +98,8 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(ATTACH_INSTANCES_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id def detach_instances(self): group_name = self._get_param('AutoScalingGroupName') instance_ids = self._get_multi_param("InstanceIds.member") @@ -207,6 +212,34 @@ class AutoScalingResponse(BaseResponse): 
template = self.response_template(EXECUTE_POLICY_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id + def attach_load_balancers(self): + group_name = self._get_param('AutoScalingGroupName') + load_balancer_names = self._get_multi_param("LoadBalancerNames.member") + self.autoscaling_backend.attach_load_balancers( + group_name, load_balancer_names) + template = self.response_template(ATTACH_LOAD_BALANCERS_TEMPLATE) + return template.render() + + @amz_crc32 + @amzn_request_id + def describe_load_balancers(self): + group_name = self._get_param('AutoScalingGroupName') + load_balancers = self.autoscaling_backend.describe_load_balancers(group_name) + template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) + return template.render(load_balancers=load_balancers) + + @amz_crc32 + @amzn_request_id + def detach_load_balancers(self): + group_name = self._get_param('AutoScalingGroupName') + load_balancer_names = self._get_multi_param("LoadBalancerNames.member") + self.autoscaling_backend.detach_load_balancers( + group_name, load_balancer_names) + template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE) + return template.render() + CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """ @@ -309,7 +342,7 @@ ATTACH_INSTANCES_TEMPLATE = """ + + +{{ requestid }} + +""" + +DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ + + + {% for load_balancer in load_balancers %} + + {{ load_balancer }} + Added + + {% endfor %} + + + +{{ requestid }} + +""" + +DETACH_LOAD_BALANCERS_TEMPLATE = """ + + +{{ requestid }} + +""" diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index d2f890c4d..def4d7077 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -8,7 +8,7 @@ from boto.ec2.autoscale import Tag import boto.ec2.elb import sure # noqa -from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_autoscaling_deprecated, mock_ec2 +from moto import 
mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_elb, mock_autoscaling_deprecated, mock_ec2 from tests.helpers import requires_boto_gte @@ -484,6 +484,131 @@ Boto3 ''' +@mock_autoscaling +@mock_elb +def test_describe_load_balancers(): + INSTANCE_COUNT = 2 + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + LoadBalancerNames=['my-lb'], + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }] + ) + + response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + list(response['LoadBalancers']).should.have.length_of(1) + response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') + + +@mock_autoscaling +@mock_elb +def test_attach_load_balancer(): + INSTANCE_COUNT = 2 + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + Tags=[{ + 
"ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }] + ) + + response = client.attach_load_balancers( + AutoScalingGroupName='test_asg', + LoadBalancerNames=['my-lb']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = elb_client.describe_load_balancers( + LoadBalancerNames=['my-lb'] + ) + list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(INSTANCE_COUNT) + + +@mock_autoscaling +@mock_elb +def test_detach_load_balancer(): + INSTANCE_COUNT = 2 + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + LoadBalancerNames=['my-lb'], + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }] + ) + + response = client.detach_load_balancers( + AutoScalingGroupName='test_asg', + LoadBalancerNames=['my-lb']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = elb_client.describe_load_balancers( + LoadBalancerNames=['my-lb'] + ) + list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(0) + + response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + list(response['LoadBalancers']).should.have.length_of(0) + + @mock_autoscaling def test_create_autoscaling_group_boto3(): client = boto3.client('autoscaling', region_name='us-east-1') From 
b8bb6c2dcfe2c241928523a1d0c1aa186fb867fa Mon Sep 17 00:00:00 2001 From: John Kerkstra Date: Tue, 17 Oct 2017 00:04:47 -0500 Subject: [PATCH 385/412] Fix bug with update_attached_elbs (#1266) * fixed bug where we were using elb_backend.describe_load_balancers incorrectly, returning all available load balancers when we wanted none. * improve skip, clean up tests --- moto/autoscaling/models.py | 4 ++ tests/test_autoscaling/test_autoscaling.py | 49 +++++++++++++++++++--- 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 377890c40..a921c74ab 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -504,6 +504,10 @@ class AutoScalingBackend(BaseBackend): group_instance_ids = set( state.instance.id for state in group.instance_states) + # skip this if group.load_balancers is empty + # otherwise elb_backend.describe_load_balancers returns all available load balancers + if not group.load_balancers: + return try: elbs = self.elb_backend.describe_load_balancers( names=group.load_balancers) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index def4d7077..b0bbc88a8 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -498,10 +498,10 @@ def test_describe_load_balancers(): ) client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' ) - _ = client.create_auto_scaling_group( + client.create_auto_scaling_group( AutoScalingGroupName='test_asg', LaunchConfigurationName='test_launch_configuration', LoadBalancerNames=['my-lb'], @@ -520,6 +520,43 @@ def test_describe_load_balancers(): list(response['LoadBalancers']).should.have.length_of(1) response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') +@mock_autoscaling +@mock_elb +def 
test_create_elb_and_autoscaling_group_no_relationship(): + INSTANCE_COUNT = 2 + ELB_NAME = 'my-elb' + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName=ELB_NAME, + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + ) + + # autoscaling group and elb should have no relationship + response = client.describe_load_balancers( + AutoScalingGroupName='test_asg' + ) + list(response['LoadBalancers']).should.have.length_of(0) + response = elb_client.describe_load_balancers( + LoadBalancerNames=[ELB_NAME] + ) + list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(0) + @mock_autoscaling @mock_elb @@ -535,10 +572,10 @@ def test_attach_load_balancer(): ) client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' ) - _ = client.create_auto_scaling_group( + client.create_auto_scaling_group( AutoScalingGroupName='test_asg', LaunchConfigurationName='test_launch_configuration', MinSize=0, @@ -577,10 +614,10 @@ def test_detach_load_balancer(): ) client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' ) - _ = client.create_auto_scaling_group( + client.create_auto_scaling_group( AutoScalingGroupName='test_asg', LaunchConfigurationName='test_launch_configuration', 
LoadBalancerNames=['my-lb'], From 194da53a0edcd38b920318d9a353705232f6f2a4 Mon Sep 17 00:00:00 2001 From: Andrew Miller Date: Tue, 17 Oct 2017 18:42:29 +0100 Subject: [PATCH 386/412] Correct the type of a default attribute in SNS PlatformEndpoint (#1267) The `Enabled` Attribute in the PlatformEndpoint of SNS current returns a boolean, however, the 'enabled' property is expecting a string as `.lower()` is called on the result. This change simply changes the default from `True` to `'True'` so the property works as expected. --- moto/sns/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 5b7277d22..4bab049b4 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -146,7 +146,7 @@ class PlatformEndpoint(BaseModel): if 'Token' not in self.attributes: self.attributes['Token'] = self.token if 'Enabled' not in self.attributes: - self.attributes['Enabled'] = True + self.attributes['Enabled'] = 'True' @property def enabled(self): From 317dbbd1a3d679549f384dc6870155b82a85dd5e Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 17 Oct 2017 18:13:27 -0700 Subject: [PATCH 387/412] requiring minimum botocore (#1268) Boto and Boto3 can be a little old but Moto will throw an error if botocoe doesn't even know about some of the services it supports. As of this commit Polly is new enough some users are running into exceptions. 
--- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 9e423bdd4..e84ac3467 100755 --- a/setup.py +++ b/setup.py @@ -9,6 +9,7 @@ install_requires = [ "Jinja2>=2.8", "boto>=2.36.0", "boto3>=1.2.1", + "botocore>=1.7.12", "cookies", "cryptography>=2.0.0", "requests>=2.5", From b286123425ee59225dcc741b32ae61c02c8c897d Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 17 Oct 2017 18:33:57 -0700 Subject: [PATCH 388/412] bumping to version 1.1.23 (#1269) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e84ac3467..5cf32ade7 100755 --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ else: setup( name='moto', - version='1.1.22', + version='1.1.23', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From b8a0cfd6f75c266cf5844635007af9f77b469f51 Mon Sep 17 00:00:00 2001 From: John Kerkstra Date: Wed, 18 Oct 2017 16:23:14 -0500 Subject: [PATCH 389/412] filter out already related elbs to prevent duplicates (#1270) --- moto/autoscaling/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index a921c74ab..fd8efd54f 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -545,7 +545,8 @@ class AutoScalingBackend(BaseBackend): def attach_load_balancers(self, group_name, load_balancer_names): group = self.autoscaling_groups[group_name] - group.load_balancers.extend(load_balancer_names) + group.load_balancers.extend( + [x for x in load_balancer_names if x not in group.load_balancers]) self.update_attached_elbs(group_name) def describe_load_balancers(self, group_name): From b40c5e557e93a1cacb87228aadc6083d5ccf9ec3 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Thu, 19 Oct 2017 00:22:35 +0100 Subject: [PATCH 390/412] Fixed S3 versioning bug + minor cleanup (#1272) * Fixed S3 versioning bug + minor cleanup Fixes 1271 * flake8 --- 
moto/s3/models.py | 45 +++++++++++++++------------------------- tests/test_s3/test_s3.py | 23 ++++++++++++++++++++ 2 files changed, 40 insertions(+), 28 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index ae05292f2..91d3c1e2d 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -81,6 +81,9 @@ class FakeKey(BaseModel): def restore(self, days): self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days) + def increment_version(self): + self._version_id += 1 + @property def etag(self): if self._etag is None: @@ -323,19 +326,10 @@ class CorsRule(BaseModel): def __init__(self, allowed_methods, allowed_origins, allowed_headers=None, expose_headers=None, max_age_seconds=None): - # Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`, - # whereas Python 3 is OK with str. This causes issues with the XML parser, which returns - # unicode strings in Python 2. So, need to do this to make it work in both Python 2 and 3: - import sys - if sys.version_info >= (3, 0): - str_type = str - else: - str_type = basestring # noqa - - self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, str_type) else allowed_methods - self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, str_type) else allowed_origins - self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, str_type) else allowed_headers - self.exposed_headers = [expose_headers] if isinstance(expose_headers, str_type) else expose_headers + self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, six.string_types) else allowed_methods + self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, six.string_types) else allowed_origins + self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, six.string_types) else allowed_headers + self.exposed_headers = [expose_headers] if isinstance(expose_headers, six.string_types) else expose_headers self.max_age_seconds = 
max_age_seconds @@ -389,25 +383,16 @@ class FakeBucket(BaseModel): if len(rules) > 100: raise MalformedXML() - # Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`, - # whereas Python 3 is OK with str. This causes issues with the XML parser, which returns - # unicode strings in Python 2. So, need to do this to make it work in both Python 2 and 3: - import sys - if sys.version_info >= (3, 0): - str_type = str - else: - str_type = basestring # noqa - for rule in rules: - assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], str_type) - assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], str_type) + assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], six.string_types) + assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], six.string_types) assert isinstance(rule.get("AllowedHeader", []), list) or isinstance(rule.get("AllowedHeader", ""), - str_type) + six.string_types) assert isinstance(rule.get("ExposedHeader", []), list) or isinstance(rule.get("ExposedHeader", ""), - str_type) - assert isinstance(rule.get("MaxAgeSeconds", "0"), str_type) + six.string_types) + assert isinstance(rule.get("MaxAgeSeconds", "0"), six.string_types) - if isinstance(rule["AllowedMethod"], str_type): + if isinstance(rule["AllowedMethod"], six.string_types): methods = [rule["AllowedMethod"]] else: methods = rule["AllowedMethod"] @@ -745,6 +730,10 @@ class S3Backend(BaseBackend): if dest_key_name != src_key_name: key = key.copy(dest_key_name) dest_bucket.keys[dest_key_name] = key + + # By this point, the destination key must exist, or KeyError + if dest_bucket.is_versioned: + dest_bucket.keys[dest_key_name].increment_version() if storage is not None: key.set_storage_class(storage) if acl is not None: diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index e4cb499b9..87668d8b7 100644 --- a/tests/test_s3/test_s3.py 
+++ b/tests/test_s3/test_s3.py @@ -1364,6 +1364,29 @@ def test_boto3_head_object_with_versioning(): old_head_object['ContentLength'].should.equal(len(old_content)) +@mock_s3 +def test_boto3_copy_object_with_versioning(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + + obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId'] + obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Versions should be the same + obj1_version.should.equal(obj2_version) + + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2') + obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Version should be different to previous version + obj2_version_new.should_not.equal(obj2_version) + + @mock_s3 def test_boto3_head_object_if_modified_since(): s3 = boto3.client('s3', region_name='us-east-1') From 760f466a9cbeb958dc1251f152c6c10fcd9d70de Mon Sep 17 00:00:00 2001 From: Argishti Rostamian Date: Wed, 18 Oct 2017 17:30:00 -0700 Subject: [PATCH 391/412] Fix timezone mismatch in acm certificate created date and now date check --- moto/acm/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/acm/models.py b/moto/acm/models.py index de26529a4..39be8945d 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -170,7 +170,7 @@ class CertBundle(BaseModel): try: self._cert = cryptography.x509.load_pem_x509_certificate(self.cert, default_backend()) - now = datetime.datetime.now() + now = datetime.datetime.utcnow() if self._cert.not_valid_after < now: raise AWSValidationException('The certificate has expired, is not valid.') From 
427705c9f0bee1c938445b41525375d902240264 Mon Sep 17 00:00:00 2001 From: Argishti Rostamian Date: Thu, 19 Oct 2017 14:53:30 -0700 Subject: [PATCH 392/412] check is subject_alt_names is not none before checking length --- moto/acm/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/acm/responses.py b/moto/acm/responses.py index 7bf12bbb8..431a8cf60 100644 --- a/moto/acm/responses.py +++ b/moto/acm/responses.py @@ -185,7 +185,7 @@ class AWSCertificateManagerResponse(BaseResponse): idempotency_token = self._get_param('IdempotencyToken') subject_alt_names = self._get_param('SubjectAlternativeNames') - if len(subject_alt_names) > 10: + if subject_alt_names is not None and len(subject_alt_names) > 10: # There is initial AWS limit of 10 msg = 'An ACM limit has been exceeded. Need to request SAN limit to be raised' return json.dumps({'__type': 'LimitExceededException', 'message': msg}), dict(status=400) From 453da4c8b349d9777bac7fbc4667b0a11188806e Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 20 Oct 2017 00:51:04 +0100 Subject: [PATCH 393/412] Added CreateEnvironment to cloudformation --- moto/batch/models.py | 46 ++++++++++-- moto/cloudformation/parsing.py | 4 + tests/test_batch/test_cloudformation.py | 98 +++++++++++++++++++++++++ 3 files changed, 141 insertions(+), 7 deletions(-) create mode 100644 tests/test_batch/test_cloudformation.py diff --git a/moto/batch/models.py b/moto/batch/models.py index 7f75225f7..0fe3016ca 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -28,7 +28,7 @@ from moto.iam.exceptions import IAMNotFoundException _orig_adapter_send = requests.adapters.HTTPAdapter.send logger = logging.getLogger(__name__) DEFAULT_ACCOUNT_ID = 123456789012 -COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile(r'^[A-Za-z0-9_]{1,128}$') +COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile(r'^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$') def datetime2int(date): @@ -38,7 +38,7 @@ def datetime2int(date): class 
ComputeEnvironment(BaseModel): def __init__(self, compute_environment_name, _type, state, compute_resources, service_role, region_name): self.name = compute_environment_name - self.type = _type + self.env_type = _type self.state = state self.compute_resources = compute_resources self.service_role = service_role @@ -55,6 +55,33 @@ class ComputeEnvironment(BaseModel): self.ecs_arn = arn self.ecs_name = name + @property + def physical_resource_id(self): + return self.arn + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + backend = batch_backends[region_name] + properties = cloudformation_json['Properties'] + + # Need to deal with difference case from cloudformation compute_resources, e.g. instanceRole vs InstanceRole + # Hacky fix to normalise keys + new_comp_res = {} + for key, value in properties['ComputeResources'].items(): + new_key = key[0].lower() + key[1:] + new_comp_res[new_key] = value + + env = backend.create_compute_environment( + resource_name, + properties['Type'], + properties.get('State', 'ENABLED'), + new_comp_res, + properties['ServiceRole'] + ) + arn = env[1] + + return backend.get_compute_environment_by_arn(arn) + class JobQueue(BaseModel): def __init__(self, name, priority, state, environments, env_order_json, region_name): @@ -517,10 +544,10 @@ class BatchBackend(BaseBackend): 'ecsClusterArn': environment.ecs_arn, 'serviceRole': environment.service_role, 'state': environment.state, - 'type': environment.type, + 'type': environment.env_type, 'status': 'VALID' } - if environment.type == 'MANAGED': + if environment.env_type == 'MANAGED': json_part['computeResources'] = environment.compute_resources result.append(json_part) @@ -530,7 +557,7 @@ class BatchBackend(BaseBackend): def create_compute_environment(self, compute_environment_name, _type, state, compute_resources, service_role): # Validate if COMPUTE_ENVIRONMENT_NAME_REGEX.match(compute_environment_name) is None: - raise 
InvalidParameterValueException('Compute environment name does not match ^[A-Za-z0-9_]{1,128}$') + raise InvalidParameterValueException('Compute environment name does not match ^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$') if self.get_compute_environment_by_name(compute_environment_name) is not None: raise InvalidParameterValueException('A compute environment already exists with the name {0}'.format(compute_environment_name)) @@ -617,7 +644,9 @@ class BatchBackend(BaseBackend): if len(cr['instanceTypes']) == 0: raise InvalidParameterValueException('At least 1 instance type must be provided') for instance_type in cr['instanceTypes']: - if instance_type not in EC2_INSTANCE_TYPES: + if instance_type == 'optimal': + pass # Optimal should pick from latest of current gen + elif instance_type not in EC2_INSTANCE_TYPES: raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type)) for sec_id in cr['securityGroupIds']: @@ -657,6 +686,9 @@ class BatchBackend(BaseBackend): instances = [] for instance_type in instance_types: + if instance_type == 'optimal': + instance_type = 'm4.4xlarge' + instance_vcpus.append( (EC2_INSTANCE_TYPES[instance_type]['vcpus'], instance_type) ) @@ -700,7 +732,7 @@ class BatchBackend(BaseBackend): # Delete ECS cluster self.ecs_backend.delete_cluster(compute_env.ecs_name) - if compute_env.type == 'MANAGED': + if compute_env.env_type == 'MANAGED': # Delete compute envrionment instance_ids = [instance.id for instance in compute_env.instances] self.ec2_backend.terminate_instances(instance_ids) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 923ada058..05a408be1 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -8,6 +8,7 @@ import re from moto.autoscaling import models as autoscaling_models from moto.awslambda import models as lambda_models +from moto.batch import models as batch_models from moto.cloudwatch import models as cloudwatch_models from 
moto.datapipeline import models as datapipeline_models from moto.dynamodb import models as dynamodb_models @@ -31,6 +32,9 @@ from boto.cloudformation.stack import Output MODEL_MAP = { "AWS::AutoScaling::AutoScalingGroup": autoscaling_models.FakeAutoScalingGroup, "AWS::AutoScaling::LaunchConfiguration": autoscaling_models.FakeLaunchConfiguration, + "AWS::Batch::JobDefinition": batch_models.JobDefinition, + "AWS::Batch::JobQueue": batch_models.JobQueue, + "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment, "AWS::DynamoDB::Table": dynamodb_models.Table, "AWS::Kinesis::Stream": kinesis_models.Stream, "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, diff --git a/tests/test_batch/test_cloudformation.py b/tests/test_batch/test_cloudformation.py new file mode 100644 index 000000000..b0203af93 --- /dev/null +++ b/tests/test_batch/test_cloudformation.py @@ -0,0 +1,98 @@ +from __future__ import unicode_literals + +import time +import datetime +import boto3 +from botocore.exceptions import ClientError +import sure # noqa +from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs, mock_cloudformation +import functools +import nose +import json + +DEFAULT_REGION = 'eu-central-1' + + +def _get_clients(): + return boto3.client('ec2', region_name=DEFAULT_REGION), \ + boto3.client('iam', region_name=DEFAULT_REGION), \ + boto3.client('ecs', region_name=DEFAULT_REGION), \ + boto3.client('logs', region_name=DEFAULT_REGION), \ + boto3.client('batch', region_name=DEFAULT_REGION) + + +def _setup(ec2_client, iam_client): + """ + Do prerequisite setup + :return: VPC ID, Subnet ID, Security group ID, IAM Role ARN + :rtype: tuple + """ + resp = ec2_client.create_vpc(CidrBlock='172.30.0.0/24') + vpc_id = resp['Vpc']['VpcId'] + resp = ec2_client.create_subnet( + AvailabilityZone='eu-central-1a', + CidrBlock='172.30.0.0/25', + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + resp = ec2_client.create_security_group( + 
Description='test_sg_desc', + GroupName='test_sg', + VpcId=vpc_id + ) + sg_id = resp['GroupId'] + + resp = iam_client.create_role( + RoleName='TestRole', + AssumeRolePolicyDocument='some_policy' + ) + iam_arn = resp['Role']['Arn'] + + return vpc_id, subnet_id, sg_id, iam_arn + + +@mock_cloudformation() +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_env_cf(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + create_environment_template = { + 'Resources': { + "ComputeEnvironment": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + "ComputeResources": { + "Type": "EC2", + "MinvCpus": 0, + "DesiredvCpus": 0, + "MaxvCpus": 64, + "InstanceTypes": [ + "optimal" + ], + "Subnets": [subnet_id], + "SecurityGroupIds": [sg_id], + "InstanceRole": iam_arn + }, + "ServiceRole": iam_arn + } + } + } + } + cf_json = json.dumps(create_environment_template) + + cf_conn = boto3.client('cloudformation', DEFAULT_REGION) + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=cf_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + + stack_resources['StackResourceSummaries'][0]['ResourceStatus'].should.equal('CREATE_COMPLETE') + stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'].startswith('arn:aws:batch:') + stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'].should.contain('test_stack') From 9805a279c7077c74a34b4017e2938ef354c07998 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 20 Oct 2017 01:06:30 +0100 Subject: [PATCH 394/412] Added JobQueue to cloudformation --- moto/batch/models.py | 29 +++++++++++ tests/test_batch/test_cloudformation.py | 66 +++++++++++++++++++++++++ 2 files changed, 95 insertions(+) diff --git a/moto/batch/models.py b/moto/batch/models.py index 0fe3016ca..14572dd78 100644 --- a/moto/batch/models.py +++ 
b/moto/batch/models.py @@ -121,6 +121,35 @@ class JobQueue(BaseModel): return result + @property + def physical_resource_id(self): + return self.arn + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + backend = batch_backends[region_name] + properties = cloudformation_json['Properties'] + + # Need to deal with difference case from cloudformation compute_resources, e.g. instanceRole vs InstanceRole + # Hacky fix to normalise keys, is making me think I want to start spamming cAsEiNsEnSiTiVe dictionaries + compute_envs = [] + for compute_env in properties['ComputeEnvironmentOrder']: + tmp_compute_env_order = {} + for key, value in compute_env.items(): + new_key = key[0].lower() + key[1:] + tmp_compute_env_order[new_key] = value + compute_envs.append(tmp_compute_env_order) + + queue = backend.create_job_queue( + queue_name=resource_name, + priority=properties['Priority'], + state=properties.get('State', 'ENABLED'), + compute_env_order=compute_envs + ) + arn = queue[1] + + return backend.get_job_queue_by_arn(arn) + class JobDefinition(BaseModel): def __init__(self, name, parameters, _type, container_properties, region_name, revision=0, retry_strategy=0): diff --git a/tests/test_batch/test_cloudformation.py b/tests/test_batch/test_cloudformation.py index b0203af93..bc9bd53e4 100644 --- a/tests/test_batch/test_cloudformation.py +++ b/tests/test_batch/test_cloudformation.py @@ -94,5 +94,71 @@ def test_create_env_cf(): stack_resources = cf_conn.list_stack_resources(StackName=stack_id) stack_resources['StackResourceSummaries'][0]['ResourceStatus'].should.equal('CREATE_COMPLETE') + # Spot checks on the ARN stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'].startswith('arn:aws:batch:') stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'].should.contain('test_stack') + + +@mock_cloudformation() +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_job_queue_cf(): + ec2_client, 
iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + create_environment_template = { + 'Resources': { + "ComputeEnvironment": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + "ComputeResources": { + "Type": "EC2", + "MinvCpus": 0, + "DesiredvCpus": 0, + "MaxvCpus": 64, + "InstanceTypes": [ + "optimal" + ], + "Subnets": [subnet_id], + "SecurityGroupIds": [sg_id], + "InstanceRole": iam_arn + }, + "ServiceRole": iam_arn + } + }, + + "JobQueue": { + "Type": "AWS::Batch::JobQueue", + "Properties": { + "Priority": 1, + "ComputeEnvironmentOrder": [ + { + "Order": 1, + "ComputeEnvironment": {"Ref": "ComputeEnvironment"} + } + ] + } + }, + } + } + cf_json = json.dumps(create_environment_template) + + cf_conn = boto3.client('cloudformation', DEFAULT_REGION) + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=cf_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + len(stack_resources['StackResourceSummaries']).should.equal(2) + + job_queue_resource = list(filter(lambda item: item['ResourceType'] == 'AWS::Batch::JobQueue', stack_resources['StackResourceSummaries']))[0] + + job_queue_resource['ResourceStatus'].should.equal('CREATE_COMPLETE') + # Spot checks on the ARN + job_queue_resource['PhysicalResourceId'].startswith('arn:aws:batch:') + job_queue_resource['PhysicalResourceId'].should.contain('test_stack') + job_queue_resource['PhysicalResourceId'].should.contain('job-queue/') From f02f4646ea8f0c6d7af76258a97afe4535518235 Mon Sep 17 00:00:00 2001 From: Argishti Rostamian Date: Thu, 19 Oct 2017 17:47:21 -0700 Subject: [PATCH 395/412] add test --- tests/test_acm/test_acm.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index 96e362d1e..db1969645 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py 
@@ -287,6 +287,19 @@ def test_request_certificate(): ) resp.should.contain('CertificateArn') +@mock_acm +def test_request_certificate_no_san(): + client = boto3.client('acm', region_name='eu-central-1') + + resp = client.request_certificate( + DomainName='google.com' + ) + resp.should.contain('CertificateArn') + + resp2 = client.describe_certificate( + CertificateArn=resp['CertificateArn'] + ) + resp2.should.contain('Certificate') # # Also tests the SAN code # # requires Pull: https://github.com/spulec/freezegun/pull/210 From 3fc9cb9035d62f336f47093724511026d80fb8bf Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 19 Oct 2017 22:39:31 -0700 Subject: [PATCH 396/412] ship dashboard template for moto server --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index 7e219f463..cd1f1e886 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,5 @@ include README.md LICENSE AUTHORS.md include requirements.txt requirements-dev.txt tox.ini include moto/ec2/resources/instance_types.json +recursive-include moto/templates * recursive-include tests * From 60cac09e93b1f907e80beb4512ede0e98693cb87 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 20 Oct 2017 18:33:48 +0900 Subject: [PATCH 397/412] fix scaffold template --- scripts/template/lib/models.py.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/template/lib/models.py.j2 b/scripts/template/lib/models.py.j2 index 2a0097c1d..623321884 100644 --- a/scripts/template/lib/models.py.j2 +++ b/scripts/template/lib/models.py.j2 @@ -17,4 +17,4 @@ class {{ service_class }}Backend(BaseBackend): available_regions = boto3.session.Session().get_available_regions("{{ service }}") -{{ service }}_backends = {region: {{ service_class }}Backend for region in available_regions} +{{ service }}_backends = {region: {{ service_class }}Backend(region) for region in available_regions} From 456e5cd25c2bb8c7784da40d3f681d8aa0874007 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Jorge=20Marcial=20=C3=81lvarez=20Gago?= Date: Fri, 20 Oct 2017 12:16:53 +0200 Subject: [PATCH 398/412] Fix Error in get-logs request LogEvent instance is not JSON serializable --- moto/logs/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 4cb9caa6a..1828aa1ed 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -89,7 +89,7 @@ class LogsResponse(BaseResponse): self.logs_backend.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) return json.dumps({ - "events": events, + "events": [ob.__dict__ for ob in events], "nextBackwardToken": next_backward_token, "nextForwardToken": next_foward_token }) From bca8e1129083df2e7d17acadc2b4f9c6c280e6a3 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 20 Oct 2017 13:19:55 +0100 Subject: [PATCH 399/412] Fixes #1276 SNS Subject validation --- moto/sns/models.py | 5 ++++- moto/sns/responses.py | 9 +++++++- tests/test_sns/test_publishing_boto3.py | 30 +++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 2 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 4bab049b4..42b82adbc 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -256,7 +256,10 @@ class SNSBackend(BaseBackend): else: return self._get_values_nexttoken(self.subscriptions, next_token) - def publish(self, arn, message): + def publish(self, arn, message, subject=None): + if subject is not None and len(subject) > 100: + raise ValueError('Subject must be less than 100 characters') + try: topic = self.get_topic(arn) message_id = topic.publish(message) diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 85764aa58..3b4aade80 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -239,6 +239,8 @@ class SNSResponse(BaseResponse): target_arn = self._get_param('TargetArn') topic_arn = self._get_param('TopicArn') phone_number = self._get_param('PhoneNumber') + 
subject = self._get_param('Subject') + if phone_number is not None: # Check phone is correct syntax (e164) if not is_e164(phone_number): @@ -261,7 +263,12 @@ class SNSResponse(BaseResponse): arn = topic_arn message = self._get_param('Message') - message_id = self.backend.publish(arn, message) + + try: + message_id = self.backend.publish(arn, message, subject=subject) + except ValueError as err: + error_response = self._error('InvalidParameter', str(err)) + return error_response, dict(status=400) if self.request_json: return json.dumps({ diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 6228f212f..15726ba38 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -177,3 +177,33 @@ def test_publish_to_http(): response = conn.publish( TopicArn=topic_arn, Message="my message", Subject="my subject") message_id = response['MessageId'] + + +@mock_sqs +@mock_sns +def test_publish_subject(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + subject1 = 'test subject' + subject2 = 'test subject' * 20 + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(TopicArn=topic_arn, Message=message, Subject=subject1) + + # Just that it doesnt error is a pass + try: + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(TopicArn=topic_arn, Message=message, Subject=subject2) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameter') + else: + raise RuntimeError('Should of raised an InvalidParameter exception') From 
dce81cf875158cfe6a718d40777367182935d231 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 20 Oct 2017 14:52:24 +0100 Subject: [PATCH 400/412] Fixed typos ;-) Thanks @hwine --- moto/sns/models.py | 2 +- tests/test_sns/test_publishing_boto3.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 42b82adbc..856255be5 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -257,7 +257,7 @@ class SNSBackend(BaseBackend): return self._get_values_nexttoken(self.subscriptions, next_token) def publish(self, arn, message, subject=None): - if subject is not None and len(subject) > 100: + if subject is not None and len(subject) >= 100: raise ValueError('Subject must be less than 100 characters') try: diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 15726ba38..1540ceb84 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -197,13 +197,13 @@ def test_publish_subject(): subject1 = 'test subject' subject2 = 'test subject' * 20 with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(TopicArn=topic_arn, Message=message, Subject=subject1) + conn.publish(TopicArn=topic_arn, Message=message, Subject=subject1) # Just that it doesnt error is a pass try: with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(TopicArn=topic_arn, Message=message, Subject=subject2) + conn.publish(TopicArn=topic_arn, Message=message, Subject=subject2) except ClientError as err: err.response['Error']['Code'].should.equal('InvalidParameter') else: - raise RuntimeError('Should of raised an InvalidParameter exception') + raise RuntimeError('Should have raised an InvalidParameter exception') From 629503398c097c17f1ba83d3c842d86638eaffda Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Fri, 20 Oct 2017 19:10:31 +0100 Subject: [PATCH 401/412] Added JobDefinition to cloudformation --- moto/batch/models.py 
| 40 +++++++----- moto/batch/utils.py | 9 +++ tests/test_batch/test_cloudformation.py | 83 +++++++++++++++++++++++++ 3 files changed, 116 insertions(+), 16 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index 14572dd78..8b3b81ccb 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -19,7 +19,7 @@ from moto.ecs import ecs_backends from moto.logs import logs_backends from .exceptions import InvalidParameterValueException, InternalFailure, ClientException -from .utils import make_arn_for_compute_env, make_arn_for_job_queue, make_arn_for_task_def +from .utils import make_arn_for_compute_env, make_arn_for_job_queue, make_arn_for_task_def, lowercase_first_key from moto.ec2.exceptions import InvalidSubnetIdError from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES from moto.iam.exceptions import IAMNotFoundException @@ -64,18 +64,11 @@ class ComputeEnvironment(BaseModel): backend = batch_backends[region_name] properties = cloudformation_json['Properties'] - # Need to deal with difference case from cloudformation compute_resources, e.g. instanceRole vs InstanceRole - # Hacky fix to normalise keys - new_comp_res = {} - for key, value in properties['ComputeResources'].items(): - new_key = key[0].lower() + key[1:] - new_comp_res[new_key] = value - env = backend.create_compute_environment( resource_name, properties['Type'], properties.get('State', 'ENABLED'), - new_comp_res, + lowercase_first_key(properties['ComputeResources']), properties['ServiceRole'] ) arn = env[1] @@ -132,13 +125,7 @@ class JobQueue(BaseModel): # Need to deal with difference case from cloudformation compute_resources, e.g. 
instanceRole vs InstanceRole # Hacky fix to normalise keys, is making me think I want to start spamming cAsEiNsEnSiTiVe dictionaries - compute_envs = [] - for compute_env in properties['ComputeEnvironmentOrder']: - tmp_compute_env_order = {} - for key, value in compute_env.items(): - new_key = key[0].lower() + key[1:] - tmp_compute_env_order[new_key] = value - compute_envs.append(tmp_compute_env_order) + compute_envs = [lowercase_first_key(dict_item) for dict_item in properties['ComputeEnvironmentOrder']] queue = backend.create_job_queue( queue_name=resource_name, @@ -228,6 +215,27 @@ class JobDefinition(BaseModel): return result + @property + def physical_resource_id(self): + return self.arn + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + backend = batch_backends[region_name] + properties = cloudformation_json['Properties'] + + res = backend.register_job_definition( + def_name=resource_name, + parameters=lowercase_first_key(properties.get('Parameters', {})), + _type='container', + retry_strategy=lowercase_first_key(properties['RetryStrategy']), + container_properties=lowercase_first_key(properties['ContainerProperties']) + ) + + arn = res[1] + + return backend.get_job_definition_by_arn(arn) + class Job(threading.Thread, BaseModel): def __init__(self, name, job_def, job_queue, log_backend): diff --git a/moto/batch/utils.py b/moto/batch/utils.py index 6cdd381f7..829a55f12 100644 --- a/moto/batch/utils.py +++ b/moto/batch/utils.py @@ -11,3 +11,12 @@ def make_arn_for_job_queue(account_id, name, region_name): def make_arn_for_task_def(account_id, name, revision, region_name): return "arn:aws:batch:{0}:{1}:job-definition/{2}:{3}".format(region_name, account_id, name, revision) + + +def lowercase_first_key(some_dict): + new_dict = {} + for key, value in some_dict.items(): + new_key = key[0].lower() + key[1:] + new_dict[new_key] = value + + return new_dict diff --git a/tests/test_batch/test_cloudformation.py 
b/tests/test_batch/test_cloudformation.py index bc9bd53e4..1e37aa3a6 100644 --- a/tests/test_batch/test_cloudformation.py +++ b/tests/test_batch/test_cloudformation.py @@ -162,3 +162,86 @@ def test_create_job_queue_cf(): job_queue_resource['PhysicalResourceId'].startswith('arn:aws:batch:') job_queue_resource['PhysicalResourceId'].should.contain('test_stack') job_queue_resource['PhysicalResourceId'].should.contain('job-queue/') + + +@mock_cloudformation() +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_job_def_cf(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + create_environment_template = { + 'Resources': { + "ComputeEnvironment": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + "ComputeResources": { + "Type": "EC2", + "MinvCpus": 0, + "DesiredvCpus": 0, + "MaxvCpus": 64, + "InstanceTypes": [ + "optimal" + ], + "Subnets": [subnet_id], + "SecurityGroupIds": [sg_id], + "InstanceRole": iam_arn + }, + "ServiceRole": iam_arn + } + }, + + "JobQueue": { + "Type": "AWS::Batch::JobQueue", + "Properties": { + "Priority": 1, + "ComputeEnvironmentOrder": [ + { + "Order": 1, + "ComputeEnvironment": {"Ref": "ComputeEnvironment"} + } + ] + } + }, + + "JobDefinition": { + "Type": "AWS::Batch::JobDefinition", + "Properties": { + "Type": "container", + "ContainerProperties": { + "Image": { + "Fn::Join": ["", ["137112412989.dkr.ecr.", {"Ref": "AWS::Region"}, ".amazonaws.com/amazonlinux:latest"]] + }, + "Vcpus": 2, + "Memory": 2000, + "Command": ["echo", "Hello world"] + }, + "RetryStrategy": { + "Attempts": 1 + } + } + }, + } + } + cf_json = json.dumps(create_environment_template) + + cf_conn = boto3.client('cloudformation', DEFAULT_REGION) + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=cf_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + 
len(stack_resources['StackResourceSummaries']).should.equal(3) + + job_def_resource = list(filter(lambda item: item['ResourceType'] == 'AWS::Batch::JobDefinition', stack_resources['StackResourceSummaries']))[0] + + job_def_resource['ResourceStatus'].should.equal('CREATE_COMPLETE') + # Spot checks on the ARN + job_def_resource['PhysicalResourceId'].startswith('arn:aws:batch:') + job_def_resource['PhysicalResourceId'].should.contain('test_stack-JobDef') + job_def_resource['PhysicalResourceId'].should.contain('job-definition/') From 21c3775cb17dd947c3a85cbd27aa9e4412b369cd Mon Sep 17 00:00:00 2001 From: Paul Carleton Date: Fri, 20 Oct 2017 10:50:00 -0700 Subject: [PATCH 402/412] Add health status to autoscaling instances --- moto/autoscaling/models.py | 3 ++- moto/autoscaling/responses.py | 4 ++-- tests/test_autoscaling/test_autoscaling.py | 23 ++++++++++++++++++++++ 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index fd8efd54f..84601343d 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -16,9 +16,10 @@ ASG_NAME_TAG = "aws:autoscaling:groupName" class InstanceState(object): - def __init__(self, instance, lifecycle_state="InService"): + def __init__(self, instance, lifecycle_state="InService", health_status="Healthy"): self.instance = instance self.lifecycle_state = lifecycle_state + self.health_status = health_status class FakeScalingPolicy(BaseModel): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 832103775..16148ab16 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -397,7 +397,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {% for instance_state in group.instance_states %} - HEALTHY + {{ instance_state.health_status }} us-east-1e {{ instance_state.instance.id }} {{ group.launch_config_name }} @@ -472,7 +472,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """ {% for instance_state in instance_states %} - 
HEALTHY + {{ instance_state.health_status }} {{ instance_state.instance.autoscaling_group.name }} us-east-1e {{ instance_state.instance.id }} diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index b0bbc88a8..c31444af3 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -311,6 +311,7 @@ def test_autoscaling_group_describe_instances(): instances = list(conn.get_all_autoscaling_instances()) instances.should.have.length_of(2) instances[0].launch_config_name.should.equal('tester') + instances[0].health_status.should.equal('Healthy') autoscale_instance_ids = [instance.instance_id for instance in instances] ec2_conn = boto.connect_ec2() @@ -959,3 +960,25 @@ def test_attach_one_instance(): AutoScalingGroupNames=['test_asg'] ) response['AutoScalingGroups'][0]['Instances'].should.have.length_of(3) + +@mock_autoscaling +@mock_ec2 +def test_describe_instance_health(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=2, + MaxSize=4, + DesiredCapacity=2, + ) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + + instance1 = response['AutoScalingGroups'][0]['Instances'][0] + instance1['HealthStatus'].should.equal('Healthy') From 685e9163a836904787fdb04ffaabae8050d611d3 Mon Sep 17 00:00:00 2001 From: Paul Carleton Date: Fri, 20 Oct 2017 11:25:09 -0700 Subject: [PATCH 403/412] Add set-instance-health support --- moto/autoscaling/models.py | 6 +++++ moto/autoscaling/responses.py | 19 +++++++++++++ tests/test_autoscaling/test_autoscaling.py | 31 ++++++++++++++++++++++ 3 files changed, 56 insertions(+) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 
84601343d..71343a8e0 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -433,6 +433,12 @@ class AutoScalingBackend(BaseBackend): group.instance_states.extend(new_instances) self.update_attached_elbs(group.name) + def set_instance_health(self, instance_id, health_status, should_respect_grace_period): + instance = self.ec2_backend.get_instance(instance_id) + instance_state = next(instance_state for group in self.autoscaling_groups.values() + for instance_state in group.instance_states if instance_state.instance.id == instance.id) + instance_state.health_status = health_status + def detach_instances(self, group_name, instance_ids, should_decrement): group = self.autoscaling_groups[group_name] original_size = len(group.instance_states) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 16148ab16..05146feba 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -98,6 +98,18 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(ATTACH_INSTANCES_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id + def set_instance_health(self): + instance_id = self._get_param('InstanceId') + health_status = self._get_param("HealthStatus") + if health_status not in ['Healthy', 'Unhealthy']: + raise ValueError('Valid instance health states are: [Healthy, Unhealthy]') + should_respect_grace_period = self._get_param("ShouldRespectGracePeriod") + self.autoscaling_backend.set_instance_health(instance_id, health_status, should_respect_grace_period) + template = self.response_template(SET_INSTANCE_HEALTH_TEMPLATE) + return template.render() + @amz_crc32 @amzn_request_id def detach_instances(self): @@ -568,3 +580,10 @@ DETACH_LOAD_BALANCERS_TEMPLATE = """ + + +{{ requestid }} + +""" diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index c31444af3..9f5d4b952 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ 
b/tests/test_autoscaling/test_autoscaling.py @@ -982,3 +982,34 @@ def test_describe_instance_health(): instance1 = response['AutoScalingGroups'][0]['Instances'][0] instance1['HealthStatus'].should.equal('Healthy') + +@mock_autoscaling +@mock_ec2 +def test_set_instance_health(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=2, + MaxSize=4, + DesiredCapacity=2, + ) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + + instance1 = response['AutoScalingGroups'][0]['Instances'][0] + instance1['HealthStatus'].should.equal('Healthy') + + client.set_instance_health(InstanceId=instance1['InstanceId'], HealthStatus='Unhealthy') + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + + instance1 = response['AutoScalingGroups'][0]['Instances'][0] + instance1['HealthStatus'].should.equal('Unhealthy') From 993b0920832345a414888e5368e479fd586f27c0 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Tue, 17 Oct 2017 02:42:17 +0000 Subject: [PATCH 404/412] add target_group support to autoscaling service --- moto/autoscaling/models.py | 57 ++++++++- moto/autoscaling/responses.py | 68 ++++++++++- tests/test_autoscaling/test_autoscaling.py | 5 + tests/test_autoscaling/test_elbv2.py | 131 +++++++++++++++++++++ 4 files changed, 253 insertions(+), 8 deletions(-) create mode 100644 tests/test_autoscaling/test_elbv2.py diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index fd8efd54f..d082daa25 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -4,6 +4,7 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends from moto.elb import elb_backends +from moto.elbv2 
import elbv2_backends from moto.elb.exceptions import LoadBalancerNotFoundError from .exceptions import ( ResourceContentionError, @@ -149,7 +150,7 @@ class FakeAutoScalingGroup(BaseModel): def __init__(self, name, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, - load_balancers, placement_group, termination_policies, + load_balancers, target_group_arns, placement_group, termination_policies, autoscaling_backend, tags): self.autoscaling_backend = autoscaling_backend self.name = name @@ -166,6 +167,7 @@ class FakeAutoScalingGroup(BaseModel): self.health_check_period = health_check_period self.health_check_type = health_check_type if health_check_type else "EC2" self.load_balancers = load_balancers + self.target_group_arns = target_group_arns self.placement_group = placement_group self.termination_policies = termination_policies @@ -179,6 +181,7 @@ class FakeAutoScalingGroup(BaseModel): launch_config_name = properties.get("LaunchConfigurationName") load_balancer_names = properties.get("LoadBalancerNames", []) + target_group_arns = properties.get("TargetGroupARNs", []) backend = autoscaling_backends[region_name] group = backend.create_autoscaling_group( @@ -194,6 +197,7 @@ class FakeAutoScalingGroup(BaseModel): health_check_period=properties.get("HealthCheckGracePeriod"), health_check_type=properties.get("HealthCheckType"), load_balancers=load_balancer_names, + target_group_arns=target_group_arns, placement_group=None, termination_policies=properties.get("TerminationPolicies", []), tags=properties.get("Tags", []), @@ -299,20 +303,26 @@ class FakeAutoScalingGroup(BaseModel): instance.autoscaling_group = self self.instance_states.append(InstanceState(instance)) + def append_target_groups(self, target_group_arns): + append = [x for x in target_group_arns if x not in self.target_group_arns] + self.target_group_arns.extend(append) + class 
AutoScalingBackend(BaseBackend): - def __init__(self, ec2_backend, elb_backend): + def __init__(self, ec2_backend, elb_backend, elbv2_backend): self.autoscaling_groups = OrderedDict() self.launch_configurations = OrderedDict() self.policies = {} self.ec2_backend = ec2_backend self.elb_backend = elb_backend + self.elbv2_backend = elbv2_backend def reset(self): ec2_backend = self.ec2_backend elb_backend = self.elb_backend + elbv2_backend = self.elbv2_backend self.__dict__ = {} - self.__init__(ec2_backend, elb_backend) + self.__init__(ec2_backend, elb_backend, elbv2_backend) def create_launch_configuration(self, name, image_id, key_name, kernel_id, ramdisk_id, security_groups, user_data, instance_type, @@ -352,7 +362,8 @@ class AutoScalingBackend(BaseBackend): launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, load_balancers, - placement_group, termination_policies, tags): + target_group_arns, placement_group, + termination_policies, tags): def make_int(value): return int(value) if value is not None else value @@ -378,6 +389,7 @@ class AutoScalingBackend(BaseBackend): health_check_period=health_check_period, health_check_type=health_check_type, load_balancers=load_balancers, + target_group_arns=target_group_arns, placement_group=placement_group, termination_policies=termination_policies, autoscaling_backend=self, @@ -386,6 +398,7 @@ class AutoScalingBackend(BaseBackend): self.autoscaling_groups[name] = group self.update_attached_elbs(group.name) + self.update_attached_target_groups(group.name) return group def update_autoscaling_group(self, name, availability_zones, @@ -522,8 +535,25 @@ class AutoScalingBackend(BaseBackend): self.elb_backend.deregister_instances( elb.name, elb_instace_ids - group_instance_ids) - def create_or_update_tags(self, tags): + def update_attached_target_groups(self, group_name): + group = self.autoscaling_groups[group_name] + group_instance_ids = set( + state.instance.id for state in 
group.instance_states) + # no action necessary if target_group_arns is empty + if not group.target_group_arns: + return + + target_groups = self.elbv2_backend.describe_target_groups( + target_group_arns=group.target_group_arns, + load_balancer_arn=None, + names=None) + + for target_group in target_groups: + asg_targets = [{'id': x, 'port': target_group.port} for x in group_instance_ids] + self.elbv2_backend.register_targets(target_group.arn, (asg_targets)) + + def create_or_update_tags(self, tags): for tag in tags: group_name = tag["resource_id"] group = self.autoscaling_groups[group_name] @@ -562,8 +592,23 @@ class AutoScalingBackend(BaseBackend): elb.name, group_instance_ids) group.load_balancers = [x for x in group.load_balancers if x not in load_balancer_names] + def attach_load_balancer_target_groups(self, group_name, target_group_arns): + group = self.autoscaling_groups[group_name] + group.append_target_groups(target_group_arns) + self.update_attached_target_groups(group_name) + + def describe_load_balancer_target_groups(self, group_name): + return self.autoscaling_groups[group_name].target_group_arns + + def detach_load_balancer_target_groups(self, group_name, target_group_arns): + group = self.autoscaling_groups[group_name] + group.target_group_arns = [x for x in group.target_group_arns if x not in target_group_arns] + for target_group in target_group_arns: + asg_targets = [{'id': x.instance.id} for x in group.instance_states] + self.elbv2_backend.deregister_targets(target_group, (asg_targets)) + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): autoscaling_backends[region] = AutoScalingBackend( - ec2_backend, elb_backends[region]) + ec2_backend, elb_backends[region], elbv2_backends[region]) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 832103775..c44df3357 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -80,6 +80,7 @@ class AutoScalingResponse(BaseResponse): 
health_check_period=self._get_int_param('HealthCheckGracePeriod'), health_check_type=self._get_param('HealthCheckType'), load_balancers=self._get_multi_param('LoadBalancerNames.member'), + target_group_arns=self._get_multi_param('TargetGroupARNs.member'), placement_group=self._get_param('PlacementGroup'), termination_policies=self._get_multi_param( 'TerminationPolicies.member'), @@ -92,7 +93,7 @@ class AutoScalingResponse(BaseResponse): @amzn_request_id def attach_instances(self): group_name = self._get_param('AutoScalingGroupName') - instance_ids = self._get_multi_param("InstanceIds.member") + instance_ids = self._get_multi_param('InstanceIds.member') self.autoscaling_backend.attach_instances( group_name, instance_ids) template = self.response_template(ATTACH_INSTANCES_TEMPLATE) @@ -102,7 +103,7 @@ class AutoScalingResponse(BaseResponse): @amzn_request_id def detach_instances(self): group_name = self._get_param('AutoScalingGroupName') - instance_ids = self._get_multi_param("InstanceIds.member") + instance_ids = self._get_multi_param('InstanceIds.member') should_decrement_string = self._get_param('ShouldDecrementDesiredCapacity') if should_decrement_string == 'true': should_decrement = True @@ -113,6 +114,37 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(DETACH_INSTANCES_TEMPLATE) return template.render(detached_instances=detached_instances) + @amz_crc32 + @amzn_request_id + def attach_load_balancer_target_groups(self): + group_name = self._get_param('AutoScalingGroupName') + target_group_arns = self._get_multi_param('TargetGroupARNs.member') + + self.autoscaling_backend.attach_load_balancer_target_groups( + group_name, target_group_arns) + template = self.response_template(ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE) + return template.render() + + @amz_crc32 + @amzn_request_id + def describe_load_balancer_target_groups(self): + group_name = self._get_param('AutoScalingGroupName') + target_group_arns = 
self.autoscaling_backend.describe_load_balancer_target_groups( + group_name) + template = self.response_template(DESCRIBE_LOAD_BALANCER_TARGET_GROUPS) + return template.render(target_group_arns=target_group_arns) + + @amz_crc32 + @amzn_request_id + def detach_load_balancer_target_groups(self): + group_name = self._get_param('AutoScalingGroupName') + target_group_arns = self._get_multi_param('TargetGroupARNs.member') + + self.autoscaling_backend.detach_load_balancer_target_groups( + group_name, target_group_arns) + template = self.response_template(DETACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE) + return template.render() + def describe_auto_scaling_groups(self): names = self._get_multi_param("AutoScalingGroupNames.member") token = self._get_param("NextToken") @@ -338,6 +370,14 @@ CREATE_AUTOSCALING_GROUP_TEMPLATE = """ + + + +{{ requestid }} + +""" + ATTACH_INSTANCES_TEMPLATE = """ @@ -346,6 +386,22 @@ ATTACH_INSTANCES_TEMPLATE = """ + + + {% for arn in target_group_arns %} + + {{ arn }} + Added + + {% endfor %} + + + +{{ requestid }} + +""" + DETACH_INSTANCES_TEMPLATE = """ @@ -372,6 +428,14 @@ DETACH_INSTANCES_TEMPLATE = """ + + + +{{ requestid }} + +""" + DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index b0bbc88a8..48074d5a7 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -599,6 +599,11 @@ def test_attach_load_balancer(): ) list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(INSTANCE_COUNT) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=["test_asg"] + ) + list(response['AutoScalingGroups'][0]['LoadBalancerNames']).should.have.length_of(1) + @mock_autoscaling @mock_elb diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py new file mode 100644 index 000000000..89ec4a399 --- /dev/null +++ 
b/tests/test_autoscaling/test_elbv2.py @@ -0,0 +1,131 @@ +from __future__ import unicode_literals +import boto3 + +from moto import mock_autoscaling, mock_ec2, mock_elbv2 + +@mock_elbv2 +@mock_ec2 +@mock_autoscaling +def test_attach_detach_target_groups(): + INSTANCE_COUNT = 2 + client = boto3.client('autoscaling', region_name='us-east-1') + elbv2_client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + + response = elbv2_client.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group_arn = response['TargetGroups'][0]['TargetGroupArn'] + + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration') + + # create asg, attach to target group on create + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + TargetGroupARNs=[target_group_arn], + VPCZoneIdentifier=vpc.id) + # create asg without attaching to target group + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg2', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + VPCZoneIdentifier=vpc.id) + + response = client.describe_load_balancer_target_groups( + AutoScalingGroupName='test_asg') + list(response['LoadBalancerTargetGroups']).should.have.length_of(1) + + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT) + + 
client.attach_load_balancer_target_groups( + AutoScalingGroupName='test_asg2', + TargetGroupARNs=[target_group_arn]) + + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT * 2) + + response = client.detach_load_balancer_target_groups( + AutoScalingGroupName='test_asg2', + TargetGroupARNs=[target_group_arn]) + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT) + +@mock_elbv2 +@mock_ec2 +@mock_autoscaling +def test_detach_all_target_groups(): + INSTANCE_COUNT = 2 + client = boto3.client('autoscaling', region_name='us-east-1') + elbv2_client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + + response = elbv2_client.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group_arn = response['TargetGroups'][0]['TargetGroupArn'] + + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration') + + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + TargetGroupARNs=[target_group_arn], + VPCZoneIdentifier=vpc.id) + + response = client.describe_load_balancer_target_groups( + AutoScalingGroupName='test_asg') + list(response['LoadBalancerTargetGroups']).should.have.length_of(1) + + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + 
list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT) + + response = client.detach_load_balancer_target_groups( + AutoScalingGroupName='test_asg', + TargetGroupARNs=[target_group_arn]) + + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + list(response['TargetHealthDescriptions']).should.have.length_of(0) + response = client.describe_load_balancer_target_groups( + AutoScalingGroupName='test_asg') + list(response['LoadBalancerTargetGroups']).should.have.length_of(0) From ae6f9dcb71f8c4d3231540daeda09706d0045710 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sat, 21 Oct 2017 22:10:45 +0100 Subject: [PATCH 405/412] Fixes #1282 --- moto/route53/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/route53/models.py b/moto/route53/models.py index d12f4ee7a..f0e52086d 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -209,7 +209,7 @@ class FakeZone(BaseModel): @property def physical_resource_id(self): - return self.name + return self.id @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): From f247cd0f489dc44dc1072ff8323d3a913eb8a476 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sat, 21 Oct 2017 22:41:19 +0100 Subject: [PATCH 406/412] Fixed uses of HostedZone --- .../route53_ec2_instance_with_public_ip.py | 13 ++++++++++--- .../fixtures/route53_health_check.py | 2 +- .../fixtures/route53_roundrobin.py | 15 +++++++++++---- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py index 5e66bbd86..43a11104b 100644 --- a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py +++ b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py @@ -1,6 +1,13 @@ from __future__ import unicode_literals template = { + "Parameters": { + 
"R53ZoneName": { + "Type": "String", + "Default": "my_zone" + } + }, + "Resources": { "Ec2Instance": { "Type": "AWS::EC2::Instance", @@ -13,20 +20,20 @@ template = { "HostedZone": { "Type": "AWS::Route53::HostedZone", "Properties": { - "Name": "my_zone" + "Name": {"Ref": "R53ZoneName"} } }, "myDNSRecord": { "Type": "AWS::Route53::RecordSet", "Properties": { - "HostedZoneName": {"Ref": "HostedZone"}, + "HostedZoneId": {"Ref": "HostedZone"}, "Comment": "DNS name for my instance.", "Name": { "Fn::Join": ["", [ {"Ref": "Ec2Instance"}, ".", {"Ref": "AWS::Region"}, ".", - {"Ref": "HostedZone"}, "." + {"Ref": "R53ZoneName"}, "." ]] }, "Type": "A", diff --git a/tests/test_cloudformation/fixtures/route53_health_check.py b/tests/test_cloudformation/fixtures/route53_health_check.py index f6a2c9b8e..420cd38ba 100644 --- a/tests/test_cloudformation/fixtures/route53_health_check.py +++ b/tests/test_cloudformation/fixtures/route53_health_check.py @@ -26,7 +26,7 @@ template = { "myDNSRecord": { "Type": "AWS::Route53::RecordSet", "Properties": { - "HostedZoneName": {"Ref": "HostedZone"}, + "HostedZoneId": {"Ref": "HostedZone"}, "Comment": "DNS name for my instance.", "Name": "my_record_set", "Type": "A", diff --git a/tests/test_cloudformation/fixtures/route53_roundrobin.py b/tests/test_cloudformation/fixtures/route53_roundrobin.py index da4fecd4d..199e3e088 100644 --- a/tests/test_cloudformation/fixtures/route53_roundrobin.py +++ b/tests/test_cloudformation/fixtures/route53_roundrobin.py @@ -5,30 +5,37 @@ template = { "Description": "AWS CloudFormation Sample Template Route53_RoundRobin: Sample template showing how to use weighted round robin (WRR) DNS entried via Amazon Route 53. This contrived sample uses weighted CNAME records to illustrate that the weighting influences the return records. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates one or more AWS resources. 
You will be billed for the AWS resources used if you create a stack from this template.", + "Parameters": { + "R53ZoneName": { + "Type": "String", + "Default": "my_zone" + } + }, + "Resources": { "MyZone": { "Type": "AWS::Route53::HostedZone", "Properties": { - "Name": "my_zone" + "Name": {"Ref": "R53ZoneName"} } }, "MyDNSRecord": { "Type": "AWS::Route53::RecordSetGroup", "Properties": { - "HostedZoneName": {"Ref": "MyZone"}, + "HostedZoneId": {"Ref": "MyZone"}, "Comment": "Contrived example to redirect to aws.amazon.com 75% of the time and www.amazon.com 25% of the time.", "RecordSets": [{ "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "AWS"]]}, - "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "R53ZoneName"}, "."]]}, "Type": "CNAME", "TTL": "900", "ResourceRecords": ["aws.amazon.com"], "Weight": "3" }, { "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "Amazon"]]}, - "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "R53ZoneName"}, "."]]}, "Type": "CNAME", "TTL": "900", "ResourceRecords": ["www.amazon.com"], From 97ab15d8837d3e0aaa5bc0efaf457e731fc7acd9 Mon Sep 17 00:00:00 2001 From: captainkerk Date: Sat, 21 Oct 2017 22:10:36 +0000 Subject: [PATCH 407/412] boto3 update group method doesnt accept LoadBalancerNames --- moto/autoscaling/models.py | 8 ++++---- moto/autoscaling/responses.py | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index fd8efd54f..c508d6b24 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -224,7 +224,7 @@ class FakeAutoScalingGroup(BaseModel): def update(self, availability_zones, 
desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, - health_check_period, health_check_type, load_balancers, + health_check_period, health_check_type, placement_group, termination_policies): if availability_zones: self.availability_zones = availability_zones @@ -392,13 +392,13 @@ class AutoScalingBackend(BaseBackend): desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, - health_check_type, load_balancers, - placement_group, termination_policies): + health_check_type, placement_group, + termination_policies): group = self.autoscaling_groups[name] group.update(availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, - load_balancers, placement_group, termination_policies) + placement_group, termination_policies) return group def describe_autoscaling_groups(self, names): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 832103775..fe2f9467d 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -145,7 +145,6 @@ class AutoScalingResponse(BaseResponse): default_cooldown=self._get_int_param('DefaultCooldown'), health_check_period=self._get_int_param('HealthCheckGracePeriod'), health_check_type=self._get_param('HealthCheckType'), - load_balancers=self._get_multi_param('LoadBalancerNames.member'), placement_group=self._get_param('PlacementGroup'), termination_policies=self._get_multi_param( 'TerminationPolicies.member'), From ca3a3633e9a1448e20f11ae026460a468fe9c0b6 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sun, 22 Oct 2017 13:36:23 +0100 Subject: [PATCH 408/412] Called terminatejob from canceljob --- moto/batch/responses.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/moto/batch/responses.py b/moto/batch/responses.py index 96094068d..e626b7d4c 100644 --- 
a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -293,12 +293,4 @@ class BatchResponse(BaseResponse): # CancelJob def canceljob(self): # Theres some AWS semantics on the differences but for us they're identical ;-) - job_id = self._get_param('jobId') - reason = self._get_param('reason') - - try: - self.batch_backend.terminate_job(job_id, reason) - except AWSError as err: - return err.response() - - return '' + return self.terminatejob() From fbc984933bce2caeb3855d8f0a747b6a447ac0da Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sun, 22 Oct 2017 21:36:39 +0100 Subject: [PATCH 409/412] Added server test --- tests/test_batch/test_server.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_batch/test_server.py b/tests/test_batch/test_server.py index 7c0d2b3a1..4a74260a8 100644 --- a/tests/test_batch/test_server.py +++ b/tests/test_batch/test_server.py @@ -9,8 +9,11 @@ from moto import mock_batch Test the different server responses ''' + @mock_batch def test_batch_list(): backend = server.create_backend_app("batch") test_client = backend.test_client() - # do test \ No newline at end of file + + res = test_client.get('/v1/describecomputeenvironments') + res.status_code.should.equal(200) From e9852c381b55f56d75a7b6c3376f7e6bf8c0d83e Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Sun, 22 Oct 2017 23:20:00 +0100 Subject: [PATCH 410/412] Make improvements to filter expression, added NOT keyword --- moto/dynamodb2/comparisons.py | 83 +++++++++++++++++---------- tests/test_dynamodb2/test_dynamodb.py | 38 +++++++++++- 2 files changed, 91 insertions(+), 30 deletions(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index faeffbaa5..68051460e 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -43,16 +43,14 @@ def get_comparison_func(range_comparison): return COMPARISON_FUNCS.get(range_comparison) -# +class RecursionStopIteration(StopIteration): + pass + + def 
get_filter_expression(expr, names, values): # Examples # expr = 'Id > 5 AND attribute_exists(test) AND Id BETWEEN 5 AND 6 OR length < 6 AND contains(test, 1) AND 5 IN (4,5, 6) OR (Id < 5 AND 5 > Id)' # expr = 'Id > 5 AND Subs < 7' - - # Need to do some dodgyness for NOT i think. - if 'NOT' in expr: - raise NotImplementedError('NOT not supported yet') - if names is None: names = {} if values is None: @@ -82,7 +80,7 @@ def get_filter_expression(expr, names, values): # Remove all spaces, tbf we could just skip them in the next step. # The number of known options is really small so we can do a fair bit of cheating - expr = list(expr) + expr = list(expr.strip()) # DodgyTokenisation stage 1 def is_value(val): @@ -134,27 +132,31 @@ def get_filter_expression(expr, names, values): return val in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') # DodgyTokenisation stage 2, it groups together some elements to make RPN'ing it later easier. - tokens2 = [] - token_iterator = iter(tokens) - for token in token_iterator: - if token == '(': - tuple_list = [] + def handle_token(token, tokens2, token_iterator): + # ok so this essentially groups up some tokens to make later parsing easier, + # when it encounters brackets it will recurse and then unrecurse when RecursionStopIteration is raised. 
+ if token == ')': + raise RecursionStopIteration() # Should be recursive so this should work + elif token == '(': + temp_list = [] - next_token = six.next(token_iterator) - while next_token != ')': - if next_token in values_map: - next_token = values_map[next_token] - - tuple_list.append(next_token) - next_token = six.next(token_iterator) + try: + while True: + next_token = six.next(token_iterator) + handle_token(next_token, temp_list, token_iterator) + except RecursionStopIteration: + pass # Continue + except StopIteration: + ValueError('Malformed filter expression, type1') # Sigh, we only want to group a tuple if it doesnt contain operators - if any([is_op(item) for item in tuple_list]): + if any([is_op(item) for item in temp_list]): + # Its an expression tokens2.append('(') - tokens2.extend(tuple_list) + tokens2.extend(temp_list) tokens2.append(')') else: - tokens2.append(tuple(tuple_list)) + tokens2.append(tuple(temp_list)) elif token == 'BETWEEN': field = tokens2.pop() # if values map contains a number, it would be a float @@ -166,7 +168,6 @@ def get_filter_expression(expr, names, values): op2 = six.next(token_iterator) op2 = int(values_map.get(op2, op2)) tokens2.append(['between', field, op1, op2]) - elif is_function(token): function_list = [token] @@ -179,7 +180,6 @@ def get_filter_expression(expr, names, values): next_token = six.next(token_iterator) tokens2.append(function_list) - else: # Convert tokens back to real types if token in values_map: @@ -191,6 +191,11 @@ def get_filter_expression(expr, names, values): else: tokens2.append(token) + tokens2 = [] + token_iterator = iter(tokens) + for token in token_iterator: + handle_token(token, tokens2, token_iterator) + # Start of the Shunting-Yard algorithm. <-- Proper beast algorithm! 
def is_number(val): return val not in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') @@ -218,7 +223,9 @@ def get_filter_expression(expr, names, values): output.append(token) else: # Must be operator kw - while len(op_stack) > 0 and OPS[op_stack[-1]] <= OPS[token]: + + # Cheat, NOT is our only RIGHT associative operator, should really have dict of operator associativity + while len(op_stack) > 0 and OPS[op_stack[-1]] <= OPS[token] and op_stack[-1] != 'NOT': output.append(op_stack.pop()) op_stack.append(token) while len(op_stack) > 0: @@ -242,17 +249,22 @@ def get_filter_expression(expr, names, values): stack = [] for token in output: if is_op(token): - op2 = stack.pop() - op1 = stack.pop() - op_cls = OP_CLASS[token] + op_cls = OP_CLASS[token] + if token == 'NOT': + op1 = stack.pop() + op2 = True + else: + op2 = stack.pop() + op1 = stack.pop() + stack.append(op_cls(op1, op2)) else: stack.append(to_func(token)) result = stack.pop(0) if len(stack) > 0: - raise ValueError('Malformed filter expression') + raise ValueError('Malformed filter expression, type2') return result @@ -313,6 +325,18 @@ class Func(object): return 'Func(...)'.format(self.FUNC) +class OpNot(Op): + OP = 'NOT' + + def expr(self, item): + lhs = self._lhs(item) + + return not lhs + + def __str__(self): + return '({0} {1})'.format(self.OP, self.lhs) + + class OpAnd(Op): OP = 'AND' @@ -483,6 +507,7 @@ class FuncBetween(Func): OP_CLASS = { + 'NOT': OpNot, 'AND': OpAnd, 'OR': OpOr, 'IN': OpIn, diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 26d380628..5df03f8d8 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -576,10 +576,17 @@ def test_get_item_returns_consumed_capacity(): def test_filter_expression(): - # TODO NOT not yet supported row1 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '5'}, 'Desc': {'S': 'Some description'}, 'KV': {'SS': ['test1', 'test2']}}) row2 =
moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}}) + # NOT test 1 + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT attribute_not_exists(Id)', {}, {}) + filter_expr.expr(row1).should.be(True) + + # NOT test 2 + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT (Id = :v0)', {}, {':v0': {'N': 8}}) + filter_expr.expr(row1).should.be(False) # Id = 8 so should be false + # AND test filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}}) filter_expr.expr(row1).should.be(True) @@ -622,6 +629,14 @@ def test_filter_expression(): filter_expr = moto.dynamodb2.comparisons.get_filter_expression('size(Desc) > size(KV)', {}, {}) filter_expr.expr(row1).should.be(True) + # Expression from @batkuip + filter_expr = moto.dynamodb2.comparisons.get_filter_expression( + '(#n0 < :v0 AND attribute_not_exists(#n1))', + {'#n0': 'Subs', '#n1': 'fanout_ts'}, + {':v0': {'N': '7'}} + ) + filter_expr.expr(row1).should.be(True) + @mock_dynamodb2 def test_scan_filter(): @@ -712,6 +727,27 @@ def test_scan_filter3(): assert response['Count'] == 1 +@mock_dynamodb2 +def test_scan_filter4(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + + table = dynamodb.Table('test1') + response = table.scan( + FilterExpression=Attr('epoch_ts').lt(7) & Attr('fanout_ts').not_exists() + ) + # Just testing + assert response['Count'] == 0 + + @mock_dynamodb2 def test_bad_scan_filter(): client = boto3.client('dynamodb', region_name='us-east-1') From 5c5511956276a02765fb48576bce29e68c860ff3 Mon Sep 17 00:00:00 2001 From: Florent Rivoire Date: Mon, 23 Oct 2017 10:47:55 +0200 Subject: [PATCH 411/412] Do not freeze requirement aws-xray-sdk to a specific version (>= instead of ==) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5cf32ade7..bdb8a1dd6 100755 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ install_requires = [ "python-dateutil<3.0.0,>=2.1", "mock", "docker>=2.5.1", - "aws-xray-sdk==0.92.2" + "aws-xray-sdk>=0.93" ] extras_require = { From 9eca6119bd36d1124c182eabf585660e69947112 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Mon, 23 Oct 2017 22:51:02 +0900 Subject: [PATCH 412/412] Fix flake8 version to 3.4.1 --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 1c001305e..cdd88ab2f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,7 +3,7 @@ mock nose sure==1.2.24 coverage -flake8 +flake8==3.4.1 freezegun flask boto>=2.45.0