From 80c11d676cdf86a09ed6a9e6edf390a8c730a2b6 Mon Sep 17 00:00:00 2001 From: Xu Liu Date: Tue, 23 Apr 2019 11:53:00 -0400 Subject: [PATCH 01/25] Use cast_value when comparing DynamoType --- moto/dynamodb2/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 0f4594aa4..10fe75c0f 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -45,16 +45,16 @@ class DynamoType(object): ) def __lt__(self, other): - return self.value < other.value + return self.cast_value < other.cast_value def __le__(self, other): - return self.value <= other.value + return self.cast_value <= other.cast_value def __gt__(self, other): - return self.value > other.value + return self.cast_value > other.cast_value def __ge__(self, other): - return self.value >= other.value + return self.cast_value >= other.cast_value def __repr__(self): return "DynamoType: {0}".format(self.to_json()) From 4dec187d80a59cbe0d1a8b78d58dab20131516f7 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 5 Oct 2019 15:20:43 +0100 Subject: [PATCH 02/25] #1874 - Check item size in DynamoDB --- moto/dynamodb2/responses.py | 4 ++++ tests/test_dynamodb2/test_dynamodb.py | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index d07beefd6..e7f5f2825 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -225,6 +225,10 @@ class DynamoHandler(BaseResponse): er = 'com.amazonaws.dynamodb.v20111205#ValidationException' return self.error(er, 'Return values set to invalid value') + if len(str(item).encode('utf-8')) > 405000: + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, 'Item size has exceeded the maximum allowed size') + if has_empty_keys_or_values(item): return get_empty_str_error() diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index b0952f101..4fd0fb472 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2278,6 +2278,27 @@ def test_index_with_unknown_attributes_should_fail(): ex.exception.response['Error']['Message'].should.contain(expected_exception) +# https://github.com/spulec/moto/issues/1874 +@mock_dynamodb2 +def test_item_size_is_under_400KB(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='moto-test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1} + ) + + with assert_raises(ClientError) as ex: + large_item = 'x' * 410 * 1000 + dynamodb.put_item( + TableName='moto-test', + Item={'id': {'S': 'foo'}, 'item': {'S': large_item}}) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['Error']['Message'].should.equal('Item size has exceeded the maximum allowed size') + + def _create_user_table(): client = boto3.client('dynamodb', region_name='us-east-1') client.create_table( From dc89b47b40273fb25069f303fe57528f3323b00c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 6 Oct 2019 14:08:58 +0100 Subject: [PATCH 03/25] #1874 - Count item size based on contents of actual dictionary --- moto/dynamodb2/exceptions.py | 5 +++ moto/dynamodb2/models.py | 57 ++++++++++++++++++++++++++- moto/dynamodb2/responses.py | 12 +++--- tests/test_dynamodb2/test_dynamodb.py | 15
+++++-- 4 files changed, 78 insertions(+), 11 deletions(-) diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py index 9df973292..88bb732a5 100644 --- a/moto/dynamodb2/exceptions.py +++ b/moto/dynamodb2/exceptions.py @@ -1,2 +1,7 @@ class InvalidIndexNameError(ValueError): pass + + +class ItemSizeTooLarge(Exception): + message = 'Item size has exceeded the maximum allowed size' + pass diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 4ef4461cd..b97cd94e7 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -16,7 +16,7 @@ from moto.core.exceptions import JsonRESTError from .comparisons import get_comparison_func from .comparisons import get_filter_expression from .comparisons import get_expected -from .exceptions import InvalidIndexNameError +from .exceptions import InvalidIndexNameError, ItemSizeTooLarge class DynamoJsonEncoder(json.JSONEncoder): @@ -30,6 +30,10 @@ def dynamo_json_dump(dynamo_object): return json.dumps(dynamo_object, cls=DynamoJsonEncoder) +def bytesize(val): + return len(str(val).encode('utf-8')) + + class DynamoType(object): """ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes @@ -99,6 +103,22 @@ class DynamoType(object): return None + def size(self): + if self.is_number(): + value_size = len(str(self.value)) + elif self.is_set(): + sub_type = self.type[0] + value_size = sum([DynamoType({sub_type: v}).size() for v in self.value]) + elif self.is_list(): + value_size = sum([DynamoType(v).size() for v in self.value]) + elif self.is_map(): + value_size = sum([bytesize(k) + DynamoType(v).size() for k, v in self.value.items()]) + elif type(self.value) == bool: + value_size = 1 + else: + value_size = bytesize(self.value) + return value_size + def to_json(self): return {self.type: self.value} @@ -126,6 +146,39 @@ class DynamoType(object): return self.type == other.type +# https://github.com/spulec/moto/issues/1874 +# Ensure that the total size of an item does not exceed 400kb +class LimitedSizeDict(dict): + def __init__(self, *args, **kwargs): + self.update(*args, **kwargs) + + def __setitem__(self, key, value): + current_item_size = sum([item.size() if type(item) == DynamoType else bytesize(str(item)) for item in (self.keys() + self.values())]) + new_item_size = bytesize(key) + (value.size() if type(value) == DynamoType else bytesize(str(value))) + # Official limit is set to 400000 (400KB) + # Manual testing confirms that the actual limit is between 409 and 410KB + # We'll set the limit to something in between to be safe + if (current_item_size + new_item_size) > 405000: + raise ItemSizeTooLarge + super(LimitedSizeDict, self).__setitem__(key, value) + + def update(self, *args, **kwargs): + if args: + if len(args) > 1: + raise TypeError("update expected at most 1 arguments, " + "got %d" % len(args)) + other = dict(args[0]) + for key in other: + self[key] = other[key] + for key in kwargs: + self[key] = kwargs[key] + + def setdefault(self, key, value=None): + if key not in self: + self[key] = value + return self[key] + + class Item(BaseModel): def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs): @@ -134,7 +187,7 @@ class Item(BaseModel): self.range_key = range_key self.range_key_type = range_key_type - self.attrs = {} + self.attrs = LimitedSizeDict() for key, value in attrs.items(): self.attrs[key] = DynamoType(value) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index e7f5f2825..c04cf0ed6 100644 --- 
a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -6,7 +6,7 @@ import re from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id -from .exceptions import InvalidIndexNameError +from .exceptions import InvalidIndexNameError, ItemSizeTooLarge from .models import dynamodb_backends, dynamo_json_dump @@ -225,10 +225,6 @@ class DynamoHandler(BaseResponse): er = 'com.amazonaws.dynamodb.v20111205#ValidationException' return self.error(er, 'Return values set to invalid value') - if len(str(item).encode('utf-8')) > 405000: - er = 'com.amazonaws.dynamodb.v20111205#ValidationException' - return self.error(er, 'Item size has exceeded the maximum allowed size') - if has_empty_keys_or_values(item): return get_empty_str_error() @@ -259,6 +255,9 @@ class DynamoHandler(BaseResponse): name, item, expected, condition_expression, expression_attribute_names, expression_attribute_values, overwrite) + except ItemSizeTooLarge: + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, ItemSizeTooLarge.message) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er, 'A condition specified in the operation could not be evaluated.') @@ -648,6 +647,9 @@ class DynamoHandler(BaseResponse): name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, expected, condition_expression ) + except ItemSizeTooLarge: + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, ItemSizeTooLarge.message) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er, 'A condition specified in the operation could not be evaluated.') diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 4fd0fb472..bedd6812f 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2282,6 +2282,7 @@ def test_index_with_unknown_attributes_should_fail(): @mock_dynamodb2 def test_item_size_is_under_400KB(): dynamodb = boto3.client('dynamodb', region_name='us-east-1') + res = boto3.resource('dynamodb') dynamodb.create_table( TableName='moto-test', @@ -2289,12 +2290,18 @@ def test_item_size_is_under_400KB(): AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1} ) + table = res.Table('moto-test') + large_item = 'x' * 410 * 1000 with assert_raises(ClientError) as ex: - large_item = 'x' * 410 * 1000 - dynamodb.put_item( - TableName='moto-test', - Item={'id': {'S': 'foo'}, 'item': {'S': large_item}}) + dynamodb.put_item(TableName='moto-test', Item={'id': {'S': 'foo'}, 'item': {'S': large_item}}) + with assert_raises(ClientError) as ex: + table.put_item(Item={'id': 'bar', 'item': large_item}) + with assert_raises(ClientError) as ex: + dynamodb.update_item(TableName='moto-test', + Key={'id': {'S': 'foo2'}}, + UpdateExpression='set item=:Item', + ExpressionAttributeValues={':Item': {'S': large_item}}) ex.exception.response['Error']['Code'].should.equal('ValidationException') ex.exception.response['Error']['Message'].should.equal('Item size has exceeded the maximum allowed size') From a389fdcd95be945e6cc876723422b5b3b0b3273a Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 6 Oct 2019 15:24:37 +0100 Subject: [PATCH 04/25] #1874 - Adjust for Python2/3 incompatibilities --- moto/dynamodb2/models.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index b97cd94e7..9b0e74a1d 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -153,7 +153,7 @@ class LimitedSizeDict(dict): self.update(*args, **kwargs) def __setitem__(self, key, value): - current_item_size = sum([item.size() if type(item) == DynamoType else bytesize(str(item)) for item in (self.keys() + self.values())]) + current_item_size = sum([item.size() if type(item) == DynamoType else bytesize(str(item)) for item in (list(self.keys()) + list(self.values()))]) new_item_size = bytesize(key) + (value.size() if type(value) == DynamoType else bytesize(str(value))) # Official limit is set to 400000 (400KB) # Manual testing confirms that the actual limit is between 409 and 410KB From 91b13f998f80e62945e1bf0a4c96329307fde990 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 7 Oct 2019 11:11:22 +0100 Subject: [PATCH 05/25] Feature: Add option for DynamoDB stream to kick off lambda --- moto/awslambda/models.py | 27 ++++++++++++++++ moto/dynamodb2/models.py | 10 ++++++ tests/test_awslambda/test_lambda.py | 50 ++++++++++++++++++++++++++++- 3 files changed, 86 insertions(+), 1 deletion(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index acc7a5257..2630abe1b 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -33,6 +33,8 @@ from moto.s3.exceptions import MissingBucket, MissingKey from moto import settings from .utils import make_function_arn, make_function_ver_arn from moto.sqs import sqs_backends +from moto.dynamodb2 import dynamodb_backends2 +from moto.dynamodbstreams import dynamodbstreams_backends logger = logging.getLogger(__name__) @@ -692,6 +694,18 @@ class LambdaBackend(BaseBackend): queue.lambda_event_source_mappings[esm.function_arn] = esm return esm + try: + stream = json.loads(dynamodbstreams_backends[self.region_name].describe_stream(spec['EventSourceArn'])) + spec.update({'FunctionArn': func.function_arn}) + esm = EventSourceMapping(spec) + self._event_source_mappings[esm.uuid] = esm + table_name = stream['StreamDescription']['TableName'] + table = dynamodb_backends2[self.region_name].get_table(table_name) + table.lambda_event_source_mappings[esm.function_arn] = esm + + return esm + except Exception: + pass # No DynamoDB stream exists raise RESTError('ResourceNotFoundException', 'Invalid EventSourceArn') def publish_function(self, function_name): @@ -811,6 +825,19 @@ class LambdaBackend(BaseBackend): func = self._lambdas.get_function(function_name, qualifier) func.invoke(json.dumps(event), {}, {}) + def send_dynamodb_items(self, function_arn, items, source): + event = {'Records': [ + { + 'eventID': item.to_json()['eventID'], + 'eventName': 'INSERT', + 'eventVersion': item.to_json()['eventVersion'], + 'eventSource': item.to_json()['eventSource'], + 'awsRegion': 'us-east-1', + 'dynamodb': item.to_json()['dynamodb'], + 'eventSourceARN': source} for item in items]} + func = self._lambdas.get_arn(function_arn) + func.invoke(json.dumps(event), {}, {}) + def list_tags(self, resource): return self.get_function_by_arn(resource).tags diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 4ef4461cd..265f7697a 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -409,6 +409,15 @@ class StreamShard(BaseModel): seq = len(self.items) + self.starting_sequence_number self.items.append( StreamRecord(self.table, t, event_name, old, new, seq)) + result = None + from moto.awslambda import 
lambda_backends + for arn, esm in self.table.lambda_event_source_mappings.items(): + region = arn[len('arn:aws:lambda:'):arn.index(':', len('arn:aws:lambda:'))] + + result = lambda_backends[region].send_dynamodb_items(arn, self.items, esm.event_source_arn) + + if result: + self.items = [] def get(self, start, quantity): start -= self.starting_sequence_number @@ -451,6 +460,7 @@ class Table(BaseModel): # 'AttributeName': 'string' # Can contain this } self.set_stream_specification(streams) + self.lambda_event_source_mappings = {} @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 9467b0803..d916c9918 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -12,7 +12,7 @@ import zipfile import sure # noqa from freezegun import freeze_time -from moto import mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings, mock_sqs +from moto import mock_dynamodb2, mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings, mock_sqs from nose.tools import assert_raises from botocore.exceptions import ClientError @@ -1027,6 +1027,54 @@ def test_invoke_function_from_sqs(): assert False, "Test Failed" +@mock_logs +@mock_lambda +@mock_dynamodb2 +def test_invoke_function_from_dynamodb(): + logs_conn = boto3.client("logs") + dynamodb = boto3.client('dynamodb') + table_name = 'table_with_stream' + table = dynamodb.create_table(TableName=table_name, + KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], + StreamSpecification={'StreamEnabled': True, + 'StreamViewType': 'NEW_AND_OLD_IMAGES'}) + + conn = boto3.client('lambda') + func = conn.create_function(FunctionName='testFunction', Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={'ZipFile': get_test_zip_file3()}, + Description='test lambda function executed after a DynamoDB table is updated', + Timeout=3, MemorySize=128, Publish=True) + + response = conn.create_event_source_mapping( + EventSourceArn=table['TableDescription']['LatestStreamArn'], + FunctionName=func['FunctionArn'] + ) + + assert response['EventSourceArn'] == table['TableDescription']['LatestStreamArn'] + assert response['State'] == 'Enabled' + + dynamodb.put_item(TableName=table_name, Item={'id': { 'S': 'item 1' }}) + start = time.time() + while (time.time() - start) < 30: + result = logs_conn.describe_log_streams(logGroupName='/aws/lambda/testFunction') + log_streams = result.get('logStreams') + if not log_streams: + time.sleep(1) + continue + + assert len(log_streams) == 1 + result = logs_conn.get_log_events(logGroupName='/aws/lambda/testFunction', logStreamName=log_streams[0]['logStreamName']) + for event in result.get('events'): + if event['message'] == 'get_test_zip_file3 success': + return + time.sleep(1) + + assert False, "Test Failed" + + @mock_logs @mock_lambda @mock_sqs From 2d11e8feb4bee67f2586d0c3169cd782980fae0f Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 8 Oct 2019 14:11:21 +0100 Subject: [PATCH 06/25] Code improvements --- moto/awslambda/models.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 2630abe1b..7d015a096 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -694,18 +694,15 @@ class LambdaBackend(BaseBackend): 
queue.lambda_event_source_mappings[esm.function_arn] = esm return esm - try: - stream = json.loads(dynamodbstreams_backends[self.region_name].describe_stream(spec['EventSourceArn'])) - spec.update({'FunctionArn': func.function_arn}) - esm = EventSourceMapping(spec) - self._event_source_mappings[esm.uuid] = esm - table_name = stream['StreamDescription']['TableName'] - table = dynamodb_backends2[self.region_name].get_table(table_name) - table.lambda_event_source_mappings[esm.function_arn] = esm - - return esm - except Exception: - pass # No DynamoDB stream exists + for stream in json.loads(dynamodbstreams_backends[self.region_name].list_streams())['Streams']: + if stream['StreamArn'] == spec['EventSourceArn']: + spec.update({'FunctionArn': func.function_arn}) + esm = EventSourceMapping(spec) + self._event_source_mappings[esm.uuid] = esm + table_name = stream['TableName'] + table = dynamodb_backends2[self.region_name].get_table(table_name) + table.lambda_event_source_mappings[esm.function_arn] = esm + return esm raise RESTError('ResourceNotFoundException', 'Invalid EventSourceArn') def publish_function(self, function_name): @@ -832,7 +829,7 @@ class LambdaBackend(BaseBackend): 'eventName': 'INSERT', 'eventVersion': item.to_json()['eventVersion'], 'eventSource': item.to_json()['eventSource'], - 'awsRegion': 'us-east-1', + 'awsRegion': self.region_name, 'dynamodb': item.to_json()['dynamodb'], 'eventSourceARN': source} for item in items]} func = self._lambdas.get_arn(function_arn) From c9348bc45ca6d251149404c5cd95150bbed5e0c8 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 8 Oct 2019 15:51:46 +0100 Subject: [PATCH 07/25] #1874 - Add tests for more complex items --- tests/test_dynamodb2/test_dynamodb.py | 28 +++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index bedd6812f..d3997a59e 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2293,15 +2293,27 @@ def test_item_size_is_under_400KB(): table = res.Table('moto-test') large_item = 'x' * 410 * 1000 + assert_failure_due_to_item_size(func=dynamodb.put_item, + TableName='moto-test', + Item={'id': {'S': 'foo'}, 'item': {'S': large_item}}) + assert_failure_due_to_item_size(func=table.put_item, + Item={'id': 'bar', 'item': large_item}) + assert_failure_due_to_item_size(func=dynamodb.update_item, + TableName='moto-test', + Key={'id': {'S': 'foo2'}}, + UpdateExpression='set item=:Item', + ExpressionAttributeValues={':Item': {'S': large_item}}) + # Assert op fails when updating a nested item + assert_failure_due_to_item_size(func=table.put_item, + Item={'id': 'bar', 'itemlist': [{'item': large_item}]}) + assert_failure_due_to_item_size(func=dynamodb.put_item, + TableName='moto-test', + Item={'id': {'S': 'foo'}, 'itemlist': {'L': [{'M': {'item1': {'S': large_item}}}]}}) + + +def assert_failure_due_to_item_size(func, **kwargs): with assert_raises(ClientError) as ex: - dynamodb.put_item(TableName='moto-test', Item={'id': {'S': 'foo'}, 'item': {'S': large_item}}) - with assert_raises(ClientError) as ex: - table.put_item(Item={'id': 'bar', 'item': large_item}) - with assert_raises(ClientError) as ex: - dynamodb.update_item(TableName='moto-test', - Key={'id': {'S': 'foo2'}}, - UpdateExpression='set item=:Item', - ExpressionAttributeValues={':Item': {'S': large_item}}) + func(**kwargs) ex.exception.response['Error']['Code'].should.equal('ValidationException') 
ex.exception.response['Error']['Message'].should.equal('Item size has exceeded the maximum allowed size') From cb43796dafbf1d7b92745bc2eacaf79915a859f9 Mon Sep 17 00:00:00 2001 From: gruebel Date: Tue, 8 Oct 2019 21:29:09 +0200 Subject: [PATCH 08/25] Add ProjectionExpression & ExpressionAttributeNames to DynamoDB get_item & batch_get_item --- moto/dynamodb2/models.py | 25 ++- moto/dynamodb2/responses.py | 40 ++++- tests/test_dynamodb2/test_dynamodb.py | 212 +++++++++++++++++++++++++- 3 files changed, 266 insertions(+), 11 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index c06014488..2e4782a7f 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -619,18 +619,29 @@ class Table(BaseModel): def has_range_key(self): return self.range_key_attr is not None - def get_item(self, hash_key, range_key=None): + def get_item(self, hash_key, range_key=None, projection_expression=None): if self.has_range_key and not range_key: raise ValueError( "Table has a range key, but no range key was passed into get_item") try: + result = None + if range_key: - return self.items[hash_key][range_key] + result = self.items[hash_key][range_key] + elif hash_key in self.items: + result = self.items[hash_key] - if hash_key in self.items: - return self.items[hash_key] + if projection_expression and result: + expressions = [x.strip() for x in projection_expression.split(',')] + result = copy.deepcopy(result) + for attr in list(result.attrs): + if attr not in expressions: + result.attrs.pop(attr) - raise KeyError + if not result: + raise KeyError + + return result except KeyError: return None @@ -996,12 +1007,12 @@ class DynamoDBBackend(BaseBackend): def get_table(self, table_name): return self.tables.get(table_name) - def get_item(self, table_name, keys): + def get_item(self, table_name, keys, projection_expression=None): table = self.get_table(table_name) if not table: raise ValueError("No table found") hash_key, range_key = self.get_keys_value(table, keys) - return table.get_item(hash_key, range_key) + return table.get_item(hash_key, range_key, projection_expression) def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts, limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None, diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index d07beefd6..da94c4dc1 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -305,8 +305,26 @@ class DynamoHandler(BaseResponse): def get_item(self): name = self.body['TableName'] key = self.body['Key'] + projection_expression = self.body.get('ProjectionExpression') + expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) + + if projection_expression and expression_attribute_names: + expressions = [x.strip() for x in projection_expression.split(',')] + projection_expression = None + for expression in expressions: + if projection_expression is not None: + projection_expression = projection_expression + ", " + else: + projection_expression = "" + + if expression in expression_attribute_names: + projection_expression = projection_expression + \ + expression_attribute_names[expression] + else: + projection_expression = projection_expression + expression + try: - item = self.dynamodb_backend.get_item(name, key) + item = self.dynamodb_backend.get_item(name, key, projection_expression) except ValueError: er = 'com.amazon.coral.validate#ValidationException' return self.error(er, 'Validation Exception') @@ -338,9 +356,27 @@ class
DynamoHandler(BaseResponse): er = 'com.amazon.coral.validate#ValidationException' return self.error(er, 'Provided list of item keys contains duplicates') attributes_to_get = table_request.get('AttributesToGet') + projection_expression = table_request.get('ProjectionExpression') + expression_attribute_names = table_request.get('ExpressionAttributeNames', {}) + + if projection_expression and expression_attribute_names: + expressions = [x.strip() for x in projection_expression.split(',')] + projection_expression = None + for expression in expressions: + if projection_expression is not None: + projection_expression = projection_expression + ", " + else: + projection_expression = "" + + if expression in expression_attribute_names: + projection_expression = projection_expression + \ + expression_attribute_names[expression] + else: + projection_expression = projection_expression + expression + results["Responses"][table_name] = [] for key in keys: - item = self.dynamodb_backend.get_item(table_name, key) + item = self.dynamodb_backend.get_item(table_name, key, projection_expression) if item: item_describe = item.describe_attrs(attributes_to_get) results["Responses"][table_name].append( diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index b0952f101..fd6681ab6 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -369,7 +369,80 @@ def test_query_returns_consumed_capacity(): @mock_dynamodb2 -def test_basic_projection_expressions(): +def test_basic_projection_expression_using_get_item(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message' + }) + result = table.get_item( + Key = { + 'forum_name': 'the-key', + 'subject': '123' + }, + ProjectionExpression='body, subject' + ) + + assert result['Item'] == { + 'subject': '123', + 'body': 'some test message' + } + + # The projection expression should not remove data from storage + result = table.get_item( + Key = { + 'forum_name': 'the-key', + 'subject': '123' + } + ) + + assert result['Item'] == { + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + } + + +@mock_dynamodb2 +def test_basic_projection_expressions_using_query(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') # Create the DynamoDB table. 
@@ -452,6 +525,7 @@ def test_basic_projection_expressions(): assert 'body' in results['Items'][1] assert 'forum_name' in results['Items'][1] + @mock_dynamodb2 def test_basic_projection_expressions_using_scan(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ -538,7 +612,73 @@ def test_basic_projection_expressions_using_scan(): @mock_dynamodb2 -def test_basic_projection_expressions_with_attr_expression_names(): +def test_basic_projection_expression_using_get_item_with_attr_expression_names(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + 'attachment': 'something' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message', + 'attachment': 'something' + }) + result = table.get_item( + Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + ProjectionExpression='#rl, #rt, subject', + ExpressionAttributeNames={ + '#rl': 'body', + '#rt': 'attachment' + }, + ) + + assert result['Item'] == { + 'subject': '123', + 'body': 'some test message', + 'attachment': 'something' + } + + +@mock_dynamodb2 +def test_basic_projection_expressions_using_query_with_attr_expression_names(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') # Create the DynamoDB table. 
@@ -603,6 +743,7 @@ def test_basic_projection_expressions_with_attr_expression_names(): assert 'attachment' in results['Items'][0] assert results['Items'][0]['attachment'] == 'something' + @mock_dynamodb2 def test_basic_projection_expressions_using_scan_with_attr_expression_names(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ -2233,6 +2374,73 @@ def test_batch_items_returns_all(): assert [item['username']['S'] for item in returned_items] == ['user1', 'user2', 'user3'] +@mock_dynamodb2 +def test_batch_items_with_basic_projection_expression(): + dynamodb = _create_user_table() + returned_items = dynamodb.batch_get_item(RequestItems={ + 'users': { + 'Keys': [{ + 'username': {'S': 'user0'} + }, { + 'username': {'S': 'user1'} + }, { + 'username': {'S': 'user2'} + }, { + 'username': {'S': 'user3'} + }], + 'ConsistentRead': True, + 'ProjectionExpression': 'username' + } + })['Responses']['users'] + assert len(returned_items) == 3 + assert [item['username']['S'] for item in returned_items] == ['user1', 'user2', 'user3'] + assert [item.get('foo') for item in returned_items] == [None, None, None] + + # The projection expression should not remove data from storage + returned_items = dynamodb.batch_get_item(RequestItems = { + 'users': { + 'Keys': [{ + 'username': {'S': 'user0'} + }, { + 'username': {'S': 'user1'} + }, { + 'username': {'S': 'user2'} + }, { + 'username': {'S': 'user3'} + }], + 'ConsistentRead': True + } + })['Responses']['users'] + assert [item['username']['S'] for item in returned_items] == ['user1', 'user2', 'user3'] + assert [item['foo']['S'] for item in returned_items] == ['bar', 'bar', 'bar'] + + +@mock_dynamodb2 +def test_batch_items_with_basic_projection_expression_and_attr_expression_names(): + dynamodb = _create_user_table() + returned_items = dynamodb.batch_get_item(RequestItems={ + 'users': { + 'Keys': [{ + 'username': {'S': 'user0'} + }, { + 'username': {'S': 'user1'} + }, { + 'username': {'S': 'user2'} + }, { + 'username': {'S': 'user3'} + }], + 'ConsistentRead': True, + 'ProjectionExpression': '#rl', + 'ExpressionAttributeNames': { + '#rl': 'username' + }, + } + })['Responses']['users'] + assert len(returned_items) == 3 + assert [item['username']['S'] for item in returned_items] == ['user1', 'user2', 'user3'] + assert [item.get('foo') for item in returned_items] == [None, None, None] + + @mock_dynamodb2 def test_batch_items_should_throw_exception_for_duplicate_request(): client = _create_user_table() From 41af98c98b6c372c1c158f42611dccdf4dcf6c7e Mon Sep 17 00:00:00 2001 From: Seth Black Date: Tue, 8 Oct 2019 15:59:03 -0500 Subject: [PATCH 09/25] added UpdateFunctionCode and UpdateFunctionConfiguration and associated test cases --- moto/awslambda/models.py | 65 +++++++++++++++++ moto/awslambda/responses.py | 36 ++++++++++ moto/awslambda/urls.py | 4 +- tests/test_awslambda/test_lambda.py | 106 ++++++++++++++++++++++++++++ 4 files changed, 210 insertions(+), 1 deletion(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index acc7a5257..f2400ec38 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -273,6 +273,71 @@ class LambdaFunction(BaseModel): "Configuration": self.get_configuration(), } + def update_configuration(self, config_updates): + for key, value in config_updates.items(): + if key == "Description": + self.description = value + elif key == "Handler": + self.handler = value + elif key == "MemorySize": + self.memory_size = value + elif key == "Role": + self.role = value + elif key == "Runtime": + self.run_time 
= value + elif key == "Timeout": + self.timeout = value + elif key == "VpcConfig": + self.vpc_config = value + + return self.get_configuration() + + def update_function_code(self, spec): + if 'DryRun' in spec and spec['DryRun']: + return self.get_configuration() + + if 'Publish' in spec and spec['Publish']: + self.set_version(self.version + 1) + + if 'ZipFile' in spec: + # using the "hackery" from __init__" because it seems to work + # TODOs and FIXMEs included, because they'll need to be fixed + # in both places now + try: + to_unzip_code = base64.b64decode( + bytes(spec['ZipFile'], 'utf-8')) + except Exception: + to_unzip_code = base64.b64decode(spec['ZipFile']) + + self.code_bytes = to_unzip_code + self.code_size = len(to_unzip_code) + self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest() + + # TODO: we should be putting this in a lambda bucket + self.code['UUID'] = str(uuid.uuid4()) + self.code['S3Key'] = '{}-{}'.format(self.function_name, self.code['UUID']) + else: + key = None + try: + # FIXME: does not validate bucket region + key = s3_backend.get_key(spec['S3Bucket'], spec['S3Key']) + except MissingBucket: + if do_validate_s3(): + raise ValueError( + "InvalidParameterValueException", + "Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist") + except MissingKey: + if do_validate_s3(): + raise ValueError( + "InvalidParameterValueException", + "Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.") + if key: + self.code_bytes = key.value + self.code_size = key.size + self.code_sha_256 = hashlib.sha256(key.value).hexdigest() + + return self.get_configuration() + @staticmethod def convert(s): try: diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 1e7feb0d0..e22fee152 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -122,6 +122,18 @@ class LambdaResponse(BaseResponse): if request.method == 'POST': return self._add_policy(request, full_url, headers) + def configuration(self, request, full_url, headers): + if request.method == 'PUT': + return self._put_configuration(request, full_url) + else: + raise ValueError("Cannot handle request") + + def code(self, request, full_url, headers): + if request.method == 'PUT': + return self._put_code(request, full_url, headers) + else: + raise ValueError("Cannot handle request") + def _add_policy(self, request, full_url, headers): path = request.path if hasattr(request, 'path') else path_url(request.url) function_name = path.split('/')[-2] @@ -308,3 +320,27 @@ class LambdaResponse(BaseResponse): return 204, {}, "{}" else: return 404, {}, "{}" + + def _put_configuration(self, request, full_url): + function_name = self._get_param('FunctionName', None) + qualifier = self._get_param('Qualifier', None) + + fn = self.lambda_backend.get_function(function_name, qualifier) + + if fn: + config = fn.update_configuration(json.loads(request.body)) + return 200, {}, json.dumps(config) + else: + return 404, {}, "{}" + + def _put_code(self, request, full_url, headers): + function_name = self._get_param('FunctionName', None) + qualifier = self._get_param('Qualifier', None) + + fn = self.lambda_backend.get_function(function_name, qualifier) + + if fn: + config = fn.update_function_code(json.loads(request.body)) + return 200, {}, json.dumps(config) + else: + return 404, {}, "{}" diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index fb2c6ee7e..a306feff5 100644 --- 
a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -16,5 +16,7 @@ url_paths = { r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invocations/?$': response.invoke, r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invoke-async/?$': response.invoke_async, r'{0}/(?P<api_version>[^/]+)/tags/(?P<resource_arn>.+)': response.tag, - r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/policy/?$': response.policy + r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/policy/?$': response.policy, + r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/configuration/?$': response.configuration, + r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/code/?$': response.code } diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 9467b0803..0a6df7cca 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1245,3 +1245,109 @@ def test_delete_event_source_mapping(): assert response['State'] == 'Deleting' conn.get_event_source_mapping.when.called_with(UUID=response['UUID'])\ .should.throw(botocore.client.ClientError) + + +@mock_lambda +@mock_s3 +def test_update_configuration(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + fxn = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + assert fxn['Description'] == 'test lambda function' + assert fxn['Handler'] == 'lambda_function.lambda_handler' + assert fxn['MemorySize'] == 128 + assert fxn['Runtime'] == 'python2.7' + assert fxn['Timeout'] == 3 + + updated_config = conn.update_function_configuration( + FunctionName='testFunction', + Description='updated test lambda function', + Handler='lambda_function.new_lambda_handler', + Runtime='python3.6', + Timeout=7 + ) + + assert updated_config['ResponseMetadata']['HTTPStatusCode'] == 200 + assert updated_config['Description'] == 'updated test lambda function' + assert updated_config['Handler'] == 'lambda_function.new_lambda_handler' + assert updated_config['MemorySize'] == 128 + assert updated_config['Runtime'] == 'python3.6' + assert updated_config['Timeout'] == 7 + + +@mock_lambda +@freeze_time('2015-01-01 00:00:00') +def test_update_function(): + conn = boto3.client('lambda', 'us-west-2') + + zip_content_one = get_test_zip_file1() + + fxn = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': zip_content_one, + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + zip_content_two = get_test_zip_file2() + + conn.update_function_code( + FunctionName='testFunction', + ZipFile=zip_content_two, + Publish=True + ) + + response = conn.get_function( + FunctionName='testFunction' + ) + response['Configuration'].pop('LastModified') + + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + assert len(response['Code']) == 2 + assert response['Code']['RepositoryType'] == 'S3' + assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region)) + response['Configuration'].should.equal( + { + "CodeSha256": hashlib.sha256(zip_content_two).hexdigest(), + "CodeSize": len(zip_content_two),
"Description": "test lambda function", + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:2'.format(_lambda_region), + "FunctionName": "testFunction", + "Handler": "lambda_function.lambda_handler", + "MemorySize": 128, + "Role": "test-iam-role", + "Runtime": "python2.7", + "Timeout": 3, + "Version": '$LATEST', + "VpcConfig": { + "SecurityGroupIds": [], + "SubnetIds": [], + } + }, + ) From e759b565da499bf04e90e9914fc372c2da3449f8 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 9 Oct 2019 10:02:40 +0100 Subject: [PATCH 10/25] #1874 - Change tests to use resource/client when appropriate --- tests/test_dynamodb2/test_dynamodb.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 46d5f8f37..097d101ef 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2298,8 +2298,8 @@ def test_index_with_unknown_attributes_should_fail(): # https://github.com/spulec/moto/issues/1874 @mock_dynamodb2 def test_item_size_is_under_400KB(): - dynamodb = boto3.client('dynamodb', region_name='us-east-1') - res = boto3.resource('dynamodb') + dynamodb = boto3.resource('dynamodb') + client = boto3.client('dynamodb') dynamodb.create_table( TableName='moto-test', @@ -2307,15 +2307,14 @@ def test_item_size_is_under_400KB(): AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1} ) - table = res.Table('moto-test') + table = dynamodb.Table('moto-test') large_item = 'x' * 410 * 1000 - assert_failure_due_to_item_size(func=dynamodb.put_item, + assert_failure_due_to_item_size(func=client.put_item, TableName='moto-test', Item={'id': {'S': 'foo'}, 'item': {'S': large_item}}) - assert_failure_due_to_item_size(func=table.put_item, - Item={'id': 'bar', 'item': large_item}) - assert_failure_due_to_item_size(func=dynamodb.update_item, + assert_failure_due_to_item_size(func=table.put_item, Item = {'id': 'bar', 'item': large_item}) + assert_failure_due_to_item_size(func=client.update_item, TableName='moto-test', Key={'id': {'S': 'foo2'}}, UpdateExpression='set item=:Item', @@ -2323,7 +2322,7 @@ def test_item_size_is_under_400KB(): # Assert op fails when updating a nested item assert_failure_due_to_item_size(func=table.put_item, Item={'id': 'bar', 'itemlist': [{'item': large_item}]}) - assert_failure_due_to_item_size(func=dynamodb.put_item, + assert_failure_due_to_item_size(func=client.put_item, TableName='moto-test', Item={'id': {'S': 'foo'}, 'itemlist': {'L': [{'M': {'item1': {'S': large_item}}}]}}) From 20dc8ae5c4b44439d302809bd8c4a74179bb35d1 Mon Sep 17 00:00:00 2001 From: Seth Black Date: Wed, 9 Oct 2019 15:15:10 -0500 Subject: [PATCH 11/25] getting tests working in server mode --- moto/awslambda/models.py | 20 +++++++++++--------- moto/awslambda/responses.py | 18 ++++++++++-------- tests/test_awslambda/test_lambda.py | 3 +-- 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index f2400ec38..a1643cd03 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -292,22 +292,24 @@ class LambdaFunction(BaseModel): return self.get_configuration() - def update_function_code(self, spec): - if 'DryRun' in spec and spec['DryRun']: + def update_function_code(self, updated_spec): + if 'DryRun' in updated_spec and updated_spec['DryRun']: return self.get_configuration() - if 'Publish' in spec and 
spec['Publish']: + if 'Publish' in updated_spec and updated_spec['Publish']: self.set_version(self.version + 1) - if 'ZipFile' in spec: - # using the "hackery" from __init__" because it seems to work + if 'ZipFile' in updated_spec: + self.code['ZipFile'] = updated_spec['ZipFile'] + + # using the "hackery" from __init__ because it seems to work # TODOs and FIXMEs included, because they'll need to be fixed # in both places now try: to_unzip_code = base64.b64decode( - bytes(spec['ZipFile'], 'utf-8')) + bytes(updated_spec['ZipFile'], 'utf-8')) except Exception: - to_unzip_code = base64.b64decode(spec['ZipFile']) + to_unzip_code = base64.b64decode(updated_spec['ZipFile']) self.code_bytes = to_unzip_code self.code_size = len(to_unzip_code) @@ -316,11 +318,11 @@ class LambdaFunction(BaseModel): # TODO: we should be putting this in a lambda bucket self.code['UUID'] = str(uuid.uuid4()) self.code['S3Key'] = '{}-{}'.format(self.function_name, self.code['UUID']) - else: + elif 'S3Bucket' in updated_spec and 'S3Key' in updated_spec: key = None try: # FIXME: does not validate bucket region - key = s3_backend.get_key(spec['S3Bucket'], spec['S3Key']) + key = s3_backend.get_key(updated_spec['S3Bucket'], updated_spec['S3Key']) except MissingBucket: if do_validate_s3(): raise ValueError( diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index e22fee152..2041aa660 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -123,14 +123,16 @@ class LambdaResponse(BaseResponse): return self._add_policy(request, full_url, headers) def configuration(self, request, full_url, headers): + self.setup_class(request, full_url, headers) if request.method == 'PUT': - return self._put_configuration(request, full_url) + return self._put_configuration(request) else: raise ValueError("Cannot handle request") def code(self, request, full_url, headers): + self.setup_class(request, full_url, headers) if request.method == 'PUT': - return self._put_code(request, full_url, headers) + return self._put_code() else: raise ValueError("Cannot handle request") @@ -321,26 +323,26 @@ class LambdaResponse(BaseResponse): else: return 404, {}, "{}" - def _put_configuration(self, request, full_url): - function_name = self._get_param('FunctionName', None) + def _put_configuration(self, request): + function_name = self.path.rsplit('/', 2)[-2] qualifier = self._get_param('Qualifier', None) fn = self.lambda_backend.get_function(function_name, qualifier) if fn: - config = fn.update_configuration(json.loads(request.body)) + config = fn.update_configuration(self.json_body) return 200, {}, json.dumps(config) else: return 404, {}, "{}" - def _put_code(self, request, full_url, headers): - function_name = self._get_param('FunctionName', None) + def _put_code(self): + function_name = self.path.rsplit('/', 2)[-2] qualifier = self._get_param('Qualifier', None) fn = self.lambda_backend.get_function(function_name, qualifier) if fn: - config = fn.update_function_code(json.loads(request.body)) + config = fn.update_function_code(self.json_body) return 200, {}, json.dumps(config) else: return 404, {}, "{}" diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 0a6df7cca..20a806de2 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1295,7 +1295,6 @@ def test_update_configuration(): @mock_lambda -@freeze_time('2015-01-01 00:00:00') def test_update_function(): conn = boto3.client('lambda', 'us-west-2') @@ -1317,7 +1316,7 @@ def 
test_update_function(): zip_content_two = get_test_zip_file2() - conn.update_function_code( + fxn_updated = conn.update_function_code( FunctionName='testFunction', ZipFile=zip_content_two, Publish=True From dff24cb032fc11d80b50034423613b5739b9b492 Mon Sep 17 00:00:00 2001 From: Seth Black Date: Wed, 9 Oct 2019 16:20:49 -0500 Subject: [PATCH 12/25] bringing up test percentage --- moto/awslambda/models.py | 3 -- moto/awslambda/responses.py | 3 ++ tests/test_awslambda/test_lambda.py | 81 ++++++++++++++++++++++++++--- 3 files changed, 77 insertions(+), 10 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index a1643cd03..bd85ded9a 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -296,9 +296,6 @@ class LambdaFunction(BaseModel): if 'DryRun' in updated_spec and updated_spec['DryRun']: return self.get_configuration() - if 'Publish' in updated_spec and updated_spec['Publish']: - self.set_version(self.version + 1) - if 'ZipFile' in updated_spec: self.code['ZipFile'] = updated_spec['ZipFile'] diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 2041aa660..83a1cefca 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -342,6 +342,9 @@ class LambdaResponse(BaseResponse): fn = self.lambda_backend.get_function(function_name, qualifier) if fn: + if self.json_body.get('Publish', False): + fn = self.lambda_backend.publish_function(function_name) + config = fn.update_function_code(self.json_body) return 200, {}, json.dumps(config) else: diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 20a806de2..d6ee3f7f9 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1295,13 +1295,13 @@ def test_update_configuration(): @mock_lambda -def test_update_function(): +def test_update_function_zip(): conn = boto3.client('lambda', 'us-west-2') zip_content_one = get_test_zip_file1() fxn = conn.create_function( - FunctionName='testFunction', + FunctionName='testFunctionZip', Runtime='python2.7', Role='test-iam-role', Handler='lambda_function.lambda_handler', @@ -1317,13 +1317,14 @@ def test_update_function(): zip_content_two = get_test_zip_file2() fxn_updated = conn.update_function_code( - FunctionName='testFunction', + FunctionName='testFunctionZip', ZipFile=zip_content_two, Publish=True ) response = conn.get_function( - FunctionName='testFunction' + FunctionName='testFunctionZip', + Qualifier='2' ) response['Configuration'].pop('LastModified') @@ -1336,14 +1337,80 @@ def test_update_function(): "CodeSha256": hashlib.sha256(zip_content_two).hexdigest(), "CodeSize": len(zip_content_two), "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:2'.format(_lambda_region), - "FunctionName": "testFunction", + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunctionZip:2'.format(_lambda_region), + "FunctionName": "testFunctionZip", "Handler": "lambda_function.lambda_handler", "MemorySize": 128, "Role": "test-iam-role", "Runtime": "python2.7", "Timeout": 3, - "Version": '$LATEST', + "Version": '2', + "VpcConfig": { + "SecurityGroupIds": [], + "SubnetIds": [], + } + }, + ) + +@mock_lambda +@mock_s3 +def test_update_function_s3(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file1() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + + conn = boto3.client('lambda', 'us-west-2') + + fxn = 
conn.create_function( + FunctionName='testFunctionS3', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + zip_content_two = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test2.zip', Body=zip_content_two) + + fxn_updated = conn.update_function_code( + FunctionName='testFunctionS3', + S3Bucket='test-bucket', + S3Key='test2.zip', + Publish=True + ) + + response = conn.get_function( + FunctionName='testFunctionS3', + Qualifier='2' + ) + response['Configuration'].pop('LastModified') + + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + assert len(response['Code']) == 2 + assert response['Code']['RepositoryType'] == 'S3' + assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region)) + response['Configuration'].should.equal( + { + "CodeSha256": hashlib.sha256(zip_content_two).hexdigest(), + "CodeSize": len(zip_content_two), + "Description": "test lambda function", + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunctionS3:2'.format(_lambda_region), + "FunctionName": "testFunctionS3", + "Handler": "lambda_function.lambda_handler", + "MemorySize": 128, + "Role": "test-iam-role", + "Runtime": "python2.7", + "Timeout": 3, + "Version": '2', "VpcConfig": { "SecurityGroupIds": [], "SubnetIds": [], From ed6d780dab3df6ed003183fddb6bd218b556a3b8 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 10 Oct 2019 09:14:22 +0100 Subject: [PATCH 13/25] Remove dead code --- moto/awslambda/models.py | 8 --- moto/dynamodb2/comparisons.py | 15 ------ moto/dynamodb2/models.py | 6 +-- moto/rds/models.py | 91 ----------------------------------- 4 files changed, 3 insertions(+), 117 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index acc7a5257..ca0456700 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -280,14 +280,6 @@ class LambdaFunction(BaseModel): except Exception: return s - @staticmethod - def is_json(test_str): - try: - response = json.loads(test_str) - except Exception: - response = test_str - return response - def _invoke_lambda(self, code, event=None, context=None): # TODO: context not yet implemented if event is None: diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index dbc0bd57d..b08a5c195 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -799,21 +799,6 @@ class ConditionExpressionParser: else: # pragma: no cover raise ValueError("Unknown expression node kind %r" % node.kind) - def _print_debug(self, nodes): # pragma: no cover - print('ROOT') - for node in nodes: - self._print_node_recursive(node, depth=1) - - def _print_node_recursive(self, node, depth=0): # pragma: no cover - if len(node.children) > 0: - print(' ' * depth, node.nonterminal, node.kind) - for child in node.children: - self._print_node_recursive(child, depth=depth + 1) - else: - print(' ' * depth, node.nonterminal, node.kind, node.value) - - - def _assert(self, condition, message, nodes): if not condition: raise ValueError(message + " " + " ".join([t.text for t in nodes])) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index b2ff7ffc3..28e2a3509 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -824,7 +824,7 @@ class Table(BaseModel): exclusive_start_key, index_name) return 
results, scanned_count, last_evaluated_key - def _trim_results(self, results, limit, exclusive_start_key, scaned_index=None): + def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None): if exclusive_start_key is not None: hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr)) range_key = exclusive_start_key.get(self.range_key_attr) @@ -844,10 +844,10 @@ class Table(BaseModel): if results[-1].range_key is not None: last_evaluated_key[self.range_key_attr] = results[-1].range_key - if scaned_index: + if scanned_index: all_indexes = self.all_indexes() indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) - idx = indexes_by_name[scaned_index] + idx = indexes_by_name[scanned_index] idx_col_list = [i['AttributeName'] for i in idx['KeySchema']] for col in idx_col_list: last_evaluated_key[col] = results[-1].attrs[col] diff --git a/moto/rds/models.py b/moto/rds/models.py index feecefe0c..592516b34 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -1,7 +1,5 @@ from __future__ import unicode_literals -import datetime - import boto.rds from jinja2 import Template @@ -14,95 +12,6 @@ from moto.rds2.models import rds2_backends class Database(BaseModel): - def __init__(self, **kwargs): - self.status = "available" - - self.is_replica = False - self.replicas = [] - - self.region = kwargs.get('region') - self.engine = kwargs.get("engine") - self.engine_version = kwargs.get("engine_version") - if self.engine_version is None: - self.engine_version = "5.6.21" - self.iops = kwargs.get("iops") - self.storage_encrypted = kwargs.get("storage_encrypted", False) - if self.storage_encrypted: - self.kms_key_id = kwargs.get("kms_key_id", "default_kms_key_id") - else: - self.kms_key_id = kwargs.get("kms_key_id") - self.storage_type = kwargs.get("storage_type") - self.master_username = kwargs.get('master_username') - self.master_password = kwargs.get('master_password') - self.auto_minor_version_upgrade = kwargs.get( - 'auto_minor_version_upgrade') - if self.auto_minor_version_upgrade is None: - self.auto_minor_version_upgrade = True - self.allocated_storage = kwargs.get('allocated_storage') - self.db_instance_identifier = kwargs.get('db_instance_identifier') - self.source_db_identifier = kwargs.get("source_db_identifier") - self.db_instance_class = kwargs.get('db_instance_class') - self.port = kwargs.get('port') - self.db_name = kwargs.get("db_name") - self.publicly_accessible = kwargs.get("publicly_accessible") - if self.publicly_accessible is None: - self.publicly_accessible = True - - self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot") - if self.copy_tags_to_snapshot is None: - self.copy_tags_to_snapshot = False - - self.backup_retention_period = kwargs.get("backup_retention_period") - if self.backup_retention_period is None: - self.backup_retention_period = 1 - - self.availability_zone = kwargs.get("availability_zone") - self.multi_az = kwargs.get("multi_az") - self.db_subnet_group_name = kwargs.get("db_subnet_group_name") - self.instance_create_time = str(datetime.datetime.utcnow()) - if self.db_subnet_group_name: - self.db_subnet_group = rds_backends[ - self.region].describe_subnet_groups(self.db_subnet_group_name)[0] - else: - self.db_subnet_group = [] - - self.security_groups = kwargs.get('security_groups', []) - - # PreferredBackupWindow - # PreferredMaintenanceWindow - # backup_retention_period = self._get_param("BackupRetentionPeriod") - # OptionGroupName - # DBParameterGroupName - # VpcSecurityGroupIds.member.N - - @property - def 
db_instance_arn(self): - return "arn:aws:rds:{0}:1234567890:db:{1}".format( - self.region, self.db_instance_identifier) - - @property - def physical_resource_id(self): - return self.db_instance_identifier - - @property - def address(self): - return "{0}.aaaaaaaaaa.{1}.rds.amazonaws.com".format(self.db_instance_identifier, self.region) - - def add_replica(self, replica): - self.replicas.append(replica.db_instance_identifier) - - def remove_replica(self, replica): - self.replicas.remove(replica.db_instance_identifier) - - def set_as_replica(self): - self.is_replica = True - self.replicas = [] - - def update(self, db_kwargs): - for key, value in db_kwargs.items(): - if value is not None: - setattr(self, key, value) - def get_cfn_attribute(self, attribute_name): if attribute_name == 'Endpoint.Address': return self.address From cf899eecff6a8b1471ef454bba9bcbdd8ed68fa0 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 10 Oct 2019 09:33:11 +0100 Subject: [PATCH 14/25] #2384 - Add test case --- tests/test_dynamodb2/test_dynamodb.py | 29 +++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index c4cf05c59..ef5da005a 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2295,6 +2295,35 @@ def test_index_with_unknown_attributes_should_fail(): ex.exception.response['Error']['Message'].should.contain(expected_exception) +@mock_dynamodb2 +def test_sorted_query_with_numerical_sort_key(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + dynamodb.create_table(TableName="CarCollection", + KeySchema=[{ 'AttributeName': "CarModel", 'KeyType': 'HASH'}, + {'AttributeName': "CarPrice", 'KeyType': 'RANGE'}], + AttributeDefinitions=[{'AttributeName': "CarModel", 'AttributeType': "S"}, + {'AttributeName': "CarPrice", 'AttributeType': "N"}], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}) + + def create_item(price): + return {"CarModel": "M", "CarPrice": price} + + table = dynamodb.Table('CarCollection') + items = list(map(create_item, [2, 1, 10, 3])) + for item in items: + table.put_item(Item=item) + + response = table.query(KeyConditionExpression=Key('CarModel').eq("M")) + + response_items = response['Items'] + assert len(items) == len(response_items) + assert all(isinstance(item["CarPrice"], Decimal) for item in response_items) + response_prices = [item["CarPrice"] for item in response_items] + expected_prices = [Decimal(item["CarPrice"]) for item in items] + expected_prices.sort() + assert expected_prices == response_prices, "result items are not sorted by numerical value" + + def _create_user_table(): client = boto3.client('dynamodb', region_name='us-east-1') client.create_table( From 0723fcf6e786b26a4220c79f080296472a9e3a28 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 10 Oct 2019 12:00:31 -0700 Subject: [PATCH 15/25] Supporting more modern Firehose features This implements ExtendedS3DestinationConfiguration, allowing serialization to Parquet and ORC formats as part of a Firehose destination. 
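As a usage sketch of what this enables against the mock (adapted from the test added in this patch; the stream name, ARNs, and database/table names are illustrative):

```python
import boto3
from moto import mock_kinesis


@mock_kinesis
def demo_extended_s3_destination():
    client = boto3.client('firehose', region_name='us-east-1')

    # An extended S3 destination that converts incoming JSON records to Parquet.
    client.create_delivery_stream(
        DeliveryStreamName='to-parquet',
        ExtendedS3DestinationConfiguration={
            'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
            'BucketARN': 'arn:aws:s3:::kinesis-test',
            'DataFormatConversionConfiguration': {
                'Enabled': True,
                'InputFormatConfiguration': {'Deserializer': {'HiveJsonSerDe': {}}},
                'OutputFormatConfiguration': {'Serializer': {'ParquetSerDe': {'Compression': 'SNAPPY'}}},
                'SchemaConfiguration': {
                    'DatabaseName': 'demo_db',
                    'TableName': 'outputTable',
                    'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
                },
            },
        })

    # The mock now reports the destination as an ExtendedS3DestinationDescription.
    destinations = client.describe_delivery_stream(
        DeliveryStreamName='to-parquet')['DeliveryStreamDescription']['Destinations']
    assert 'ExtendedS3DestinationDescription' in destinations[0]
```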
--- moto/kinesis/models.py | 29 ++++---- moto/kinesis/responses.py | 21 +++--- tests/test_kinesis/test_firehose.py | 102 ++++++++++++++++++++++++++-- 3 files changed, 117 insertions(+), 35 deletions(-) diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index e7a389981..965f3367a 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -222,12 +222,8 @@ class DeliveryStream(BaseModel): self.redshift_role_arn = stream_kwargs.get('redshift_role_arn') self.redshift_copy_command = stream_kwargs.get('redshift_copy_command') - self.s3_role_arn = stream_kwargs.get('s3_role_arn') - self.s3_bucket_arn = stream_kwargs.get('s3_bucket_arn') - self.s3_prefix = stream_kwargs.get('s3_prefix') - self.s3_compression_format = stream_kwargs.get( - 's3_compression_format', 'UNCOMPRESSED') - self.s3_buffering_hings = stream_kwargs.get('s3_buffering_hings') + self.s3_config = stream_kwargs.get('s3_config') + self.extended_s3_config = stream_kwargs.get('extended_s3_config') self.redshift_s3_role_arn = stream_kwargs.get('redshift_s3_role_arn') self.redshift_s3_bucket_arn = stream_kwargs.get( @@ -235,8 +231,8 @@ class DeliveryStream(BaseModel): self.redshift_s3_prefix = stream_kwargs.get('redshift_s3_prefix') self.redshift_s3_compression_format = stream_kwargs.get( 'redshift_s3_compression_format', 'UNCOMPRESSED') - self.redshift_s3_buffering_hings = stream_kwargs.get( - 'redshift_s3_buffering_hings') + self.redshift_s3_buffering_hints = stream_kwargs.get( + 'redshift_s3_buffering_hints') self.records = [] self.status = 'ACTIVE' @@ -248,16 +244,15 @@ class DeliveryStream(BaseModel): return 'arn:aws:firehose:us-east-1:123456789012:deliverystream/{0}'.format(self.name) def destinations_to_dict(self): - if self.s3_role_arn: + if self.s3_config: return [{ 'DestinationId': 'string', - 'S3DestinationDescription': { - 'RoleARN': self.s3_role_arn, - 'BucketARN': self.s3_bucket_arn, - 'Prefix': self.s3_prefix, - 'BufferingHints': self.s3_buffering_hings, - 'CompressionFormat': self.s3_compression_format, - } + 'S3DestinationDescription': self.s3_config, + }] + elif self.extended_s3_config: + return [{ + 'DestinationId': 'string', + 'ExtendedS3DestinationDescription': self.extended_s3_config, }] else: return [{ @@ -268,7 +263,7 @@ class DeliveryStream(BaseModel): "RoleARN": self.redshift_role_arn, "S3DestinationDescription": { "BucketARN": self.redshift_s3_bucket_arn, - "BufferingHints": self.redshift_s3_buffering_hings, + "BufferingHints": self.redshift_s3_buffering_hints, "CompressionFormat": self.redshift_s3_compression_format, "Prefix": self.redshift_s3_prefix, "RoleARN": self.redshift_s3_role_arn diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index 3a81bd9f4..aa2b8c225 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -149,6 +149,10 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters['DeliveryStreamName'] redshift_config = self.parameters.get( 'RedshiftDestinationConfiguration') + s3_config = self.parameters.get( + 'S3DestinationConfiguration') + extended_s3_config = self.parameters.get( + 'ExtendedS3DestinationConfiguration') if redshift_config: redshift_s3_config = redshift_config['S3Configuration'] @@ -163,18 +167,13 @@ class KinesisResponse(BaseResponse): 'redshift_s3_bucket_arn': redshift_s3_config['BucketARN'], 'redshift_s3_prefix': redshift_s3_config['Prefix'], 'redshift_s3_compression_format': redshift_s3_config.get('CompressionFormat'), - 'redshift_s3_buffering_hings': redshift_s3_config['BufferingHints'], - } - else: - # S3 
Config - s3_config = self.parameters['S3DestinationConfiguration'] - stream_kwargs = { - 's3_role_arn': s3_config['RoleARN'], - 's3_bucket_arn': s3_config['BucketARN'], - 's3_prefix': s3_config['Prefix'], - 's3_compression_format': s3_config.get('CompressionFormat'), - 's3_buffering_hings': s3_config['BufferingHints'], + 'redshift_s3_buffering_hints': redshift_s3_config['BufferingHints'], } + elif s3_config: + stream_kwargs = {'s3_config': s3_config} + elif extended_s3_config: + stream_kwargs = {'extended_s3_config': extended_s3_config} + stream = self.kinesis_backend.create_delivery_stream( stream_name, **stream_kwargs) return json.dumps({ diff --git a/tests/test_kinesis/test_firehose.py b/tests/test_kinesis/test_firehose.py index 6ab46c6f9..91c1038d3 100644 --- a/tests/test_kinesis/test_firehose.py +++ b/tests/test_kinesis/test_firehose.py @@ -9,7 +9,41 @@ import sure # noqa from moto import mock_kinesis -def create_stream(client, stream_name): +def create_s3_delivery_stream(client, stream_name): + return client.create_delivery_stream( + DeliveryStreamName=stream_name, + DeliveryStreamType="DirectPut", + ExtendedS3DestinationConfiguration={ + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'BucketARN': 'arn:aws:s3:::kinesis-test', + 'Prefix': 'myFolder/', + 'CompressionFormat': 'UNCOMPRESSED', + 'DataFormatConversionConfiguration': { + 'Enabled': True, + 'InputFormatConfiguration': { + 'Deserializer': { + 'HiveJsonSerDe': { + }, + }, + }, + 'OutputFormatConfiguration': { + 'Serializer': { + 'ParquetSerDe': { + 'Compression': 'SNAPPY', + }, + }, + }, + 'SchemaConfiguration': { + 'DatabaseName': stream_name, + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'TableName': 'outputTable', + }, + }, + }) + + + +def create_redshift_delivery_stream(client, stream_name): return client.create_delivery_stream( DeliveryStreamName=stream_name, RedshiftDestinationConfiguration={ @@ -36,10 +70,10 @@ def create_stream(client, stream_name): @mock_kinesis -def test_create_stream(): +def test_create_redshift_delivery_stream(): client = boto3.client('firehose', region_name='us-east-1') - response = create_stream(client, 'stream1') + response = create_redshift_delivery_stream(client, 'stream1') stream_arn = response['DeliveryStreamARN'] response = client.describe_delivery_stream(DeliveryStreamName='stream1') @@ -82,6 +116,60 @@ def test_create_stream(): }) +@mock_kinesis +def test_create_s3_delivery_stream(): + client = boto3.client('firehose', region_name='us-east-1') + + response = create_s3_delivery_stream(client, 'stream1') + stream_arn = response['DeliveryStreamARN'] + + response = client.describe_delivery_stream(DeliveryStreamName='stream1') + stream_description = response['DeliveryStreamDescription'] + + # Sure and Freezegun don't play nicely together + _ = stream_description.pop('CreateTimestamp') + _ = stream_description.pop('LastUpdateTimestamp') + + stream_description.should.equal({ + 'DeliveryStreamName': 'stream1', + 'DeliveryStreamARN': stream_arn, + 'DeliveryStreamStatus': 'ACTIVE', + 'VersionId': 'string', + 'Destinations': [ + { + 'DestinationId': 'string', + 'ExtendedS3DestinationDescription': { + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'BucketARN': 'arn:aws:s3:::kinesis-test', + 'Prefix': 'myFolder/', + 'CompressionFormat': 'UNCOMPRESSED', + 'DataFormatConversionConfiguration': { + 'Enabled': True, + 'InputFormatConfiguration': { + 'Deserializer': { + 'HiveJsonSerDe': { + }, + }, + }, + 'OutputFormatConfiguration': { 
+ 'Serializer': { + 'ParquetSerDe': { + 'Compression': 'SNAPPY', + }, + }, + }, + 'SchemaConfiguration': { + 'DatabaseName': 'stream1', + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'TableName': 'outputTable', + }, + }, + }, + }, + ], + "HasMoreDestinations": False, + }) + @mock_kinesis def test_create_stream_without_redshift(): client = boto3.client('firehose', region_name='us-east-1') @@ -145,8 +233,8 @@ def test_deescribe_non_existant_stream(): def test_list_and_delete_stream(): client = boto3.client('firehose', region_name='us-east-1') - create_stream(client, 'stream1') - create_stream(client, 'stream2') + create_redshift_delivery_stream(client, 'stream1') + create_redshift_delivery_stream(client, 'stream2') set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal( set(['stream1', 'stream2'])) @@ -161,7 +249,7 @@ def test_list_and_delete_stream(): def test_put_record(): client = boto3.client('firehose', region_name='us-east-1') - create_stream(client, 'stream1') + create_redshift_delivery_stream(client, 'stream1') client.put_record( DeliveryStreamName='stream1', Record={ @@ -174,7 +262,7 @@ def test_put_record(): def test_put_record_batch(): client = boto3.client('firehose', region_name='us-east-1') - create_stream(client, 'stream1') + create_redshift_delivery_stream(client, 'stream1') client.put_record_batch( DeliveryStreamName='stream1', Records=[ From 97c4174f30dccb1b876cec70eeee5c251eb7267d Mon Sep 17 00:00:00 2001 From: gruebel Date: Fri, 11 Oct 2019 14:30:25 +0200 Subject: [PATCH 16/25] Extract projection_expression adjustment --- moto/dynamodb2/responses.py | 70 +++++++++++---------------- tests/test_dynamodb2/test_dynamodb.py | 31 ++++++------ 2 files changed, 45 insertions(+), 56 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index da94c4dc1..ddac56caf 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -308,20 +308,9 @@ class DynamoHandler(BaseResponse): projection_expression = self.body.get('ProjectionExpression') expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) - if projection_expression and expression_attribute_names: - expressions = [x.strip() for x in projection_expression.split(',')] - projection_expression = None - for expression in expressions: - if projection_expression is not None: - projection_expression = projection_expression + ", " - else: - projection_expression = "" - - if expression in expression_attribute_names: - projection_expression = projection_expression + \ - expression_attribute_names[expression] - else: - projection_expression = projection_expression + expression + projection_expression = self._adjust_projection_expression( + projection_expression, expression_attribute_names + ) try: item = self.dynamodb_backend.get_item(name, key, projection_expression) @@ -359,20 +348,9 @@ class DynamoHandler(BaseResponse): projection_expression = table_request.get('ProjectionExpression') expression_attribute_names = table_request.get('ExpressionAttributeNames', {}) - if projection_expression and expression_attribute_names: - expressions = [x.strip() for x in projection_expression.split(',')] - projection_expression = None - for expression in expressions: - if projection_expression is not None: - projection_expression = projection_expression + ", " - else: - projection_expression = "" - - if expression in expression_attribute_names: - projection_expression = projection_expression + \ - expression_attribute_names[expression] - else: - 
projection_expression = projection_expression + expression + projection_expression = self._adjust_projection_expression( + projection_expression, expression_attribute_names + ) results["Responses"][table_name] = [] for key in keys: @@ -406,20 +384,9 @@ class DynamoHandler(BaseResponse): filter_expression = self.body.get('FilterExpression') expression_attribute_values = self.body.get('ExpressionAttributeValues', {}) - if projection_expression and expression_attribute_names: - expressions = [x.strip() for x in projection_expression.split(',')] - projection_expression = None - for expression in expressions: - if projection_expression is not None: - projection_expression = projection_expression + ", " - else: - projection_expression = "" - - if expression in expression_attribute_names: - projection_expression = projection_expression + \ - expression_attribute_names[expression] - else: - projection_expression = projection_expression + expression + projection_expression = self._adjust_projection_expression( + projection_expression, expression_attribute_names + ) filter_kwargs = {} @@ -555,6 +522,25 @@ class DynamoHandler(BaseResponse): return dynamo_json_dump(result) + def _adjust_projection_expression(self, projection_expression, expression_attribute_names): + if projection_expression and expression_attribute_names: + expressions = [x.strip() for x in projection_expression.split(',')] + projection_expr = None + for expression in expressions: + if projection_expr is not None: + projection_expr = projection_expr + ", " + else: + projection_expr = "" + + if expression in expression_attribute_names: + projection_expr = projection_expr + \ + expression_attribute_names[expression] + else: + projection_expr = projection_expr + expression + return projection_expr + + return projection_expression + def scan(self): name = self.body['TableName'] diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index fd6681ab6..f8a1c1ef8 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -421,10 +421,10 @@ def test_basic_projection_expression_using_get_item(): ProjectionExpression='body, subject' ) - assert result['Item'] == { + result['Item'].should.be.equal({ 'subject': '123', 'body': 'some test message' - } + }) # The projection expression should not remove data from storage result = table.get_item( @@ -434,11 +434,11 @@ def test_basic_projection_expression_using_get_item(): } ) - assert result['Item'] == { + result['Item'].should.be.equal({ 'forum_name': 'the-key', 'subject': '123', 'body': 'some test message' - } + }) @mock_dynamodb2 @@ -670,11 +670,11 @@ def test_basic_projection_expression_using_get_item_with_attr_expression_names() }, ) - assert result['Item'] == { + result['Item'].should.be.equal({ 'subject': '123', 'body': 'some test message', 'attachment': 'something' - } + }) @mock_dynamodb2 @@ -2392,9 +2392,10 @@ def test_batch_items_with_basic_projection_expression(): 'ProjectionExpression': 'username' } })['Responses']['users'] - assert len(returned_items) == 3 - assert [item['username']['S'] for item in returned_items] == ['user1', 'user2', 'user3'] - assert [item.get('foo') for item in returned_items] == [None, None, None] + + returned_items.should.have.length_of(3) + [item['username']['S'] for item in returned_items].should.be.equal(['user1', 'user2', 'user3']) + [item.get('foo') for item in returned_items].should.be.equal([None, None, None]) # The projection expression should not remove data from storage 
returned_items = dynamodb.batch_get_item(RequestItems = { @@ -2411,8 +2412,9 @@ def test_batch_items_with_basic_projection_expression(): 'ConsistentRead': True } })['Responses']['users'] - assert [item['username']['S'] for item in returned_items] == ['user1', 'user2', 'user3'] - assert [item['foo']['S'] for item in returned_items] == ['bar', 'bar', 'bar'] + + [item['username']['S'] for item in returned_items].should.be.equal(['user1', 'user2', 'user3']) + [item['foo']['S'] for item in returned_items].should.be.equal(['bar', 'bar', 'bar']) @mock_dynamodb2 @@ -2436,9 +2438,10 @@ def test_batch_items_with_basic_projection_expression_and_attr_expression_names( }, } })['Responses']['users'] - assert len(returned_items) == 3 - assert [item['username']['S'] for item in returned_items] == ['user1', 'user2', 'user3'] - assert [item.get('foo') for item in returned_items] == [None, None, None] + + returned_items.should.have.length_of(3) + [item['username']['S'] for item in returned_items].should.be.equal(['user1', 'user2', 'user3']) + [item.get('foo') for item in returned_items].should.be.equal([None, None, None]) @mock_dynamodb2 From 8d527743d5010feb70adcfe1f5b6c7d50bfa66e5 Mon Sep 17 00:00:00 2001 From: gruebel Date: Fri, 11 Oct 2019 17:58:48 +0200 Subject: [PATCH 17/25] Add sns.list_tags_for_resource --- IMPLEMENTATION_COVERAGE.md | 4 ++-- moto/sns/models.py | 9 +++++++- moto/sns/responses.py | 35 ++++++++++++++++++++++++++++- tests/test_sns/test_topics_boto3.py | 31 +++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 4 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 764df13d1..62565a067 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -5866,7 +5866,7 @@ - [ ] update_job ## sns -48% implemented +52% implemented - [ ] add_permission - [ ] check_if_phone_number_is_opted_out - [ ] confirm_subscription @@ -5886,7 +5886,7 @@ - [X] list_platform_applications - [X] list_subscriptions - [ ] list_subscriptions_by_topic -- [ ] list_tags_for_resource +- [x] list_tags_for_resource - [X] list_topics - [ ] opt_in_phone_number - [X] publish diff --git a/moto/sns/models.py b/moto/sns/models.py index 92e6c61de..d7f15338b 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -44,6 +44,8 @@ class Topic(BaseModel): self.subscriptions_confimed = 0 self.subscriptions_deleted = 0 + self._tags = {} + def publish(self, message, subject=None, message_attributes=None): message_id = six.text_type(uuid.uuid4()) subscriptions, _ = self.sns_backend.list_subscriptions(self.arn) @@ -277,7 +279,7 @@ class SNSBackend(BaseBackend): def update_sms_attributes(self, attrs): self.sms_attributes.update(attrs) - def create_topic(self, name, attributes=None): + def create_topic(self, name, attributes=None, tags=None): fails_constraints = not re.match(r'^[a-zA-Z0-9_-]{1,256}$', name) if fails_constraints: raise InvalidParameterValue("Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.") @@ -285,6 +287,8 @@ class SNSBackend(BaseBackend): if attributes: for attribute in attributes: setattr(candidate_topic, camelcase_to_underscores(attribute), attributes[attribute]) + if tags: + candidate_topic._tags = tags if candidate_topic.arn in self.topics: return self.topics[candidate_topic.arn] else: @@ -499,6 +503,9 @@ class SNSBackend(BaseBackend): raise SNSInvalidParameter("Invalid parameter: FilterPolicy: Match value must be String, number, true, false, or null") + 
def list_tags_for_resource(self, resource_arn):
+        return self.topics[resource_arn]._tags
+
 
 sns_backends = {}
 for region in Session().get_available_regions('sns'):
diff --git a/moto/sns/responses.py b/moto/sns/responses.py
index 578c5ea65..7cc085be1 100644
--- a/moto/sns/responses.py
+++ b/moto/sns/responses.py
@@ -30,6 +30,10 @@ class SNSResponse(BaseResponse):
             in attributes
         )
 
+    def _get_tags(self):
+        tags = self._get_list_prefix('Tags.member')
+        return {tag['key']: tag['value'] for tag in tags}
+
     def _parse_message_attributes(self, prefix='', value_namespace='Value.'):
         message_attributes = self._get_object_map(
             'MessageAttributes.entry',
@@ -85,7 +89,8 @@ class SNSResponse(BaseResponse):
     def create_topic(self):
         name = self._get_param('Name')
         attributes = self._get_attributes()
-        topic = self.backend.create_topic(name, attributes)
+        tags = self._get_tags()
+        topic = self.backend.create_topic(name, attributes, tags)
 
         if self.request_json:
             return json.dumps({
@@ -691,6 +696,18 @@ class SNSResponse(BaseResponse):
         template = self.response_template(CONFIRM_SUBSCRIPTION_TEMPLATE)
         return template.render(sub_arn='{0}:68762e72-e9b1-410a-8b3b-903da69ee1d5'.format(arn))
 
+    def list_tags_for_resource(self):
+        arn = self._get_param('ResourceArn')
+
+        if arn not in self.backend.topics:
+            error_response = self._error('ResourceNotFound', 'Resource does not exist')
+            return error_response, dict(status=404)
+
+        result = self.backend.list_tags_for_resource(arn)
+
+        template = self.response_template(LIST_TAGS_FOR_RESOURCE_TEMPLATE)
+        return template.render(tags=result)
+
 
 CREATE_TOPIC_TEMPLATE = """<CreateTopicResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
@@ -1072,3 +1089,19 @@ CONFIRM_SUBSCRIPTION_TEMPLATE = """<ConfirmSubscriptionResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
+
+
+LIST_TAGS_FOR_RESOURCE_TEMPLATE = """<ListTagsForResourceResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
+  <ListTagsForResourceResult>
+    <Tags>
+      {% for name, value in tags.items() %}
+      <member>
+        <Key>{{ name }}</Key>
+        <Value>{{ value }}</Value>
+      </member>
+      {% endfor %}
+    </Tags>
+  </ListTagsForResourceResult>
+  <ResponseMetadata>
+    <RequestId>97fa763f-861b-5223-a946-20251f2a42e2</RequestId>
+  </ResponseMetadata>
+</ListTagsForResourceResponse>"""
diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py
index 870fa6f6e..4a994e724 100644
--- a/tests/test_sns/test_topics_boto3.py
+++ b/tests/test_sns/test_topics_boto3.py
@@ -44,6 +44,37 @@ def test_create_topic_with_attributes():
     attributes['DisplayName'].should.equal('test-topic')
 
 
+@mock_sns
+def test_create_topic_with_tags():
+    conn = boto3.client("sns", region_name="us-east-1")
+    conn.create_topic(
+        Name='some-topic-with-attribute',
+        Tags=[
+            {
+                'Key': 'tag_key_1',
+                'Value': 'tag_value_1'
+            },
+            {
+                'Key': 'tag_key_2',
+                'Value': 'tag_value_2'
+            }
+        ]
+    )
+    topics_json = conn.list_topics()
+    topic_arn = topics_json["Topics"][0]['TopicArn']
+
+    conn.list_tags_for_resource(ResourceArn=topic_arn)['Tags'].should.equal([
+        {
+            'Key': 'tag_key_1',
+            'Value': 'tag_value_1'
+        },
+        {
+            'Key': 'tag_key_2',
+            'Value': 'tag_value_2'
+        }
+    ])
+
+
 @mock_sns
 def test_create_topic_should_be_indempodent():
     conn = boto3.client("sns", region_name="us-east-1")
From 017cbb52ca7230de20ab004387ad1bcaaed163e4 Mon Sep 17 00:00:00 2001
From: Jamie Starke
Date: Fri, 11 Oct 2019 22:13:55 -0700
Subject: [PATCH 18/25] Add exception for describe_container_instances to
 match the AWS API
---
 moto/ecs/models.py               | 2 ++
 tests/test_ecs/test_ecs_boto3.py | 6 ++++++
 2 files changed, 8 insertions(+)

diff --git a/moto/ecs/models.py b/moto/ecs/models.py
index 9416db84a..5aa9ae2cb 100644
--- a/moto/ecs/models.py
+++ b/moto/ecs/models.py
@@ -790,6 +790,8 @@ class EC2ContainerServiceBackend(BaseBackend):
         cluster_name = cluster_str.split('/')[-1]
         if cluster_name not in self.clusters:
             raise Exception("{0} is not a cluster".format(cluster_name))
+        if not list_container_instance_ids:
+            raise
JsonRESTError('InvalidParameterException', 'Container instance cannot be empty') failures = [] container_instance_objects = [] for container_instance_id in list_container_instance_ids: diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 1b501f519..16d7a4d0d 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -889,6 +889,12 @@ def test_describe_container_instances(): instance.keys().should.contain('runningTasksCount') instance.keys().should.contain('pendingTasksCount') + with assert_raises(ClientError) as e: + ecs_client.describe_container_instances( + cluster=test_cluster_name, + containerInstances=[] + ) + @mock_ec2 @mock_ecs From 726775678cadc53f7340fa69e8e7c4effefa39df Mon Sep 17 00:00:00 2001 From: gruebel Date: Sat, 12 Oct 2019 20:36:15 +0200 Subject: [PATCH 19/25] Add sns.tag_resource --- IMPLEMENTATION_COVERAGE.md | 4 +- moto/sns/exceptions.py | 16 ++++ moto/sns/models.py | 17 +++- moto/sns/responses.py | 19 +++- tests/test_sns/test_topics_boto3.py | 138 +++++++++++++++++++++++++++- 5 files changed, 183 insertions(+), 11 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 62565a067..972031ad2 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -5866,7 +5866,7 @@ - [ ] update_job ## sns -52% implemented +55% implemented - [ ] add_permission - [ ] check_if_phone_number_is_opted_out - [ ] confirm_subscription @@ -5897,7 +5897,7 @@ - [X] set_subscription_attributes - [ ] set_topic_attributes - [X] subscribe -- [ ] tag_resource +- [x] tag_resource - [X] unsubscribe - [ ] untag_resource diff --git a/moto/sns/exceptions.py b/moto/sns/exceptions.py index 706b3b5cc..6d29e7acb 100644 --- a/moto/sns/exceptions.py +++ b/moto/sns/exceptions.py @@ -10,6 +10,14 @@ class SNSNotFoundError(RESTError): "NotFound", message) +class ResourceNotFoundError(RESTError): + code = 404 + + def __init__(self): + super(ResourceNotFoundError, self).__init__( + 'ResourceNotFound', 'Resource does not exist') + + class DuplicateSnsEndpointError(RESTError): code = 400 @@ -42,6 +50,14 @@ class InvalidParameterValue(RESTError): "InvalidParameterValue", message) +class TagLimitExceededError(RESTError): + code = 400 + + def __init__(self): + super(TagLimitExceededError, self).__init__( + 'TagLimitExceeded', 'Could not complete request: tag quota of per resource exceeded') + + class InternalError(RESTError): code = 500 diff --git a/moto/sns/models.py b/moto/sns/models.py index d7f15338b..55b243e45 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -18,7 +18,7 @@ from moto.awslambda import lambda_backends from .exceptions import ( SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter, - InvalidParameterValue, InternalError + InvalidParameterValue, InternalError, ResourceNotFoundError, TagLimitExceededError ) from .utils import make_arn_for_topic, make_arn_for_subscription @@ -504,8 +504,23 @@ class SNSBackend(BaseBackend): raise SNSInvalidParameter("Invalid parameter: FilterPolicy: Match value must be String, number, true, false, or null") def list_tags_for_resource(self, resource_arn): + if resource_arn not in self.topics: + raise ResourceNotFoundError + return self.topics[resource_arn]._tags + def tag_resource(self, resource_arn, tags): + if resource_arn not in self.topics: + raise ResourceNotFoundError + + updated_tags = self.topics[resource_arn]._tags.copy() + updated_tags.update(tags) + + if len(updated_tags) > 50: + raise TagLimitExceededError + + 
self.topics[resource_arn]._tags = updated_tags
+
 
 sns_backends = {}
 for region in Session().get_available_regions('sns'):
diff --git a/moto/sns/responses.py b/moto/sns/responses.py
index 7cc085be1..3b8383d1f 100644
--- a/moto/sns/responses.py
+++ b/moto/sns/responses.py
@@ -699,15 +699,19 @@ class SNSResponse(BaseResponse):
     def list_tags_for_resource(self):
         arn = self._get_param('ResourceArn')
 
-        if arn not in self.backend.topics:
-            error_response = self._error('ResourceNotFound', 'Resource does not exist')
-            return error_response, dict(status=404)
-
         result = self.backend.list_tags_for_resource(arn)
 
         template = self.response_template(LIST_TAGS_FOR_RESOURCE_TEMPLATE)
         return template.render(tags=result)
 
+    def tag_resource(self):
+        arn = self._get_param('ResourceArn')
+        tags = self._get_tags()
+
+        self.backend.tag_resource(arn, tags)
+
+        return self.response_template(TAG_RESOURCE_TEMPLATE).render()
+
 
 CREATE_TOPIC_TEMPLATE = """<CreateTopicResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
@@ -1105,3 +1109,10 @@ LIST_TAGS_FOR_RESOURCE_TEMPLATE = """<ListTagsForResourceResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
+
+
+TAG_RESOURCE_TEMPLATE = """<TagResourceResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
+  <ResponseMetadata>
+    <RequestId>fd4ab1da-692f-50a7-95ad-e7c665877d98</RequestId>
+  </ResponseMetadata>
+</TagResourceResponse>"""
diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py
index 4a994e724..ace825af5 100644
--- a/tests/test_sns/test_topics_boto3.py
+++ b/tests/test_sns/test_topics_boto3.py
@@ -47,8 +47,8 @@ def test_create_topic_with_attributes():
 @mock_sns
 def test_create_topic_with_tags():
     conn = boto3.client("sns", region_name="us-east-1")
-    conn.create_topic(
-        Name='some-topic-with-attribute',
+    response = conn.create_topic(
+        Name='some-topic-with-tags',
         Tags=[
             {
                 'Key': 'tag_key_1',
                 'Value': 'tag_value_1'
@@ -60,8 +60,7 @@ def test_create_topic_with_tags():
             }
         ]
     )
-    topics_json = conn.list_topics()
-    topic_arn = topics_json["Topics"][0]['TopicArn']
+    topic_arn = response['TopicArn']
 
     conn.list_tags_for_resource(ResourceArn=topic_arn)['Tags'].should.equal([
         {
@@ -231,3 +230,134 @@ def test_add_remove_permissions():
         TopicArn=response['TopicArn'],
         Label='Test1234'
     )
+
+
+@mock_sns
+def test_tag_topic():
+    conn = boto3.client('sns', region_name='us-east-1')
+    response = conn.create_topic(
+        Name = 'some-topic-with-tags'
+    )
+    topic_arn = response['TopicArn']
+
+    conn.tag_resource(
+        ResourceArn=topic_arn,
+        Tags=[
+            {
+                'Key': 'tag_key_1',
+                'Value': 'tag_value_1'
+            }
+        ]
+    )
+    conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
+        {
+            'Key': 'tag_key_1',
+            'Value': 'tag_value_1'
+        }
+    ])
+
+    conn.tag_resource(
+        ResourceArn=topic_arn,
+        Tags=[
+            {
+                'Key': 'tag_key_2',
+                'Value': 'tag_value_2'
+            }
+        ]
+    )
+    conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
+        {
+            'Key': 'tag_key_1',
+            'Value': 'tag_value_1'
+        },
+        {
+            'Key': 'tag_key_2',
+            'Value': 'tag_value_2'
+        }
+    ])
+
+    conn.tag_resource(
+        ResourceArn = topic_arn,
+        Tags = [
+            {
+                'Key': 'tag_key_1',
+                'Value': 'tag_value_X'
+            }
+        ]
+    )
+    conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
+        {
+            'Key': 'tag_key_1',
+            'Value': 'tag_value_X'
+        },
+        {
+            'Key': 'tag_key_2',
+            'Value': 'tag_value_2'
+        }
+    ])
+
+
+@mock_sns
+def test_list_tags_for_resource_error():
+    conn = boto3.client('sns', region_name = 'us-east-1')
+    conn.create_topic(
+        Name = 'some-topic-with-tags',
+        Tags = [
+            {
+                'Key': 'tag_key_1',
+                'Value': 'tag_value_X'
+            }
+        ]
+    )
+
+    conn.list_tags_for_resource.when.called_with(
+        ResourceArn = 'not-existing-topic'
+    ).should.throw(
+        ClientError,
+        'Resource does not exist'
+    )
+
+
+@mock_sns
+def test_tag_resource_errors():
+    conn = boto3.client('sns', region_name = 'us-east-1')
+    response = conn.create_topic(
+        Name =
'some-topic-with-tags',
+        Tags = [
+            {
+                'Key': 'tag_key_1',
+                'Value': 'tag_value_X'
+            }
+        ]
+    )
+    topic_arn = response['TopicArn']
+
+    conn.tag_resource.when.called_with(
+        ResourceArn = 'not-existing-topic',
+        Tags = [
+            {
+                'Key': 'tag_key_1',
+                'Value': 'tag_value_1'
+            }
+        ]
+    ).should.throw(
+        ClientError,
+        'Resource does not exist'
+    )
+
+    too_many_tags = [{'Key': 'tag_key_{}'.format(i), 'Value': 'tag_value_{}'.format(i)} for i in range(51)]
+    conn.tag_resource.when.called_with(
+        ResourceArn = topic_arn,
+        Tags = too_many_tags
+    ).should.throw(
+        ClientError,
+        'Could not complete request: tag quota of per resource exceeded'
+    )
+
+    # when the request fails, the tags should not be updated
+    conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
+        {
+            'Key': 'tag_key_1',
+            'Value': 'tag_value_X'
+        }
+    ])
From 26ef792690243c026c71a19e7d70c74e9c44276b Mon Sep 17 00:00:00 2001
From: gruebel
Date: Sat, 12 Oct 2019 21:10:51 +0200
Subject: [PATCH 20/25] Add sns.untag_resource
---
 IMPLEMENTATION_COVERAGE.md          |  4 +-
 moto/sns/models.py                  |  7 +++++
 moto/sns/responses.py               | 15 +++++++
 tests/test_sns/test_topics_boto3.py | 70 +++++++++++++++++++++++++++++
 4 files changed, 94 insertions(+), 2 deletions(-)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index 972031ad2..57f169b8a 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -5866,7 +5866,7 @@
 - [ ] update_job
 
 ## sns
-55% implemented
+58% implemented
 - [ ] add_permission
 - [ ] check_if_phone_number_is_opted_out
 - [ ] confirm_subscription
@@ -5899,7 +5899,7 @@
 - [X] subscribe
 - [x] tag_resource
 - [X] unsubscribe
-- [ ] untag_resource
+- [x] untag_resource
 
 ## sqs
 65% implemented
diff --git a/moto/sns/models.py b/moto/sns/models.py
index 55b243e45..51b5c2b2c 100644
--- a/moto/sns/models.py
+++ b/moto/sns/models.py
@@ -521,6 +521,13 @@ class SNSBackend(BaseBackend):
 
         self.topics[resource_arn]._tags = updated_tags
 
+    def untag_resource(self, resource_arn, tag_keys):
+        if resource_arn not in self.topics:
+            raise ResourceNotFoundError
+
+        for key in tag_keys:
+            self.topics[resource_arn]._tags.pop(key, None)
+
 
 sns_backends = {}
diff --git a/moto/sns/responses.py b/moto/sns/responses.py
index 3b8383d1f..c315e2a87 100644
--- a/moto/sns/responses.py
+++ b/moto/sns/responses.py
@@ -712,6 +712,14 @@ class SNSResponse(BaseResponse):
 
         return self.response_template(TAG_RESOURCE_TEMPLATE).render()
 
+    def untag_resource(self):
+        arn = self._get_param('ResourceArn')
+        tag_keys = self._get_multi_param('TagKeys.member')
+
+        self.backend.untag_resource(arn, tag_keys)
+
+        return self.response_template(UNTAG_RESOURCE_TEMPLATE).render()
+
 
 CREATE_TOPIC_TEMPLATE = """<CreateTopicResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
@@ -1116,3 +1124,10 @@ TAG_RESOURCE_TEMPLATE = """<TagResourceResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
+
+
+UNTAG_RESOURCE_TEMPLATE = """<UntagResourceResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
+  <ResponseMetadata>
+    <RequestId>14eb7b1a-4cbd-5a56-80db-2d06412df769</RequestId>
+  </ResponseMetadata>
+</UntagResourceResponse>"""
diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py
index ace825af5..05c8f74b4 100644
--- a/tests/test_sns/test_topics_boto3.py
+++ b/tests/test_sns/test_topics_boto3.py
@@ -297,6 +297,52 @@ def test_tag_topic():
     ])
 
 
+@mock_sns
+def test_untag_topic():
+    conn = boto3.client('sns', region_name = 'us-east-1')
+    response = conn.create_topic(
+        Name = 'some-topic-with-tags',
+        Tags = [
+            {
+                'Key': 'tag_key_1',
+                'Value': 'tag_value_1'
+            },
+            {
+                'Key': 'tag_key_2',
+                'Value': 'tag_value_2'
+            }
+        ]
+    )
+    topic_arn = response['TopicArn']
+
+    conn.untag_resource(
+        ResourceArn = topic_arn,
+        TagKeys = [
+            'tag_key_1'
+        ]
+    )
+    conn.list_tags_for_resource(ResourceArn =
topic_arn)['Tags'].should.equal([ + { + 'Key': 'tag_key_2', + 'Value': 'tag_value_2' + } + ]) + + # removing a non existing tag should not raise any error + conn.untag_resource( + ResourceArn = topic_arn, + TagKeys = [ + 'not-existing-tag' + ] + ) + conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([ + { + 'Key': 'tag_key_2', + 'Value': 'tag_value_2' + } + ]) + + @mock_sns def test_list_tags_for_resource_error(): conn = boto3.client('sns', region_name = 'us-east-1') @@ -361,3 +407,27 @@ def test_tag_resource_errors(): 'Value': 'tag_value_X' } ]) + + +@mock_sns +def test_untag_resource_error(): + conn = boto3.client('sns', region_name = 'us-east-1') + conn.create_topic( + Name = 'some-topic-with-tags', + Tags = [ + { + 'Key': 'tag_key_1', + 'Value': 'tag_value_X' + } + ] + ) + + conn.untag_resource.when.called_with( + ResourceArn = 'not-existing-topic', + TagKeys = [ + 'tag_key_1' + ] + ).should.throw( + ClientError, + 'Resource does not exist' + ) From 49045fe6807d11aa28a57f65eb38f08a887d49f3 Mon Sep 17 00:00:00 2001 From: gruebel Date: Sun, 13 Oct 2019 14:51:31 +0200 Subject: [PATCH 21/25] Add tags parameter to sqs.create_queue --- moto/sqs/models.py | 8 +++++- moto/sqs/responses.py | 13 ++++++--- tests/test_sqs/test_sqs.py | 58 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 5 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 188e25e9e..260447dcf 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -415,7 +415,7 @@ class SQSBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def create_queue(self, name, **kwargs): + def create_queue(self, name, tags, **kwargs): queue = self.queues.get(name) if queue: try: @@ -454,6 +454,8 @@ class SQSBackend(BaseBackend): pass queue = Queue(name, region=self.region_name, **kwargs) self.queues[name] = queue + + queue.tags = tags return queue def list_queues(self, queue_name_prefix): @@ -654,6 +656,10 @@ class SQSBackend(BaseBackend): def untag_queue(self, queue_name, tag_keys): queue = self.get_queue(queue_name) + + if len(tag_keys) == 0: + raise RESTError('InvalidParameterValue', 'Tag keys must be between 1 and 128 characters in length.') + for key in tag_keys: try: del queue.tags[key] diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 5ddaf8849..ce1dcbf8c 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -33,6 +33,12 @@ class SQSResponse(BaseResponse): self._attribute = self._get_map_prefix('Attribute', key_end='.Name', value_end='.Value') return self._attribute + @property + def tags(self): + if not hasattr(self, '_tags'): + self._tags = self._get_map_prefix('Tag', key_end='.Key', value_end='.Value') + return self._tags + def _get_queue_name(self): try: queue_name = self.querystring.get('QueueUrl')[0].split("/")[-1] @@ -73,12 +79,12 @@ class SQSResponse(BaseResponse): queue_name = self._get_param("QueueName") try: - queue = self.sqs_backend.create_queue(queue_name, **self.attribute) + queue = self.sqs_backend.create_queue(queue_name, self.tags, **self.attribute) except MessageAttributesInvalid as e: return self._error('InvalidParameterValue', e.description) template = self.response_template(CREATE_QUEUE_RESPONSE) - return template.render(queue=queue, request_url=request_url) + return template.render(queue_url=queue.url(request_url)) def get_queue_url(self): request_url = urlparse(self.uri) @@ -416,8 +422,7 @@ class SQSResponse(BaseResponse): CREATE_QUEUE_RESPONSE = """ - {{ queue.url(request_url) }} - {{ 
queue.visibility_timeout }} + {{ queue_url }} diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 56d82ea61..6d06b87cc 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -17,6 +17,7 @@ import time import uuid from moto import settings, mock_sqs, mock_sqs_deprecated +from moto.sqs.exceptions import QueueDoesNotExist from tests.helpers import requires_boto_gte import tests.backport_assert_raises # noqa from nose.tools import assert_raises @@ -140,6 +141,22 @@ def test_create_queue_kms(): queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600') +@mock_sqs +def test_create_queue_with_tags(): + client = boto3.client('sqs', region_name='us-east-1') + response = client.create_queue( + QueueName = 'test-queue-with-tags', + tags = { + 'tag_key_1': 'tag_value_1' + } + ) + queue_url = response['QueueUrl'] + + client.list_queue_tags(QueueUrl = queue_url)['Tags'].should.equal({ + 'tag_key_1': 'tag_value_1' + }) + + @mock_sqs def test_get_nonexistent_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') @@ -959,6 +976,47 @@ def test_tags(): resp['Tags'].should.contain('test1') resp['Tags'].should_not.contain('test2') + # removing a non existing tag should not raise any error + client.untag_queue( + QueueUrl=queue_url, + TagKeys=[ + 'not-existing-tag' + ] + ) + client.list_queue_tags(QueueUrl=queue_url)['Tags'].should.equal({ + 'test1': 'value1' + }) + + +@mock_sqs +def test_untag_queue_errors(): + client = boto3.client('sqs', region_name='us-east-1') + + response = client.create_queue( + QueueName='test-queue-with-tags', + tags={ + 'tag_key_1': 'tag_value_1' + } + ) + queue_url = response['QueueUrl'] + + client.untag_queue.when.called_with( + QueueUrl=queue_url + '-not-existing', + TagKeys=[ + 'tag_key_1' + ] + ).should.throw( + QueueDoesNotExist + ) + + client.untag_queue.when.called_with( + QueueUrl=queue_url, + TagKeys=[] + ).should.throw( + ClientError, + 'Tag keys must be between 1 and 128 characters in length.' + ) + @mock_sqs def test_create_fifo_queue_with_dlq(): From 0d8c81a211020c873f7cafc552b5a6e133571158 Mon Sep 17 00:00:00 2001 From: gruebel Date: Sun, 13 Oct 2019 17:18:11 +0200 Subject: [PATCH 22/25] fix CloudFormation tests --- moto/sqs/models.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 260447dcf..922da704f 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -415,7 +415,7 @@ class SQSBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def create_queue(self, name, tags, **kwargs): + def create_queue(self, name, tags=None, **kwargs): queue = self.queues.get(name) if queue: try: @@ -455,7 +455,9 @@ class SQSBackend(BaseBackend): queue = Queue(name, region=self.region_name, **kwargs) self.queues[name] = queue - queue.tags = tags + if tags: + queue.tags = tags + return queue def list_queues(self, queue_name_prefix): From 2a2c3e80f0d6a2c9498a1d54fdf1fd23fb2ef02b Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Thu, 3 Oct 2019 18:00:07 -0700 Subject: [PATCH 23/25] Adding support for querying the AWS Config resource for S3. 
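As a sketch of the behaviour this adds (mirroring the test added to `tests/test_config/test_config.py` below; the bucket name is illustrative):

```python
import boto3
from moto import mock_config, mock_s3


@mock_config
@mock_s3
def demo_query_s3_through_config():
    # Create a bucket in the mocked S3 backend...
    s3 = boto3.client('s3', region_name='us-west-2')
    s3.create_bucket(Bucket='bucket1',
                     CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})

    # ...then read its configuration back through the mocked Config API.
    config = boto3.client('config', region_name='us-west-2')
    item = config.get_resource_config_history(
        resourceType='AWS::S3::Bucket', resourceId='bucket1')['configurationItems'][0]
    assert item['resourceName'] == 'bucket1'
    assert item['arn'] == 'arn:aws:s3:::bucket1'
```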
- Support for batch requests and batch aggregation requests still needs to be added in a future PR
---
 CONFIG_README.md                 |  19 +-
 moto/config/exceptions.py        |  22 ++
 moto/config/models.py            |  36 +++-
 moto/config/responses.py         |  10 +-
 moto/core/models.py              |  29 ++-
 moto/s3/config.py                |  32 +++
 moto/s3/models.py                | 229 +++++++++++++++++++-
 tests/test_config/test_config.py |  32 +++
 tests/test_s3/test_s3.py         | 360 ++++++++++++++++++++++++++++++-
 9 files changed, 746 insertions(+), 23 deletions(-)

diff --git a/CONFIG_README.md b/CONFIG_README.md
index 9c158a5e4..356bb87a0 100644
--- a/CONFIG_README.md
+++ b/CONFIG_README.md
@@ -87,7 +87,7 @@ that look like this:
 ]
 ```
 
-It's recommended to read the comment for the `ConfigQueryModel` [base class here](moto/core/models.py).
+It's recommended to read the comment for the `ConfigQueryModel`'s `list_config_service_resources` function in the [base class](moto/core/models.py).
 
 ^^ The AWS Config code will see this and format it correct for both aggregated and non-aggregated calls.
@@ -102,6 +102,19 @@ An example of a working implementation of this is [S3](moto/s3/config.py).
 Pagination should generally be able to pull out the resource across any region so should be sharded by `region-item-name` -- not done for S3
 because S3 has a globally unique name space.
 
-
 ### Describing Resources
-TODO: Need to fill this in when it's implemented
+Fetching a resource's configuration has some similarities to listing resources, but it requires more work to implement. Because a resource
+can be configured in many different ways, care is needed to ensure that the Config dict returned is correct.
+
+For most resource types the following is true:
+
+1. There are regional backends with their own sets of data
+1. Config aggregation can pull data from any backend region -- we assume that everything lives in the same account
+
+The current implementation is for S3. S3 is very complex, and what Config returns for a bucket depends on how the bucket is configured.
+
+When implementing resource config fetching, you will need to return at a minimum `None` if the resource is not found, or a `dict` that looks
+like what AWS Config would return.
+
+It's recommended to read the comment for the `ConfigQueryModel`'s `get_config_resource` function in the [base class](moto/core/models.py).
diff --git a/moto/config/exceptions.py b/moto/config/exceptions.py
index 0b87b329e..611f3640c 100644
--- a/moto/config/exceptions.py
+++ b/moto/config/exceptions.py
@@ -254,3 +254,25 @@ class TooManyResourceIds(JsonRESTError):
     def __init__(self):
         super(TooManyResourceIds, self).__init__('ValidationException', "The specified list had more than 20 resource ID's.
" "It must have '20' or less items") + + +class ResourceNotDiscoveredException(JsonRESTError): + code = 400 + + def __init__(self, type, resource): + super(ResourceNotDiscoveredException, self).__init__('ResourceNotDiscoveredException', + 'Resource {resource} of resourceType:{type} is unknown or has not been ' + 'discovered'.format(resource=resource, type=type)) + + +class TooManyResourceKeys(JsonRESTError): + code = 400 + + def __init__(self, bad_list): + message = '1 validation error detected: Value \'{bad_list}\' at ' \ + '\'resourceKeys\' failed to satisfy constraint: ' \ + 'Member must have length less than or equal to 100'.format(bad_list=bad_list) + # For PY2: + message = str(message) + + super(TooManyResourceKeys, self).__init__("ValidationException", message) diff --git a/moto/config/models.py b/moto/config/models.py index 3c41dcdc7..c93c03085 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -17,7 +17,8 @@ from moto.config.exceptions import InvalidResourceTypeException, InvalidDelivery InvalidSNSTopicARNException, MaxNumberOfDeliveryChannelsExceededException, NoAvailableDeliveryChannelException, \ NoSuchDeliveryChannelException, LastDeliveryChannelDeleteFailedException, TagKeyTooBig, \ TooManyTags, TagValueTooBig, TooManyAccountSources, InvalidParameterValueException, InvalidNextTokenException, \ - NoSuchConfigurationAggregatorException, InvalidTagCharacters, DuplicateTags, InvalidLimit, InvalidResourceParameters, TooManyResourceIds + NoSuchConfigurationAggregatorException, InvalidTagCharacters, DuplicateTags, InvalidLimit, InvalidResourceParameters, \ + TooManyResourceIds, ResourceNotDiscoveredException from moto.core import BaseBackend, BaseModel from moto.s3.config import s3_config_query @@ -790,6 +791,39 @@ class ConfigBackend(BaseBackend): return result + def get_resource_config_history(self, type, id, backend_region): + """Returns the configuration of an item in the AWS Config format of the resource for the current regional backend. + + NOTE: This is --NOT-- returning history as it is not supported in moto at this time. (PR's welcome!) + As such, the later_time, earlier_time, limit, and next_token are ignored as this will only + return 1 item. (If no items, it raises an exception) + """ + # If the type isn't implemented then we won't find the item: + if type not in RESOURCE_MAP: + raise ResourceNotDiscoveredException(type, id) + + # Is the resource type global? 
+ if RESOURCE_MAP[type].backends.get('global'): + backend_region = 'global' + + # If the backend region isn't implemented then we won't find the item: + if not RESOURCE_MAP[type].backends.get(backend_region): + raise ResourceNotDiscoveredException(type, id) + + # Get the item: + item = RESOURCE_MAP[type].get_config_resource(id, backend_region=backend_region) + if not item: + raise ResourceNotDiscoveredException(type, id) + + item['accountId'] = DEFAULT_ACCOUNT_ID + + return {'configurationItems': [item]} + + def batch_get_resource_config(self, resource_keys, backend_region): + """Returns the configuration of an item in the AWS Config format of the resource for the current regional backend.""" + # Can't have more than 100 items + pass + config_backends = {} boto3_session = Session() diff --git a/moto/config/responses.py b/moto/config/responses.py index e022997ac..470911446 100644 --- a/moto/config/responses.py +++ b/moto/config/responses.py @@ -102,6 +102,12 @@ class ConfigResponse(BaseResponse): self._get_param('NextToken')) return json.dumps(schema) + def get_resource_config_history(self): + schema = self.config_backend.get_resource_config_history(self._get_param('resourceType'), + self._get_param('resourceId'), + self.region) + return json.dumps(schema) + """ def batch_get_resource_config(self): # TODO implement me! @@ -110,8 +116,4 @@ class ConfigResponse(BaseResponse): def batch_get_aggregate_resource_config(self): # TODO implement me! return "" - - def get_resource_config_history(self): - # TODO implement me! - return "" """ diff --git a/moto/core/models.py b/moto/core/models.py index a68ad9de9..e0ff5ba42 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -554,7 +554,7 @@ class ConfigQueryModel(object): This supports both aggregated and non-aggregated listing. The following notes the difference: - - Non Aggregated Listing - + - Non-Aggregated Listing - This only lists resources within a region. The way that this is implemented in moto is based on the region for the resource backend. @@ -593,8 +593,31 @@ class ConfigQueryModel(object): """ raise NotImplementedError() - def get_config_resource(self): - """TODO implement me.""" + def get_config_resource(self, resource_id, resource_name=None, backend_region=None, resource_region=None): + """For AWS Config. This will query the backend for the specific resource type configuration. + + This supports both aggregated, and non-aggregated fetching -- for batched fetching -- the Config batching requests + will call this function N times to fetch the N objects needing to be fetched. + + - Non-Aggregated Fetching - + This only fetches a resource config within a region. The way that this is implemented in moto is based on the region + for the resource backend. + + You must set the `backend_region` to the region that the API request arrived from. `resource_region` should be set to `None`. + + - Aggregated Fetching - + This fetches resources from all potential regional backends. For non-global resource types, this should collect a full + list of resources from all the backends, and then be able to filter from the resource region. This is because an + aggregator can aggregate resources from multiple regions. In moto, aggregated regions will *assume full aggregation + from all resources in all regions for a given resource type*. + + ... 
+ :param resource_id: + :param resource_name: + :param backend_region: + :param resource_region: + :return: + """ raise NotImplementedError() diff --git a/moto/s3/config.py b/moto/s3/config.py index 9f81b3684..1b5afd070 100644 --- a/moto/s3/config.py +++ b/moto/s3/config.py @@ -1,3 +1,5 @@ +import json + from moto.core.exceptions import InvalidNextTokenException from moto.core.models import ConfigQueryModel from moto.s3 import s3_backends @@ -66,5 +68,35 @@ class S3ConfigQuery(ConfigQueryModel): return [{'type': 'AWS::S3::Bucket', 'id': bucket, 'name': bucket, 'region': self.backends['global'].buckets[bucket].region_name} for bucket in bucket_list], new_token + def get_config_resource(self, resource_id, resource_name=None, backend_region=None, resource_region=None): + # backend_region is ignored for S3 as the backend is 'global' + + # Get the bucket: + bucket = self.backends['global'].buckets.get(resource_id, {}) + + if not bucket: + return + + # Are we filtering based on region? + if resource_region and bucket.region_name != resource_region: + return + + # Are we also filtering on bucket name? + if resource_name and bucket.name != resource_name: + return + + # Format the bucket to the AWS Config format: + config_data = bucket.to_config_dict() + + # The 'configuration' field is also a JSON string: + config_data['configuration'] = json.dumps(config_data['configuration']) + + # Supplementary config need all values converted to JSON strings if they are not strings already: + for field, value in config_data['supplementaryConfiguration'].items(): + if not isinstance(value, str): + config_data['supplementaryConfiguration'][field] = json.dumps(value) + + return config_data + s3_config_query = S3ConfigQuery(s3_backends) diff --git a/moto/s3/models.py b/moto/s3/models.py index 67293f385..ef49d7f95 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1,4 +1,6 @@ +# -*- coding: utf-8 -*- from __future__ import unicode_literals + import os import base64 import datetime @@ -10,6 +12,7 @@ import random import string import tempfile import sys +import time import uuid import six @@ -32,6 +35,7 @@ STORAGE_CLASS = ["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE"] DEFAULT_KEY_BUFFER_SIZE = 16 * 1024 * 1024 DEFAULT_TEXT_ENCODING = sys.getdefaultencoding() +OWNER = '75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a' class FakeDeleteMarker(BaseModel): @@ -316,6 +320,14 @@ PERMISSION_READ = 'READ' PERMISSION_WRITE_ACP = 'WRITE_ACP' PERMISSION_READ_ACP = 'READ_ACP' +CAMEL_CASED_PERMISSIONS = { + 'FULL_CONTROL': 'FullControl', + 'WRITE': 'Write', + 'READ': 'Read', + 'WRITE_ACP': 'WriteAcp', + 'READ_ACP': 'ReadAcp' +} + class FakeGrant(BaseModel): @@ -346,10 +358,43 @@ class FakeAcl(BaseModel): def __repr__(self): return "FakeAcl(grants: {})".format(self.grants) + def to_config_dict(self): + """Returns the object into the format expected by AWS Config""" + data = { + 'grantSet': None, # Always setting this to None. Feel free to change. 
+ 'owner': {'displayName': None, 'id': OWNER} + } + + # Add details for each Grant: + grant_list = [] + for grant in self.grants: + permissions = grant.permissions if isinstance(grant.permissions, list) else [grant.permissions] + for permission in permissions: + for grantee in grant.grantees: + # Config does not add the owner if its permissions are FULL_CONTROL: + if permission == 'FULL_CONTROL' and grantee.id == OWNER: + continue + + if grantee.uri: + grant_list.append({'grantee': grantee.uri.split('http://acs.amazonaws.com/groups/s3/')[1], + 'permission': CAMEL_CASED_PERMISSIONS[permission]}) + else: + grant_list.append({ + 'grantee': { + 'id': grantee.id, + 'displayName': None if not grantee.display_name else grantee.display_name + }, + 'permission': CAMEL_CASED_PERMISSIONS[permission] + }) + + if grant_list: + data['grantList'] = grant_list + + return data + def get_canned_acl(acl): - owner_grantee = FakeGrantee( - id='75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a') + owner_grantee = FakeGrantee(id=OWNER) grants = [FakeGrant([owner_grantee], [PERMISSION_FULL_CONTROL])] if acl == 'private': pass # no other permissions @@ -401,6 +446,34 @@ class LifecycleFilter(BaseModel): self.tag = tag self.and_filter = and_filter + def to_config_dict(self): + if self.prefix is not None: + return { + 'predicate': { + 'type': 'LifecyclePrefixPredicate', + 'prefix': self.prefix + } + } + + elif self.tag: + return { + 'predicate': { + 'type': 'LifecycleTagPredicate', + 'tag': { + 'key': self.tag.key, + 'value': self.tag.value + } + } + } + + else: + return { + 'predicate': { + 'type': 'LifecycleAndOperator', + 'operands': self.and_filter.to_config_dict() + } + } + class LifecycleAndFilter(BaseModel): @@ -408,6 +481,17 @@ class LifecycleAndFilter(BaseModel): self.prefix = prefix self.tags = tags + def to_config_dict(self): + data = [] + + if self.prefix is not None: + data.append({'type': 'LifecyclePrefixPredicate', 'prefix': self.prefix}) + + for tag in self.tags: + data.append({'type': 'LifecycleTagPredicate', 'tag': {'key': tag.key, 'value': tag.value}}) + + return data + class LifecycleRule(BaseModel): @@ -430,6 +514,46 @@ class LifecycleRule(BaseModel): self.nvt_storage_class = nvt_storage_class self.aimu_days = aimu_days + def to_config_dict(self): + """Converts the object to the AWS Config data dict. 
+
+        Note: The following are missing that should be added in the future:
+         - transitions (returns None for now)
+         - noncurrentVersionTransitions (returns None for now)
+         - LifeCycle Filters that are NOT prefix
+        """
+
+        lifecycle_dict = {
+            'id': self.id,
+            'prefix': self.prefix,
+            'status': self.status,
+            'expirationInDays': self.expiration_days,
+            'expiredObjectDeleteMarker': self.expired_object_delete_marker,
+            # Default to -1 when no noncurrent-version expiration is set:
+            'noncurrentVersionExpirationInDays': self.nve_noncurrent_days or -1,
+            'expirationDate': self.expiration_date,
+            'transitions': None,  # Replace me with logic to fill in
+            'noncurrentVersionTransitions': None,  # Replace me with logic to fill in
+        }
+
+        if self.aimu_days:
+            lifecycle_dict['abortIncompleteMultipartUpload'] = {'daysAfterInitiation': self.aimu_days}
+        else:
+            lifecycle_dict['abortIncompleteMultipartUpload'] = None
+
+        # Format the filter:
+        if self.prefix is None and self.filter is None:
+            lifecycle_dict['filter'] = {'predicate': None}
+
+        elif self.prefix:
+            lifecycle_dict['filter'] = None
+        else:
+            lifecycle_dict['filter'] = self.filter.to_config_dict()
+
+        return lifecycle_dict
+
 
 class CorsRule(BaseModel):
@@ -450,6 +574,23 @@ class Notification(BaseModel):
         self.events = events
         self.filters = filters if filters else {}
 
+    def to_config_dict(self):
+        data = {}
+
+        # Type and ARN will be filled in by NotificationConfiguration's to_config_dict:
+        data['events'] = [event for event in self.events]
+
+        if self.filters:
+            data['filter'] = {'s3KeyFilter': {'filterRules': [
+                {'name': fr['Name'], 'value': fr['Value']} for fr in self.filters['S3Key']['FilterRule']
+            ]}}
+        else:
+            data['filter'] = None
+
+        data['objectPrefixes'] = []  # Not sure why this is a thing since AWS just seems to return this as filters ¯\_(ツ)_/¯
+
+        return data
+
 
 class NotificationConfiguration(BaseModel):
@@ -461,6 +602,29 @@ class NotificationConfiguration(BaseModel):
         self.cloud_function = [Notification(c["CloudFunction"], c["Event"], filters=c.get("Filter"), id=c.get("Id"))
                                for c in cloud_function] if cloud_function else []
 
+    def to_config_dict(self):
+        data = {'configurations': {}}
+
+        for topic in self.topic:
+            topic_config = topic.to_config_dict()
+            topic_config['topicARN'] = topic.arn
+            topic_config['type'] = 'TopicConfiguration'
+            data['configurations'][topic.id] = topic_config
+
+        for queue in self.queue:
+            queue_config = queue.to_config_dict()
+            queue_config['queueARN'] = queue.arn
+            queue_config['type'] = 'QueueConfiguration'
+            data['configurations'][queue.id] = queue_config
+
+        for cloud_function in self.cloud_function:
+            cf_config = cloud_function.to_config_dict()
+            # This is a Lambda destination, so expose the function ARN rather than a queue ARN:
+            cf_config['lambdaFunctionARN'] = cloud_function.arn
+            cf_config['type'] = 'LambdaConfiguration'
+            data['configurations'][cloud_function.id] = cf_config
+
+        return data
+
 
 class FakeBucket(BaseModel):
@@ -735,6 +899,67 @@ class FakeBucket(BaseModel):
         bucket = s3_backend.create_bucket(resource_name, region_name)
         return bucket
 
+    def to_config_dict(self):
+        """Return the AWS Config JSON format of this S3 bucket.
+ + Note: The following features are not implemented and will need to be if you care about them: + - Bucket Accelerate Configuration + """ + config_dict = { + 'version': '1.3', + 'configurationItemCaptureTime': str(self.creation_date), + 'configurationItemStatus': 'ResourceDiscovered', + 'configurationStateId': str(int(time.mktime(self.creation_date.timetuple()))), # PY2 and 3 compatible + 'configurationItemMD5Hash': '', + 'arn': "arn:aws:s3:::{}".format(self.name), + 'resourceType': 'AWS::S3::Bucket', + 'resourceId': self.name, + 'resourceName': self.name, + 'awsRegion': self.region_name, + 'availabilityZone': 'Regional', + 'resourceCreationTime': str(self.creation_date), + 'relatedEvents': [], + 'relationships': [], + 'tags': {tag.key: tag.value for tag in self.tagging.tag_set.tags}, + 'configuration': { + 'name': self.name, + 'owner': {'id': OWNER}, + 'creationDate': self.creation_date.isoformat() + } + } + + # Make the supplementary configuration: + # TODO: Implement Public Access Block Support + s_config = {'AccessControlList': self.acl.to_config_dict()} + + # TODO implement Accelerate Configuration: + s_config['BucketAccelerateConfiguration'] = {'status': None} + + if self.rules: + s_config['BucketLifecycleConfiguration'] = { + "rules": [rule.to_config_dict() for rule in self.rules] + } + + s_config['BucketLoggingConfiguration'] = { + 'destinationBucketName': self.logging.get('TargetBucket', None), + 'logFilePrefix': self.logging.get('TargetPrefix', None) + } + + s_config['BucketPolicy'] = { + 'policyText': self.policy if self.policy else None + } + + s_config['IsRequesterPaysEnabled'] = 'false' if self.payer == 'BucketOwner' else 'true' + + if self.notification_configuration: + s_config['BucketNotificationConfiguration'] = self.notification_configuration.to_config_dict() + else: + s_config['BucketNotificationConfiguration'] = {'configurations': {}} + + config_dict['supplementaryConfiguration'] = s_config + + return config_dict + class S3Backend(BaseBackend): diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index df0052e14..e8d09c9c3 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -1184,3 +1184,35 @@ def test_list_aggregate_discovered_resource(): with assert_raises(ClientError) as ce: client.list_aggregate_discovered_resources(ConfigurationAggregatorName='testing', ResourceType='AWS::S3::Bucket', Limit=101) assert '101' in ce.exception.response['Error']['Message'] + + +@mock_config +@mock_s3 +def test_get_resource_config_history(): + """NOTE: We are only really testing the Config part. For each individual service, please add tests + for that individual service's "get_config_resource" function. 
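+
+    For orientation, the real-AWS equivalent of the happy path below is roughly
+    (hypothetical CLI invocation, for illustration only):
+
+        aws configservice get-resource-config-history \
+            --resource-type AWS::S3::Bucket --resource-id bucket1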
+ """ + client = boto3.client('config', region_name='us-west-2') + + # With an invalid resource type: + with assert_raises(ClientError) as ce: + client.get_resource_config_history(resourceType='NOT::A::RESOURCE', resourceId='notcreatedyet') + assert ce.exception.response['Error'] == {'Message': 'Resource notcreatedyet of resourceType:NOT::A::RESOURCE is unknown or has ' + 'not been discovered', 'Code': 'ResourceNotDiscoveredException'} + + # With nothing created yet: + with assert_raises(ClientError) as ce: + client.get_resource_config_history(resourceType='AWS::S3::Bucket', resourceId='notcreatedyet') + assert ce.exception.response['Error'] == {'Message': 'Resource notcreatedyet of resourceType:AWS::S3::Bucket is unknown or has ' + 'not been discovered', 'Code': 'ResourceNotDiscoveredException'} + + # Create an S3 bucket: + s3_client = boto3.client('s3', region_name='us-west-2') + for x in range(0, 10): + s3_client.create_bucket(Bucket='bucket{}'.format(x), CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}) + + # Now try: + result = client.get_resource_config_history(resourceType='AWS::S3::Bucket', resourceId='bucket1')['configurationItems'] + assert len(result) == 1 + assert result[0]['resourceName'] == result[0]['resourceId'] == 'bucket1' + assert result[0]['arn'] == 'arn:aws:s3:::bucket1' diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 4ec403854..292093893 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -289,8 +289,8 @@ def test_multipart_etag_quotes_stripped(): part2 = b'1' etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag # Strip quotes from etags - etag1 = etag1.replace('"','') - etag2 = etag2.replace('"','') + etag1 = etag1.replace('"', '') + etag2 = etag2.replace('"', '') xml = "{0}{1}" xml = xml.format(1, etag1) + xml.format(2, etag2) xml = "{0}".format(xml) @@ -1592,7 +1592,8 @@ def test_boto3_copy_object_with_versioning(): response = client.create_multipart_upload(Bucket='blah', Key='test4') upload_id = response['UploadId'] - response = client.upload_part_copy(Bucket='blah', Key='test4', CopySource={'Bucket': 'blah', 'Key': 'test3', 'VersionId': obj3_version_new}, + response = client.upload_part_copy(Bucket='blah', Key='test4', + CopySource={'Bucket': 'blah', 'Key': 'test3', 'VersionId': obj3_version_new}, UploadId=upload_id, PartNumber=1) etag = response["CopyPartResult"]["ETag"] client.complete_multipart_upload( @@ -2284,7 +2285,7 @@ def test_put_bucket_notification(): assert not result.get("QueueConfigurations") assert result["LambdaFunctionConfigurations"][0]["Id"] assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \ - "arn:aws:lambda:us-east-1:012345678910:function:lambda" + "arn:aws:lambda:us-east-1:012345678910:function:lambda" assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1 assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 @@ -2367,7 +2368,7 @@ def test_put_bucket_notification_errors(): assert err.exception.response["Error"]["Code"] == "InvalidArgument" assert err.exception.response["Error"]["Message"] == \ - "The notification destination service region is not valid for the bucket location constraint" + "The notification destination service region is not valid for the bucket location constraint" # Invalid event name: with assert_raises(ClientError) as err: @@ -2949,7 +2950,7 @@ TEST_XML = """\ def test_boto3_bucket_name_too_long(): 
s3 = boto3.client('s3', region_name='us-east-1') with assert_raises(ClientError) as exc: - s3.create_bucket(Bucket='x'*64) + s3.create_bucket(Bucket='x' * 64) exc.exception.response['Error']['Code'].should.equal('InvalidBucketName') @@ -2957,7 +2958,7 @@ def test_boto3_bucket_name_too_long(): def test_boto3_bucket_name_too_short(): s3 = boto3.client('s3', region_name='us-east-1') with assert_raises(ClientError) as exc: - s3.create_bucket(Bucket='x'*2) + s3.create_bucket(Bucket='x' * 2) exc.exception.response['Error']['Code'].should.equal('InvalidBucketName') @@ -2979,7 +2980,7 @@ def test_can_enable_bucket_acceleration(): Bucket=bucket_name, AccelerateConfiguration={'Status': 'Enabled'}, ) - resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) resp.should.have.key('Status') resp['Status'].should.equal('Enabled') @@ -2998,7 +2999,7 @@ def test_can_suspend_bucket_acceleration(): Bucket=bucket_name, AccelerateConfiguration={'Status': 'Suspended'}, ) - resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) resp.should.have.key('Status') resp['Status'].should.equal('Suspended') @@ -3013,7 +3014,7 @@ def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): Bucket=bucket_name, AccelerateConfiguration={'Status': 'Suspended'}, ) - resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) resp.shouldnt.have.key('Status') @@ -3173,3 +3174,342 @@ def test_list_config_discovered_resources(): s3_config_query.list_config_service_resources(None, None, 1, 'notabucket') assert 'The nextToken provided is invalid' in inte.exception.message + + +@mock_s3 +def test_s3_lifecycle_config_dict(): + from moto.s3.config import s3_config_query + + # With 1 bucket in us-west-2: + s3_config_query.backends['global'].create_bucket('bucket1', 'us-west-2') + + # And a lifecycle policy + lifecycle = [ + { + 'ID': 'rule1', + 'Status': 'Enabled', + 'Filter': {'Prefix': ''}, + 'Expiration': {'Days': 1} + }, + { + 'ID': 'rule2', + 'Status': 'Enabled', + 'Filter': { + 'And': { + 'Prefix': 'some/path', + 'Tag': [ + {'Key': 'TheKey', 'Value': 'TheValue'} + ] + } + }, + 'Expiration': {'Days': 1} + }, + { + 'ID': 'rule3', + 'Status': 'Enabled', + 'Filter': {}, + 'Expiration': {'Days': 1} + }, + { + 'ID': 'rule4', + 'Status': 'Enabled', + 'Filter': {'Prefix': ''}, + 'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 1} + } + ] + s3_config_query.backends['global'].set_bucket_lifecycle('bucket1', lifecycle) + + # Get the rules for this: + lifecycles = [rule.to_config_dict() for rule in s3_config_query.backends['global'].buckets['bucket1'].rules] + + # Verify the first: + assert lifecycles[0] == { + 'id': 'rule1', + 'prefix': None, + 'status': 'Enabled', + 'expirationInDays': 1, + 'expiredObjectDeleteMarker': None, + 'noncurrentVersionExpirationInDays': -1, + 'expirationDate': None, + 'transitions': None, + 'noncurrentVersionTransitions': None, + 'abortIncompleteMultipartUpload': None, + 'filter': { + 'predicate': { + 'type': 'LifecyclePrefixPredicate', + 'prefix': '' + } + } 
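+        # Note: Filter={'Prefix': ''} still serializes to a LifecyclePrefixPredicate
+        # with an empty prefix; only a missing filter yields {'predicate': None} (rule3).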
+ } + + # Verify the second: + assert lifecycles[1] == { + 'id': 'rule2', + 'prefix': None, + 'status': 'Enabled', + 'expirationInDays': 1, + 'expiredObjectDeleteMarker': None, + 'noncurrentVersionExpirationInDays': -1, + 'expirationDate': None, + 'transitions': None, + 'noncurrentVersionTransitions': None, + 'abortIncompleteMultipartUpload': None, + 'filter': { + 'predicate': { + 'type': 'LifecycleAndOperator', + 'operands': [ + { + 'type': 'LifecyclePrefixPredicate', + 'prefix': 'some/path' + }, + { + 'type': 'LifecycleTagPredicate', + 'tag': { + 'key': 'TheKey', + 'value': 'TheValue' + } + }, + ] + } + } + } + + # And the third: + assert lifecycles[2] == { + 'id': 'rule3', + 'prefix': None, + 'status': 'Enabled', + 'expirationInDays': 1, + 'expiredObjectDeleteMarker': None, + 'noncurrentVersionExpirationInDays': -1, + 'expirationDate': None, + 'transitions': None, + 'noncurrentVersionTransitions': None, + 'abortIncompleteMultipartUpload': None, + 'filter': {'predicate': None} + } + + # And the last: + assert lifecycles[3] == { + 'id': 'rule4', + 'prefix': None, + 'status': 'Enabled', + 'expirationInDays': None, + 'expiredObjectDeleteMarker': None, + 'noncurrentVersionExpirationInDays': -1, + 'expirationDate': None, + 'transitions': None, + 'noncurrentVersionTransitions': None, + 'abortIncompleteMultipartUpload': {'daysAfterInitiation': 1}, + 'filter': { + 'predicate': { + 'type': 'LifecyclePrefixPredicate', + 'prefix': '' + } + } + } + + +@mock_s3 +def test_s3_notification_config_dict(): + from moto.s3.config import s3_config_query + + # With 1 bucket in us-west-2: + s3_config_query.backends['global'].create_bucket('bucket1', 'us-west-2') + + # And some notifications: + notifications = { + 'TopicConfiguration': [{ + 'Id': 'Topic', + "Topic": 'arn:aws:sns:us-west-2:012345678910:mytopic', + "Event": [ + "s3:ReducedRedundancyLostObject", + "s3:ObjectRestore:Completed" + ] + }], + 'QueueConfiguration': [{ + 'Id': 'Queue', + 'Queue': 'arn:aws:sqs:us-west-2:012345678910:myqueue', + 'Event': [ + "s3:ObjectRemoved:Delete" + ], + 'Filter': { + 'S3Key': { + 'FilterRule': [ + { + 'Name': 'prefix', + 'Value': 'stuff/here/' + } + ] + } + } + }], + 'CloudFunctionConfiguration': [{ + 'Id': 'Lambda', + 'CloudFunction': 'arn:aws:lambda:us-west-2:012345678910:function:mylambda', + 'Event': [ + "s3:ObjectCreated:Post", + "s3:ObjectCreated:Copy", + "s3:ObjectCreated:Put" + ], + 'Filter': { + 'S3Key': { + 'FilterRule': [ + { + 'Name': 'suffix', + 'Value': '.png' + } + ] + } + } + }] + } + + s3_config_query.backends['global'].put_bucket_notification_configuration('bucket1', notifications) + + # Get the notifications for this: + notifications = s3_config_query.backends['global'].buckets['bucket1'].notification_configuration.to_config_dict() + + # Verify it all: + assert notifications == { + 'configurations': { + 'Topic': { + 'events': ['s3:ReducedRedundancyLostObject', 's3:ObjectRestore:Completed'], + 'filter': None, + 'objectPrefixes': [], + 'topicARN': 'arn:aws:sns:us-west-2:012345678910:mytopic', + 'type': 'TopicConfiguration' + }, + 'Queue': { + 'events': ['s3:ObjectRemoved:Delete'], + 'filter': { + 's3KeyFilter': { + 'filterRules': [{ + 'name': 'prefix', + 'value': 'stuff/here/' + }] + } + }, + 'objectPrefixes': [], + 'queueARN': 'arn:aws:sqs:us-west-2:012345678910:myqueue', + 'type': 'QueueConfiguration' + }, + 'Lambda': { + 'events': ['s3:ObjectCreated:Post', 's3:ObjectCreated:Copy', 's3:ObjectCreated:Put'], + 'filter': { + 's3KeyFilter': { + 'filterRules': [{ + 'name': 'suffix', + 'value': '.png' + }] 
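+                        # The lower-cased 'name'/'value' keys come from
+                        # Notification.to_config_dict remapping the boto-style
+                        # 'Name'/'Value' filter-rule fields.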
+ } + }, + 'objectPrefixes': [], + 'queueARN': 'arn:aws:lambda:us-west-2:012345678910:function:mylambda', + 'type': 'LambdaConfiguration' + } + } + } + + +@mock_s3 +def test_s3_acl_to_config_dict(): + from moto.s3.config import s3_config_query + from moto.s3.models import FakeAcl, FakeGrant, FakeGrantee, OWNER + + # With 1 bucket in us-west-2: + s3_config_query.backends['global'].create_bucket('logbucket', 'us-west-2') + + # Get the config dict with nothing other than the owner details: + acls = s3_config_query.backends['global'].buckets['logbucket'].acl.to_config_dict() + assert acls == { + 'grantSet': None, + 'owner': {'displayName': None, 'id': OWNER} + } + + # Add some Log Bucket ACLs: + log_acls = FakeAcl([ + FakeGrant([FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")], "WRITE"), + FakeGrant([FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")], "READ_ACP"), + FakeGrant([FakeGrantee(id=OWNER)], "FULL_CONTROL") + ]) + s3_config_query.backends['global'].set_bucket_acl('logbucket', log_acls) + + acls = s3_config_query.backends['global'].buckets['logbucket'].acl.to_config_dict() + assert acls == { + 'grantSet': None, + 'grantList': [{'grantee': 'LogDelivery', 'permission': 'Write'}, {'grantee': 'LogDelivery', 'permission': 'ReadAcp'}], + 'owner': {'displayName': None, 'id': OWNER} + } + + # Give the owner less than full_control permissions: + log_acls = FakeAcl([FakeGrant([FakeGrantee(id=OWNER)], "READ_ACP"), FakeGrant([FakeGrantee(id=OWNER)], "WRITE_ACP")]) + s3_config_query.backends['global'].set_bucket_acl('logbucket', log_acls) + acls = s3_config_query.backends['global'].buckets['logbucket'].acl.to_config_dict() + assert acls == { + 'grantSet': None, + 'grantList': [ + {'grantee': {'id': OWNER, 'displayName': None}, 'permission': 'ReadAcp'}, + {'grantee': {'id': OWNER, 'displayName': None}, 'permission': 'WriteAcp'} + ], + 'owner': {'displayName': None, 'id': OWNER} + } + + +@mock_s3 +def test_s3_config_dict(): + from moto.s3.config import s3_config_query + from moto.s3.models import FakeAcl, FakeGrant, FakeGrantee, FakeTag, FakeTagging, FakeTagSet, OWNER + + # Without any buckets: + assert not s3_config_query.get_config_resource('some_bucket') + + tags = FakeTagging(FakeTagSet([FakeTag('someTag', 'someValue'), FakeTag('someOtherTag', 'someOtherValue')])) + + # With 1 bucket in us-west-2: + s3_config_query.backends['global'].create_bucket('bucket1', 'us-west-2') + s3_config_query.backends['global'].put_bucket_tagging('bucket1', tags) + + # With a log bucket: + s3_config_query.backends['global'].create_bucket('logbucket', 'us-west-2') + log_acls = FakeAcl([ + FakeGrant([FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")], "WRITE"), + FakeGrant([FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")], "READ_ACP"), + FakeGrant([FakeGrantee(id=OWNER)], "FULL_CONTROL") + ]) + + s3_config_query.backends['global'].set_bucket_acl('logbucket', log_acls) + s3_config_query.backends['global'].put_bucket_logging('bucket1', {'TargetBucket': 'logbucket', 'TargetPrefix': ''}) + + # Get the us-west-2 bucket and verify that it works properly: + bucket1_result = s3_config_query.get_config_resource('bucket1') + + # Just verify a few things: + assert bucket1_result['arn'] == 'arn:aws:s3:::bucket1' + assert bucket1_result['awsRegion'] == 'us-west-2' + assert bucket1_result['resourceName'] == bucket1_result['resourceId'] == 'bucket1' + assert bucket1_result['tags'] == {'someTag': 'someValue', 'someOtherTag': 'someOtherValue'} + assert 
isinstance(bucket1_result['configuration'], str) + exist_list = ['AccessControlList', 'BucketAccelerateConfiguration', 'BucketLoggingConfiguration', 'BucketPolicy', + 'IsRequesterPaysEnabled', 'BucketNotificationConfiguration'] + for exist in exist_list: + assert isinstance(bucket1_result['supplementaryConfiguration'][exist], str) + + # Verify the logging config: + assert json.loads(bucket1_result['supplementaryConfiguration']['BucketLoggingConfiguration']) == \ + {'destinationBucketName': 'logbucket', 'logFilePrefix': ''} + + # Verify the policy: + assert json.loads(bucket1_result['supplementaryConfiguration']['BucketPolicy']) == {'policyText': None} + + # Filter by correct region: + assert bucket1_result == s3_config_query.get_config_resource('bucket1', resource_region='us-west-2') + + # By incorrect region: + assert not s3_config_query.get_config_resource('bucket1', resource_region='eu-west-1') + + # With correct resource ID and name: + assert bucket1_result == s3_config_query.get_config_resource('bucket1', resource_name='bucket1') + + # With an incorrect resource name: + assert not s3_config_query.get_config_resource('bucket1', resource_name='eu-bucket-1') From ce3f1eed66d0d435904898c0bb7a11d3f2057bf1 Mon Sep 17 00:00:00 2001 From: gruebel Date: Sun, 13 Oct 2019 20:32:53 +0200 Subject: [PATCH 24/25] fix test server error --- moto/sqs/responses.py | 6 +++++- tests/test_sqs/test_sqs.py | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index ce1dcbf8c..747fa2363 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -406,7 +406,11 @@ class SQSResponse(BaseResponse): queue_name = self._get_queue_name() tag_keys = self._get_multi_param('TagKey') - self.sqs_backend.untag_queue(queue_name, tag_keys) + try: + self.sqs_backend.untag_queue(queue_name, tag_keys) + except QueueDoesNotExist as e: + return self._error('AWS.SimpleQueueService.NonExistentQueue', + e.description) template = self.response_template(UNTAG_QUEUE_RESPONSE) return template.render() diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 6d06b87cc..bc9fa8e4d 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -17,7 +17,6 @@ import time import uuid from moto import settings, mock_sqs, mock_sqs_deprecated -from moto.sqs.exceptions import QueueDoesNotExist from tests.helpers import requires_boto_gte import tests.backport_assert_raises # noqa from nose.tools import assert_raises @@ -1006,7 +1005,8 @@ def test_untag_queue_errors(): 'tag_key_1' ] ).should.throw( - QueueDoesNotExist + ClientError, + "The specified queue does not exist for this wsdl version." 
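+        # This matches the error text real SQS returns for a missing queue, so
+        # clients that match on the message keep working against moto.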
) client.untag_queue.when.called_with( From 96f0666df99ccc156deb1019ecce2ddedc6fa24d Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Sun, 13 Oct 2019 11:10:37 -0700 Subject: [PATCH 25/25] Added AWS Config batching capabilities - Support for aggregated and non-aggregated batching - Currently only implemented for S3 --- moto/config/models.py | 94 ++++++++++++++++++++--- moto/config/responses.py | 12 +-- tests/test_config/test_config.py | 124 +++++++++++++++++++++++++++++++ 3 files changed, 213 insertions(+), 17 deletions(-) diff --git a/moto/config/models.py b/moto/config/models.py index c93c03085..8d9096454 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -18,7 +18,7 @@ from moto.config.exceptions import InvalidResourceTypeException, InvalidDelivery NoSuchDeliveryChannelException, LastDeliveryChannelDeleteFailedException, TagKeyTooBig, \ TooManyTags, TagValueTooBig, TooManyAccountSources, InvalidParameterValueException, InvalidNextTokenException, \ NoSuchConfigurationAggregatorException, InvalidTagCharacters, DuplicateTags, InvalidLimit, InvalidResourceParameters, \ - TooManyResourceIds, ResourceNotDiscoveredException + TooManyResourceIds, ResourceNotDiscoveredException, TooManyResourceKeys from moto.core import BaseBackend, BaseModel from moto.s3.config import s3_config_query @@ -791,7 +791,7 @@ class ConfigBackend(BaseBackend): return result - def get_resource_config_history(self, type, id, backend_region): + def get_resource_config_history(self, resource_type, id, backend_region): """Returns the configuration of an item in the AWS Config format of the resource for the current regional backend. NOTE: This is --NOT-- returning history as it is not supported in moto at this time. (PR's welcome!) @@ -799,30 +799,102 @@ class ConfigBackend(BaseBackend): return 1 item. (If no items, it raises an exception) """ # If the type isn't implemented then we won't find the item: - if type not in RESOURCE_MAP: - raise ResourceNotDiscoveredException(type, id) + if resource_type not in RESOURCE_MAP: + raise ResourceNotDiscoveredException(resource_type, id) # Is the resource type global? - if RESOURCE_MAP[type].backends.get('global'): + if RESOURCE_MAP[resource_type].backends.get('global'): backend_region = 'global' # If the backend region isn't implemented then we won't find the item: - if not RESOURCE_MAP[type].backends.get(backend_region): - raise ResourceNotDiscoveredException(type, id) + if not RESOURCE_MAP[resource_type].backends.get(backend_region): + raise ResourceNotDiscoveredException(resource_type, id) # Get the item: - item = RESOURCE_MAP[type].get_config_resource(id, backend_region=backend_region) + item = RESOURCE_MAP[resource_type].get_config_resource(id, backend_region=backend_region) if not item: - raise ResourceNotDiscoveredException(type, id) + raise ResourceNotDiscoveredException(resource_type, id) item['accountId'] = DEFAULT_ACCOUNT_ID return {'configurationItems': [item]} def batch_get_resource_config(self, resource_keys, backend_region): - """Returns the configuration of an item in the AWS Config format of the resource for the current regional backend.""" + """Returns the configuration of an item in the AWS Config format of the resource for the current regional backend. 
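+
+        Resource keys that name an unknown type, or an ID that cannot be found,
+        are silently skipped rather than echoed back, so
+        'unprocessedResourceKeys' always comes back empty here.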
+
+        :param resource_keys: list of dicts, each with a 'resourceType' and a 'resourceId'
+        :param backend_region: region whose regional backend should be queried
+        """
         # Can't have more than 100 items
-        pass
+        if len(resource_keys) > 100:
+            raise TooManyResourceKeys(['com.amazonaws.starling.dove.ResourceKey@12345'] * len(resource_keys))
+
+        results = []
+        for resource in resource_keys:
+            # Does the resource type exist?
+            if not RESOURCE_MAP.get(resource['resourceType']):
+                # Not found so skip.
+                continue
+
+            # Is the resource type global?
+            if RESOURCE_MAP[resource['resourceType']].backends.get('global'):
+                backend_region = 'global'
+
+            # If the backend region isn't implemented then we won't find the item:
+            if not RESOURCE_MAP[resource['resourceType']].backends.get(backend_region):
+                continue
+
+            # Get the item:
+            item = RESOURCE_MAP[resource['resourceType']].get_config_resource(resource['resourceId'], backend_region=backend_region)
+            if not item:
+                continue
+
+            item['accountId'] = DEFAULT_ACCOUNT_ID
+
+            results.append(item)
+
+        return {'baseConfigurationItems': results, 'unprocessedResourceKeys': []}  # At this time, moto is not adding unprocessed items.
+
+    def batch_get_aggregate_resource_config(self, aggregator_name, resource_identifiers):
+        """Returns the configurations of the requested resources in the AWS Config format.
+
+        As far as moto goes, the only real difference between this function and `batch_get_resource_config` is that
+        this one requires a Config Aggregator to be set up a priori and can search based on resource regions.
+
+        Note: moto will IGNORE the resource account ID in the search query.
+        """
+        if not self.config_aggregators.get(aggregator_name):
+            raise NoSuchConfigurationAggregatorException()
+
+        # Can't have more than 100 items
+        if len(resource_identifiers) > 100:
+            raise TooManyResourceKeys(['com.amazonaws.starling.dove.AggregateResourceIdentifier@12345'] * len(resource_identifiers))
+
+        found = []
+        not_found = []
+        for identifier in resource_identifiers:
+            resource_type = identifier['ResourceType']
+            resource_region = identifier['SourceRegion']
+            resource_id = identifier['ResourceId']
+            resource_name = identifier.get('ResourceName', None)
+
+            # Does the resource type exist?
+            if not RESOURCE_MAP.get(resource_type):
+                not_found.append(identifier)
+                continue
+
+            # Get the item:
+            item = RESOURCE_MAP[resource_type].get_config_resource(resource_id, resource_name=resource_name,
+                                                                   resource_region=resource_region)
+            if not item:
+                not_found.append(identifier)
+                continue
+
+            item['accountId'] = DEFAULT_ACCOUNT_ID
+
+            found.append(item)
+
+        return {'BaseConfigurationItems': found, 'UnprocessedResourceIdentifiers': not_found}


 config_backends = {}
diff --git a/moto/config/responses.py b/moto/config/responses.py
index 470911446..f10d48b71 100644
--- a/moto/config/responses.py
+++ b/moto/config/responses.py
@@ -108,12 +108,12 @@ class ConfigResponse(BaseResponse):
                                                                             self.region)
         return json.dumps(schema)

-    """
     def batch_get_resource_config(self):
-        # TODO implement me!
-        return ""
+        schema = self.config_backend.batch_get_resource_config(self._get_param('resourceKeys'),
+                                                               self.region)
+        return json.dumps(schema)

     def batch_get_aggregate_resource_config(self):
-        # TODO implement me!
- return "" - """ + schema = self.config_backend.batch_get_aggregate_resource_config(self._get_param('ConfigurationAggregatorName'), + self._get_param('ResourceIdentifiers')) + return json.dumps(schema) diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index e8d09c9c3..ff91e6675 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -1216,3 +1216,127 @@ def test_get_resource_config_history(): assert len(result) == 1 assert result[0]['resourceName'] == result[0]['resourceId'] == 'bucket1' assert result[0]['arn'] == 'arn:aws:s3:::bucket1' + + +@mock_config +@mock_s3 +def test_batch_get_resource_config(): + """NOTE: We are only really testing the Config part. For each individual service, please add tests + for that individual service's "get_config_resource" function. + """ + client = boto3.client('config', region_name='us-west-2') + + # With more than 100 resourceKeys: + with assert_raises(ClientError) as ce: + client.batch_get_resource_config(resourceKeys=[{'resourceType': 'AWS::S3::Bucket', 'resourceId': 'someBucket'}] * 101) + assert 'Member must have length less than or equal to 100' in ce.exception.response['Error']['Message'] + + # With invalid resource types and resources that don't exist: + result = client.batch_get_resource_config(resourceKeys=[ + {'resourceType': 'NOT::A::RESOURCE', 'resourceId': 'NotAThing'}, {'resourceType': 'AWS::S3::Bucket', 'resourceId': 'NotAThing'}, + ]) + + assert not result['baseConfigurationItems'] + assert not result['unprocessedResourceKeys'] + + # Create some S3 buckets: + s3_client = boto3.client('s3', region_name='us-west-2') + for x in range(0, 10): + s3_client.create_bucket(Bucket='bucket{}'.format(x), CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}) + + # Get them all: + keys = [{'resourceType': 'AWS::S3::Bucket', 'resourceId': 'bucket{}'.format(x)} for x in range(0, 10)] + result = client.batch_get_resource_config(resourceKeys=keys) + assert len(result['baseConfigurationItems']) == 10 + buckets_missing = ['bucket{}'.format(x) for x in range(0, 10)] + for r in result['baseConfigurationItems']: + buckets_missing.remove(r['resourceName']) + + assert not buckets_missing + + +@mock_config +@mock_s3 +def test_batch_get_aggregate_resource_config(): + """NOTE: We are only really testing the Config part. For each individual service, please add tests + for that individual service's "get_config_resource" function. 
+ """ + from moto.config.models import DEFAULT_ACCOUNT_ID + client = boto3.client('config', region_name='us-west-2') + + # Without an aggregator: + bad_ri = {'SourceAccountId': '000000000000', 'SourceRegion': 'not-a-region', 'ResourceType': 'NOT::A::RESOURCE', 'ResourceId': 'nope'} + with assert_raises(ClientError) as ce: + client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='lolno', ResourceIdentifiers=[bad_ri]) + assert 'The configuration aggregator does not exist' in ce.exception.response['Error']['Message'] + + # Create the aggregator: + account_aggregation_source = { + 'AccountIds': [ + '012345678910', + '111111111111', + '222222222222' + ], + 'AllAwsRegions': True + } + client.put_configuration_aggregator( + ConfigurationAggregatorName='testing', + AccountAggregationSources=[account_aggregation_source] + ) + + # With more than 100 items: + with assert_raises(ClientError) as ce: + client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', ResourceIdentifiers=[bad_ri] * 101) + assert 'Member must have length less than or equal to 100' in ce.exception.response['Error']['Message'] + + # Create some S3 buckets: + s3_client = boto3.client('s3', region_name='us-west-2') + for x in range(0, 10): + s3_client.create_bucket(Bucket='bucket{}'.format(x), CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}) + + s3_client_eu = boto3.client('s3', region_name='eu-west-1') + for x in range(10, 12): + s3_client_eu.create_bucket(Bucket='eu-bucket{}'.format(x), CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + + # Now try with resources that exist and ones that don't: + identifiers = [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'us-west-2', 'ResourceType': 'AWS::S3::Bucket', + 'ResourceId': 'bucket{}'.format(x)} for x in range(0, 10)] + identifiers += [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'eu-west-1', 'ResourceType': 'AWS::S3::Bucket', + 'ResourceId': 'eu-bucket{}'.format(x)} for x in range(10, 12)] + identifiers += [bad_ri] + + result = client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', ResourceIdentifiers=identifiers) + assert len(result['UnprocessedResourceIdentifiers']) == 1 + assert result['UnprocessedResourceIdentifiers'][0] == bad_ri + + # Verify all the buckets are there: + assert len(result['BaseConfigurationItems']) == 12 + missing_buckets = ['bucket{}'.format(x) for x in range(0, 10)] + ['eu-bucket{}'.format(x) for x in range(10, 12)] + + for r in result['BaseConfigurationItems']: + missing_buckets.remove(r['resourceName']) + + assert not missing_buckets + + # Verify that if the resource name and ID are correct that things are good: + identifiers = [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'us-west-2', 'ResourceType': 'AWS::S3::Bucket', + 'ResourceId': 'bucket1', 'ResourceName': 'bucket1'}] + result = client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', ResourceIdentifiers=identifiers) + assert not result['UnprocessedResourceIdentifiers'] + assert len(result['BaseConfigurationItems']) == 1 and result['BaseConfigurationItems'][0]['resourceName'] == 'bucket1' + + # Verify that if the resource name and ID mismatch that we don't get a result: + identifiers = [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'us-west-2', 'ResourceType': 'AWS::S3::Bucket', + 'ResourceId': 'bucket1', 'ResourceName': 'bucket2'}] + result = client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', 
ResourceIdentifiers=identifiers)
+    assert not result['BaseConfigurationItems']
+    assert len(result['UnprocessedResourceIdentifiers']) == 1 and result['UnprocessedResourceIdentifiers'][0]['ResourceName'] == 'bucket2'
+
+    # Verify that if the region is incorrect that we don't get a result:
+    identifiers = [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'eu-west-1', 'ResourceType': 'AWS::S3::Bucket',
+                    'ResourceId': 'bucket1'}]
+    result = client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', ResourceIdentifiers=identifiers)
+    assert not result['BaseConfigurationItems']
+    assert len(result['UnprocessedResourceIdentifiers']) == 1 and result['UnprocessedResourceIdentifiers'][0]['SourceRegion'] == 'eu-west-1'
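
Taken together, these Config patches dispatch through RESOURCE_MAP to a per-service query object; S3's is `s3_config_query`. A minimal sketch of the interface such an object has to satisfy, inferred from the call sites in the diffs above (`backends`, `get_config_resource`, and `list_config_service_resources` are the names actually invoked; the class name and bodies here are illustrative, not moto's real implementation):

    # Hypothetical sketch of a per-service Config query object, inferred from
    # how moto/config/models.py calls into RESOURCE_MAP entries.
    class ExampleConfigQuery(object):
        def __init__(self, backends):
            # Maps region name (or 'global') to the service backend to query.
            self.backends = backends

        def list_config_service_resources(self, resource_ids, resource_name, limit, next_token):
            # Return a (resource list, next_token) pair for paging.
            raise NotImplementedError

        def get_config_resource(self, resource_id, resource_name=None,
                                backend_region=None, resource_region=None):
            # Return one resource as an AWS Config dict, or None if it does not
            # exist -- callers treat None as 'resource not discovered'.
            raise NotImplementedError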