From f3623e3cd3845411a2ff31a71b83f412e86f6a41 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Thu, 12 Oct 2017 21:59:02 +0100 Subject: [PATCH 01/11] Fix for #1258 (#1260) * Fix for #1258 * Updated doc link --- moto/ec2/responses/instances.py | 9 +++++++-- tests/test_ec2/test_instances.py | 17 +++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 532d703c9..1550fddeb 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -30,7 +30,7 @@ class InstanceResponse(BaseResponse): if max_results and len(reservations) > (start + max_results): next_token = reservations_resp[-1].id template = self.response_template(EC2_DESCRIBE_INSTANCES) - return template.render(reservations=reservations_resp, next_token=next_token) + return template.render(reservations=reservations_resp, next_token=next_token).replace('True', 'true').replace('False', 'false') def run_instances(self): min_count = int(self._get_param('MinCount', if_none='1')) @@ -144,7 +144,12 @@ class InstanceResponse(BaseResponse): """ Handles requests which are generated by code similar to: - instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}) + instance.modify_attribute( + BlockDeviceMappings=[{ + 'DeviceName': '/dev/sda1', + 'Ebs': {'DeleteOnTermination': True} + }] + ) The querystring contains information similar to: diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 04e6a6daa..46bb34d57 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1113,3 +1113,20 @@ def test_get_instance_by_security_group(): assert len(security_group_instances) == 1 assert security_group_instances[0].id == instance.id + + +@mock_ec2 +def test_modify_delete_on_termination(): + ec2_client = boto3.resource('ec2', region_name='us-west-1') + result = ec2_client.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1) + instance = result[0] + instance.load() + instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(False) + instance.modify_attribute( + BlockDeviceMappings=[{ + 'DeviceName': '/dev/sda1', + 'Ebs': {'DeleteOnTermination': True} + }] + ) + instance.load() + instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(True) From 2bb3e841d12c5f614971492d241629794f35fe85 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Mon, 16 Oct 2017 21:56:03 +0100 Subject: [PATCH 02/11] Fixed #1261 dynamodb FilterExpression bugs (#1262) * Fixed #1261 dynamodb FilterExpression bugs FilterExpression was incorrectly handling numbers, stupid typo there. Also >= <= and <> was not being parsed correctly. * Switched up logic a bit for better end result. 
Fixes #1263 * Fixed another bug --- moto/dynamodb2/comparisons.py | 53 +++++++++++++-------- tests/test_dynamodb2/test_dynamodb.py | 68 ++++++++++++++++++++++++--- 2 files changed, 95 insertions(+), 26 deletions(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 8462c2de5..faeffbaa5 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -61,15 +61,27 @@ def get_filter_expression(expr, names, values): # Do substitutions for key, value in names.items(): expr = expr.replace(key, value) + + # Store correct types of values for use later + values_map = {} for key, value in values.items(): if 'N' in value: - expr.replace(key, float(value['N'])) + values_map[key] = float(value['N']) + elif 'BOOL' in value: + values_map[key] = value['BOOL'] + elif 'S' in value: + values_map[key] = value['S'] + elif 'NS' in value: + values_map[key] = tuple(value['NS']) + elif 'SS' in value: + values_map[key] = tuple(value['SS']) + elif 'L' in value: + values_map[key] = tuple(value['L']) else: - expr = expr.replace(key, value['S']) + raise NotImplementedError() # Remove all spaces, tbf we could just skip them in the next step. # The number of known options is really small so we can do a fair bit of cheating - #expr = list(re.sub('\s', '', expr)) # 'Id>5ANDattribute_exists(test)ORNOTlength<6' expr = list(expr) # DodgyTokenisation stage 1 @@ -130,13 +142,9 @@ def get_filter_expression(expr, names, values): next_token = six.next(token_iterator) while next_token != ')': - try: - next_token = int(next_token) - except ValueError: - try: - next_token = float(next_token) - except ValueError: - pass + if next_token in values_map: + next_token = values_map[next_token] + tuple_list.append(next_token) next_token = six.next(token_iterator) @@ -149,10 +157,14 @@ def get_filter_expression(expr, names, values): tokens2.append(tuple(tuple_list)) elif token == 'BETWEEN': field = tokens2.pop() - op1 = int(six.next(token_iterator)) + # if values map contains a number, it would be a float + # so we need to int() it anyway + op1 = six.next(token_iterator) + op1 = int(values_map.get(op1, op1)) and_op = six.next(token_iterator) assert and_op == 'AND' - op2 = int(six.next(token_iterator)) + op2 = six.next(token_iterator) + op2 = int(values_map.get(op2, op2)) tokens2.append(['between', field, op1, op2]) elif is_function(token): @@ -169,14 +181,15 @@ def get_filter_expression(expr, names, values): tokens2.append(function_list) else: - try: - token = int(token) - except ValueError: - try: - token = float(token) - except ValueError: - pass - tokens2.append(token) + # Convert tokens back to real types + if token in values_map: + token = values_map[token] + + # Need to join >= <= <> + if len(tokens2) > 0 and ((tokens2[-1] == '>' and token == '=') or (tokens2[-1] == '<' and token == '=') or (tokens2[-1] == '<' and token == '>')): + tokens2.append(tokens2.pop() + token) + else: + tokens2.append(token) # Start of the Shunting-Yard algorithm. <-- Proper beast algorithm! 
def is_number(val): diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 85d8feb34..26d380628 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -581,24 +581,24 @@ def test_filter_expression(): row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}}) # AND test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > 5 AND Subs < 7', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}}) filter_expr.expr(row1).should.be(True) filter_expr.expr(row2).should.be(False) # OR test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 5 OR Id=8', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': 5}, ':v1': {'N': 8}}) filter_expr.expr(row1).should.be(True) # BETWEEN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN 5 AND 10', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 10}}) filter_expr.expr(row1).should.be(True) # PAREN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 8 AND (Subs = 8 OR Subs = 5)', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': 8}, ':v1': {'N': 5}}) filter_expr.expr(row1).should.be(True) # IN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN (7,8, 9)', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}}) filter_expr.expr(row1).should.be(True) # attribute function tests @@ -655,6 +655,63 @@ def test_scan_filter(): assert response['Count'] == 1 +@mock_dynamodb2 +def test_scan_filter2(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'N': '1'} + } + ) + + response = client.scan( + TableName='test1', + Select='ALL_ATTRIBUTES', + FilterExpression='#tb >= :dt', + ExpressionAttributeNames={"#tb": "app"}, + ExpressionAttributeValues={":dt": {"N": str(1)}} + ) + assert response['Count'] == 1 + + +@mock_dynamodb2 +def test_scan_filter3(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'N': '1'}, + 'active': {'BOOL': True} + } + ) + + table = dynamodb.Table('test1') + response = table.scan( + FilterExpression=Attr('active').eq(True) + ) + assert response['Count'] == 1 + + @mock_dynamodb2 def test_bad_scan_filter(): client = boto3.client('dynamodb', region_name='us-east-1') @@ -680,7 +737,6 @@ def test_bad_scan_filter(): raise RuntimeError('Should of raised ResourceInUseException') - @mock_dynamodb2 def test_duplicate_create(): client = boto3.client('dynamodb', region_name='us-east-1') From 49ddb500a839daaee8b916fb0af365825d66467d Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Tue, 17 Oct 2017 01:06:22 +0100 Subject: [PATCH 03/11] AWS X-Ray client mock. (#1255) * X-Ray Client SDK patched Fixes #1250 * Fixed flake8 * Fixed some issues * Fixed flake8 * Fixed more typos * Fixed python2 string * Fixed aws-sdk patch order * Added more test cases to test the patching --- moto/__init__.py | 2 +- moto/awslambda/responses.py | 5 ++ moto/core/responses.py | 6 +- moto/core/utils.py | 93 +++++++++++++++++++++++++++++ moto/dynamodb2/responses.py | 4 +- moto/sqs/responses.py | 6 +- moto/xray/__init__.py | 1 + moto/xray/mock_client.py | 83 +++++++++++++++++++++++++ setup.py | 3 +- tests/test_ecr/test_ecr_boto3.py | 6 +- tests/test_xray/test_xray_client.py | 72 ++++++++++++++++++++++ 11 files changed, 273 insertions(+), 8 deletions(-) create mode 100644 moto/xray/mock_client.py create mode 100644 tests/test_xray/test_xray_client.py diff --git a/moto/__init__.py b/moto/__init__.py index 64baa52ac..79efac862 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -38,7 +38,7 @@ from .sts import mock_sts, mock_sts_deprecated # flake8: noqa from .ssm import mock_ssm # flake8: noqa from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa from .swf import mock_swf, mock_swf_deprecated # flake8: noqa -from .xray import mock_xray # flake8: noqa +from .xray import mock_xray, mock_xray_client, XRaySegment # flake8: noqa from .logs import mock_logs, mock_logs_deprecated # flake8: noqa diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 5215f63c5..4ba837ea2 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -9,6 +9,7 @@ try: except: from urllib.parse import unquote, urlparse, parse_qs +from moto.core.utils import amz_crc32, amzn_request_id from moto.core.responses import BaseResponse @@ -32,6 +33,8 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + @amz_crc32 + @amzn_request_id def invoke(self, request, full_url, headers): self.setup_class(request, full_url, headers) if request.method == 'POST': @@ -39,6 +42,8 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + @amz_crc32 + @amzn_request_id def invoke_async(self, request, full_url, headers): self.setup_class(request, full_url, headers) if request.method == 'POST': diff --git a/moto/core/responses.py b/moto/core/responses.py index e85054802..572a45229 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -199,10 +199,14 @@ class 
BaseResponse(_TemplateEnvironmentMixin): response = method() except HTTPException as http_error: response = http_error.description, dict(status=http_error.code) + if isinstance(response, six.string_types): return 200, headers, response else: - body, new_headers = response + if len(response) == 2: + body, new_headers = response + else: + status, new_headers, body = response status = new_headers.get('status', 200) headers.update(new_headers) # Cast status to string diff --git a/moto/core/utils.py b/moto/core/utils.py index 9ee0c1814..2ea4dc4a8 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -1,10 +1,16 @@ from __future__ import unicode_literals +from functools import wraps +import binascii import datetime import inspect import random import re import six +import string + + +REQUEST_ID_LONG = string.digits + string.ascii_uppercase def camelcase_to_underscores(argument): @@ -194,3 +200,90 @@ def unix_time(dt=None): def unix_time_millis(dt=None): return unix_time(dt) * 1000.0 + + +def gen_amz_crc32(response, headerdict=None): + if not isinstance(response, bytes): + response = response.encode() + + crc = str(binascii.crc32(response)) + + if headerdict is not None and isinstance(headerdict, dict): + headerdict.update({'x-amz-crc32': crc}) + + return crc + + +def gen_amzn_requestid_long(headerdict=None): + req_id = ''.join([random.choice(REQUEST_ID_LONG) for _ in range(0, 52)]) + + if headerdict is not None and isinstance(headerdict, dict): + headerdict.update({'x-amzn-requestid': req_id}) + + return req_id + + +def amz_crc32(f): + @wraps(f) + def _wrapper(*args, **kwargs): + response = f(*args, **kwargs) + + headers = {} + status = 200 + + if isinstance(response, six.string_types): + body = response + else: + if len(response) == 2: + body, new_headers = response + status = new_headers.get('status', 200) + else: + status, new_headers, body = response + headers.update(new_headers) + # Cast status to string + if "status" in headers: + headers['status'] = str(headers['status']) + + try: + # Doesnt work on python2 for some odd unicode strings + gen_amz_crc32(body, headers) + except Exception: + pass + + return status, headers, body + + return _wrapper + + +def amzn_request_id(f): + @wraps(f) + def _wrapper(*args, **kwargs): + response = f(*args, **kwargs) + + headers = {} + status = 200 + + if isinstance(response, six.string_types): + body = response + else: + if len(response) == 2: + body, new_headers = response + status = new_headers.get('status', 200) + else: + status, new_headers, body = response + headers.update(new_headers) + # Cast status to string + if "status" in headers: + headers['status'] = str(headers['status']) + + request_id = gen_amzn_requestid_long(headers) + + # Update request ID in XML + try: + body = body.replace('{{ requestid }}', request_id) + except Exception: # Will just ignore if it cant work on bytes (which are str's on python2) + pass + + return status, headers, body + + return _wrapper diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 75e625c73..218cfc21d 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -4,7 +4,7 @@ import six import re from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores +from moto.core.utils import camelcase_to_underscores, amzn_request_id from .models import dynamodb_backend2, dynamo_json_dump @@ -24,6 +24,7 @@ class DynamoHandler(BaseResponse): def error(self, type_, message, status=400): return status, self.response_headers, 
dynamo_json_dump({'__type': type_, 'message': message}) + @amzn_request_id def call_action(self): self.body = json.loads(self.body or '{}') endpoint = self.get_endpoint_name(self.headers) @@ -56,6 +57,7 @@ class DynamoHandler(BaseResponse): response = {"TableNames": tables} if limit and len(all_tables) > start + limit: response["LastEvaluatedTableName"] = tables[-1] + return dynamo_json_dump(response) def create_table(self): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 540bd4e41..63a5036d6 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals from six.moves.urllib.parse import urlparse from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores +from moto.core.utils import camelcase_to_underscores, amz_crc32, amzn_request_id from .utils import parse_message_attributes from .models import sqs_backends from .exceptions import ( @@ -52,6 +52,8 @@ class SQSResponse(BaseResponse): return visibility_timeout + @amz_crc32 # crc last as request_id can edit XML + @amzn_request_id def call_action(self): status_code, headers, body = super(SQSResponse, self).call_action() if status_code == 404: @@ -296,7 +298,7 @@ CREATE_QUEUE_RESPONSE = """ {{ queue.visibility_timeout }} - 7a62c49f-347e-4fc4-9331-6e8e7a96aa73 + {{ requestid }} """ diff --git a/moto/xray/__init__.py b/moto/xray/__init__.py index 7b32ca0b0..41f00af58 100644 --- a/moto/xray/__init__.py +++ b/moto/xray/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import xray_backends from ..core.models import base_decorator +from .mock_client import mock_xray_client, XRaySegment # noqa xray_backend = xray_backends['us-east-1'] mock_xray = base_decorator(xray_backends) diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py new file mode 100644 index 000000000..6e2164d63 --- /dev/null +++ b/moto/xray/mock_client.py @@ -0,0 +1,83 @@ +from functools import wraps +import os +from moto.xray import xray_backends +import aws_xray_sdk.core +from aws_xray_sdk.core.context import Context as AWSContext +from aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter + + +class MockEmitter(UDPEmitter): + """ + Replaces the code that sends UDP to local X-Ray daemon + """ + def __init__(self, daemon_address='127.0.0.1:2000'): + address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address) + self._ip, self._port = self._parse_address(address) + + def _xray_backend(self, region): + return xray_backends[region] + + def send_entity(self, entity): + # Hack to get region + # region = entity.subsegments[0].aws['region'] + # xray = self._xray_backend(region) + + # TODO store X-Ray data, pretty sure X-Ray needs refactor for this + pass + + def _send_data(self, data): + raise RuntimeError('Should not be running this') + + +def mock_xray_client(f): + """ + Mocks the X-Ray sdk by pwning its evil singleton with our methods + + The X-Ray SDK has normally been imported and `patched()` called long before we start mocking. + This means the Context() will be very unhappy if an env var isnt present, so we set that, save + the old context, then supply our new context. + We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing + that itno the recorder instance. 
+ """ + @wraps(f) + def _wrapped(*args, **kwargs): + print("Starting X-Ray Patch") + + old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING') + os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR' + old_xray_context = aws_xray_sdk.core.xray_recorder._context + old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter + aws_xray_sdk.core.xray_recorder._context = AWSContext() + aws_xray_sdk.core.xray_recorder._emitter = MockEmitter() + + try: + f(*args, **kwargs) + finally: + + if old_xray_context_var is None: + del os.environ['AWS_XRAY_CONTEXT_MISSING'] + else: + os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var + + aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter + aws_xray_sdk.core.xray_recorder._context = old_xray_context + + return _wrapped + + +class XRaySegment(object): + """ + XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark + the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated + by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop + the segment, thus causing it to be emitted via UDP. + + During testing we're going to have to control the start and end of a segment via context managers. + """ + def __enter__(self): + aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1) + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + aws_xray_sdk.core.xray_recorder.end_segment() diff --git a/setup.py b/setup.py index 207c5dd2e..9e423bdd4 100755 --- a/setup.py +++ b/setup.py @@ -19,7 +19,8 @@ install_requires = [ "pytz", "python-dateutil<3.0.0,>=2.1", "mock", - "docker>=2.5.1" + "docker>=2.5.1", + "aws-xray-sdk==0.92.2" ] extras_require = { diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 67d1a2cab..00628e22f 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -414,7 +414,8 @@ def test_get_authorization_token_assume_region(): client = boto3.client('ecr', region_name='us-east-1') auth_token_response = client.get_authorization_token() - list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) + auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') auth_token_response['authorizationData'].should.equal([ { 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', @@ -429,7 +430,8 @@ def test_get_authorization_token_explicit_regions(): client = boto3.client('ecr', region_name='us-east-1') auth_token_response = client.get_authorization_token(registryIds=['us-east-1', 'us-west-1']) - list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) + auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') auth_token_response['authorizationData'].should.equal([ { 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', diff --git a/tests/test_xray/test_xray_client.py b/tests/test_xray/test_xray_client.py new file mode 100644 index 000000000..0cd948950 --- /dev/null +++ b/tests/test_xray/test_xray_client.py @@ -0,0 +1,72 @@ +from __future__ import unicode_literals +from moto import mock_xray_client, XRaySegment, mock_dynamodb2 +import sure # noqa +import boto3 + +from moto.xray.mock_client import MockEmitter +import aws_xray_sdk.core as xray_core +import 
aws_xray_sdk.core.patcher as xray_core_patcher + +import botocore.client +import botocore.endpoint +original_make_api_call = botocore.client.BaseClient._make_api_call +original_encode_headers = botocore.endpoint.Endpoint._encode_headers + +import requests +original_session_request = requests.Session.request +original_session_prep_request = requests.Session.prepare_request + + +@mock_xray_client +@mock_dynamodb2 +def test_xray_dynamo_request_id(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + client = boto3.client('dynamodb', region_name='us-east-1') + + with XRaySegment(): + resp = client.list_tables() + resp['ResponseMetadata'].should.contain('RequestId') + id1 = resp['ResponseMetadata']['RequestId'] + + with XRaySegment(): + client.list_tables() + resp = client.list_tables() + id2 = resp['ResponseMetadata']['RequestId'] + + id1.should_not.equal(id2) + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) + + +@mock_xray_client +def test_xray_udp_emitter_patched(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + assert isinstance(xray_core.xray_recorder._emitter, MockEmitter) + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) + + +@mock_xray_client +def test_xray_context_patched(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + xray_core.xray_recorder._context.context_missing.should.equal('LOG_ERROR') + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) From 5ef236e96666debfac0cf8374afc798a525c8141 Mon Sep 17 00:00:00 2001 From: John Kerkstra Date: Mon, 16 Oct 2017 19:09:51 -0500 Subject: [PATCH 04/11] Add attach_ and detach_instances methods to autoscaling service (#1264) * add detach_instances functionality to autoscaling service * use ASG_NAME_TAG constant * cleanup models method a bit, add mocked DetachInstancesResult to response template * add attach_instances method --- moto/autoscaling/exceptions.py | 14 ++ moto/autoscaling/models.py | 85 +++++++++--- moto/autoscaling/responses.py | 55 ++++++++ tests/test_autoscaling/test_autoscaling.py | 144 +++++++++++++++++++++ 4 files changed, 277 insertions(+), 21 deletions(-) create mode 100644 moto/autoscaling/exceptions.py diff --git a/moto/autoscaling/exceptions.py b/moto/autoscaling/exceptions.py new file mode 100644 index 000000000..15b2e4f4a --- /dev/null +++ b/moto/autoscaling/exceptions.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + +class AutoscalingClientError(RESTError): + code = 500 + + +class 
ResourceContentionError(AutoscalingClientError): + + def __init__(self): + super(ResourceContentionError, self).__init__( + "ResourceContentionError", + "You already have a pending update to an Auto Scaling resource (for example, a group, instance, or load balancer).") diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 9df9fea12..4bdebf955 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -5,6 +5,9 @@ from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends from moto.elb import elb_backends from moto.elb.exceptions import LoadBalancerNotFoundError +from .exceptions import ( + ResourceContentionError, +) # http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown DEFAULT_COOLDOWN = 300 @@ -259,27 +262,8 @@ class FakeAutoScalingGroup(BaseModel): # Need more instances count_needed = int(self.desired_capacity) - int(curr_instance_count) - propagated_tags = {} - for tag in self.tags: - # boto uses 'propagate_at_launch - # boto3 and cloudformation use PropagateAtLaunch - if 'propagate_at_launch' in tag and tag['propagate_at_launch'] == 'true': - propagated_tags[tag['key']] = tag['value'] - if 'PropagateAtLaunch' in tag and tag['PropagateAtLaunch']: - propagated_tags[tag['Key']] = tag['Value'] - - propagated_tags[ASG_NAME_TAG] = self.name - reservation = self.autoscaling_backend.ec2_backend.add_instances( - self.launch_config.image_id, - count_needed, - self.launch_config.user_data, - self.launch_config.security_groups, - instance_type=self.launch_config.instance_type, - tags={'instance': propagated_tags} - ) - for instance in reservation.instances: - instance.autoscaling_group = self - self.instance_states.append(InstanceState(instance)) + propagated_tags = self.get_propagated_tags() + self.replace_autoscaling_group_instances(count_needed, propagated_tags) else: # Need to remove some instances count_to_remove = curr_instance_count - self.desired_capacity @@ -290,6 +274,31 @@ class FakeAutoScalingGroup(BaseModel): instance_ids_to_remove) self.instance_states = self.instance_states[count_to_remove:] + def get_propagated_tags(self): + propagated_tags = {} + for tag in self.tags: + # boto uses 'propagate_at_launch + # boto3 and cloudformation use PropagateAtLaunch + if 'propagate_at_launch' in tag and tag['propagate_at_launch'] == 'true': + propagated_tags[tag['key']] = tag['value'] + if 'PropagateAtLaunch' in tag and tag['PropagateAtLaunch']: + propagated_tags[tag['Key']] = tag['Value'] + return propagated_tags + + def replace_autoscaling_group_instances(self, count_needed, propagated_tags): + propagated_tags[ASG_NAME_TAG] = self.name + reservation = self.autoscaling_backend.ec2_backend.add_instances( + self.launch_config.image_id, + count_needed, + self.launch_config.user_data, + self.launch_config.security_groups, + instance_type=self.launch_config.instance_type, + tags={'instance': propagated_tags} + ) + for instance in reservation.instances: + instance.autoscaling_group = self + self.instance_states.append(InstanceState(instance)) + class AutoScalingBackend(BaseBackend): def __init__(self, ec2_backend, elb_backend): @@ -409,6 +418,40 @@ class AutoScalingBackend(BaseBackend): instance_states.extend(group.instance_states) return instance_states + def attach_instances(self, group_name, instance_ids): + group = self.autoscaling_groups[group_name] + original_size = len(group.instance_states) + + if (original_size + len(instance_ids)) > group.max_size: + raise ResourceContentionError + else: + 
group.desired_capacity = original_size + len(instance_ids) + new_instances = [InstanceState(self.ec2_backend.get_instance(x)) for x in instance_ids] + for instance in new_instances: + self.ec2_backend.create_tags([instance.instance.id], {ASG_NAME_TAG: group.name}) + group.instance_states.extend(new_instances) + self.update_attached_elbs(group.name) + + def detach_instances(self, group_name, instance_ids, should_decrement): + group = self.autoscaling_groups[group_name] + original_size = len(group.instance_states) + + detached_instances = [x for x in group.instance_states if x.instance.id in instance_ids] + for instance in detached_instances: + self.ec2_backend.delete_tags([instance.instance.id], {ASG_NAME_TAG: group.name}) + + new_instance_state = [x for x in group.instance_states if x.instance.id not in instance_ids] + group.instance_states = new_instance_state + + if should_decrement: + group.desired_capacity = original_size - len(instance_ids) + else: + count_needed = len(instance_ids) + group.replace_autoscaling_group_instances(count_needed, group.get_propagated_tags()) + + self.update_attached_elbs(group_name) + return detached_instances + def set_desired_capacity(self, group_name, desired_capacity): group = self.autoscaling_groups[group_name] group.set_desired_capacity(desired_capacity) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 2c3bddd79..cba660139 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -87,6 +87,27 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() + def attach_instances(self): + group_name = self._get_param('AutoScalingGroupName') + instance_ids = self._get_multi_param("InstanceIds.member") + self.autoscaling_backend.attach_instances( + group_name, instance_ids) + template = self.response_template(ATTACH_INSTANCES_TEMPLATE) + return template.render() + + def detach_instances(self): + group_name = self._get_param('AutoScalingGroupName') + instance_ids = self._get_multi_param("InstanceIds.member") + should_decrement_string = self._get_param('ShouldDecrementDesiredCapacity') + if should_decrement_string == 'true': + should_decrement = True + else: + should_decrement = False + detached_instances = self.autoscaling_backend.detach_instances( + group_name, instance_ids, should_decrement) + template = self.response_template(DETACH_INSTANCES_TEMPLATE) + return template.render(detached_instances=detached_instances) + def describe_auto_scaling_groups(self): names = self._get_multi_param("AutoScalingGroupNames.member") token = self._get_param("NextToken") @@ -284,6 +305,40 @@ CREATE_AUTOSCALING_GROUP_TEMPLATE = """ + + + +8d798a29-f083-11e1-bdfb-cb223EXAMPLE + +""" + +DETACH_INSTANCES_TEMPLATE = """ + + + {% for instance in detached_instances %} + + 5091cb52-547a-47ce-a236-c9ccbc2cb2c9EXAMPLE + {{ group_name }} + + At 2017-10-15T15:55:21Z instance {{ instance.instance.id }} was detached in response to a user request. + + Detaching EC2 instance: {{ instance.instance.id }} + 2017-10-15T15:55:21Z + 2017-10-15T15:55:21Z + InProgress + InProgress + 50 +
<Details>details</Details>
+      </member>
+      {% endfor %}
+    </Activities>
+  </DetachInstancesResult>
+  <ResponseMetadata>
+    <RequestId>8d798a29-f083-11e1-bdfb-cb223EXAMPLE</RequestId>
+  </ResponseMetadata>
+</DetachInstancesAnswer>
""" + DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index b919eb71c..d2f890c4d 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -653,3 +653,147 @@ def test_autoscaling_describe_policies_boto3(): response['ScalingPolicies'].should.have.length_of(1) response['ScalingPolicies'][0][ 'PolicyName'].should.equal('test_policy_down') + +@mock_autoscaling +@mock_ec2 +def test_detach_one_instance_decrement(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }] + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + instance_to_detach = response['AutoScalingGroups'][0]['Instances'][0]['InstanceId'] + instance_to_keep = response['AutoScalingGroups'][0]['Instances'][1]['InstanceId'] + + ec2_client = boto3.client('ec2', region_name='us-east-1') + + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + + response = client.detach_instances( + AutoScalingGroupName='test_asg', + InstanceIds=[instance_to_detach], + ShouldDecrementDesiredCapacity=True + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + response['AutoScalingGroups'][0]['Instances'].should.have.length_of(1) + + # test to ensure tag has been removed + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(1) + + # test to ensure tag is present on other instance + response = ec2_client.describe_instances(InstanceIds=[instance_to_keep]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(2) + +@mock_autoscaling +@mock_ec2 +def test_detach_one_instance(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }] + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + instance_to_detach = response['AutoScalingGroups'][0]['Instances'][0]['InstanceId'] + instance_to_keep = response['AutoScalingGroups'][0]['Instances'][1]['InstanceId'] + + ec2_client = boto3.client('ec2', region_name='us-east-1') + + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + + response = client.detach_instances( + AutoScalingGroupName='test_asg', + InstanceIds=[instance_to_detach], + ShouldDecrementDesiredCapacity=False + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = client.describe_auto_scaling_groups( 
+ AutoScalingGroupNames=['test_asg'] + ) + # test to ensure instance was replaced + response['AutoScalingGroups'][0]['Instances'].should.have.length_of(2) + + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(1) + + response = ec2_client.describe_instances(InstanceIds=[instance_to_keep]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(2) + +@mock_autoscaling +@mock_ec2 +def test_attach_one_instance(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=4, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }] + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + + ec2 = boto3.resource('ec2', 'us-east-1') + instances_to_add = [x.id for x in ec2.create_instances(ImageId='', MinCount=1, MaxCount=1)] + + response = client.attach_instances( + AutoScalingGroupName='test_asg', + InstanceIds=instances_to_add + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + response['AutoScalingGroups'][0]['Instances'].should.have.length_of(3) From 0af3427c15bf7e9f61cc2503849f676c684251f4 Mon Sep 17 00:00:00 2001 From: John Kerkstra Date: Mon, 16 Oct 2017 21:07:00 -0500 Subject: [PATCH 05/11] Add autoscaling load balancer methods (#1265) * add attach_load_balancers, detach_load_balancers, describe_load_balancers methods * prefer using amzn_request_id decorator to generate unique request ids --- moto/autoscaling/models.py | 18 +++ moto/autoscaling/responses.py | 67 ++++++++++- tests/test_autoscaling/test_autoscaling.py | 127 ++++++++++++++++++++- 3 files changed, 209 insertions(+), 3 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 4bdebf955..377890c40 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -539,6 +539,24 @@ class AutoScalingBackend(BaseBackend): group.tags = new_tags + def attach_load_balancers(self, group_name, load_balancer_names): + group = self.autoscaling_groups[group_name] + group.load_balancers.extend(load_balancer_names) + self.update_attached_elbs(group_name) + + def describe_load_balancers(self, group_name): + return self.autoscaling_groups[group_name].load_balancers + + def detach_load_balancers(self, group_name, load_balancer_names): + group = self.autoscaling_groups[group_name] + group_instance_ids = set( + state.instance.id for state in group.instance_states) + elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers) + for elb in elbs: + self.elb_backend.deregister_instances( + elb.name, group_instance_ids) + group.load_balancers = [x for x in group.load_balancers if x not in load_balancer_names] + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index cba660139..832103775 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -1,6 +1,7 @@ from __future__ import 
unicode_literals from moto.core.responses import BaseResponse +from moto.core.utils import amz_crc32, amzn_request_id from .models import autoscaling_backends @@ -87,6 +88,8 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id def attach_instances(self): group_name = self._get_param('AutoScalingGroupName') instance_ids = self._get_multi_param("InstanceIds.member") @@ -95,6 +98,8 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(ATTACH_INSTANCES_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id def detach_instances(self): group_name = self._get_param('AutoScalingGroupName') instance_ids = self._get_multi_param("InstanceIds.member") @@ -207,6 +212,34 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(EXECUTE_POLICY_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id + def attach_load_balancers(self): + group_name = self._get_param('AutoScalingGroupName') + load_balancer_names = self._get_multi_param("LoadBalancerNames.member") + self.autoscaling_backend.attach_load_balancers( + group_name, load_balancer_names) + template = self.response_template(ATTACH_LOAD_BALANCERS_TEMPLATE) + return template.render() + + @amz_crc32 + @amzn_request_id + def describe_load_balancers(self): + group_name = self._get_param('AutoScalingGroupName') + load_balancers = self.autoscaling_backend.describe_load_balancers(group_name) + template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) + return template.render(load_balancers=load_balancers) + + @amz_crc32 + @amzn_request_id + def detach_load_balancers(self): + group_name = self._get_param('AutoScalingGroupName') + load_balancer_names = self._get_multi_param("LoadBalancerNames.member") + self.autoscaling_backend.detach_load_balancers( + group_name, load_balancer_names) + template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE) + return template.render() + CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """ @@ -309,7 +342,7 @@ ATTACH_INSTANCES_TEMPLATE = """ + + +{{ requestid }} + +""" + +DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ + + + {% for load_balancer in load_balancers %} + + {{ load_balancer }} + Added + + {% endfor %} + + + +{{ requestid }} + +""" + +DETACH_LOAD_BALANCERS_TEMPLATE = """ + + +{{ requestid }} + +""" diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index d2f890c4d..def4d7077 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -8,7 +8,7 @@ from boto.ec2.autoscale import Tag import boto.ec2.elb import sure # noqa -from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_autoscaling_deprecated, mock_ec2 +from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_elb, mock_autoscaling_deprecated, mock_ec2 from tests.helpers import requires_boto_gte @@ -484,6 +484,131 @@ Boto3 ''' +@mock_autoscaling +@mock_elb +def test_describe_load_balancers(): + INSTANCE_COUNT = 2 + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + 
_ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + LoadBalancerNames=['my-lb'], + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }] + ) + + response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + list(response['LoadBalancers']).should.have.length_of(1) + response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') + + +@mock_autoscaling +@mock_elb +def test_attach_load_balancer(): + INSTANCE_COUNT = 2 + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }] + ) + + response = client.attach_load_balancers( + AutoScalingGroupName='test_asg', + LoadBalancerNames=['my-lb']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = elb_client.describe_load_balancers( + LoadBalancerNames=['my-lb'] + ) + list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(INSTANCE_COUNT) + + +@mock_autoscaling +@mock_elb +def test_detach_load_balancer(): + INSTANCE_COUNT = 2 + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + LoadBalancerNames=['my-lb'], + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }] + ) + + response = client.detach_load_balancers( + AutoScalingGroupName='test_asg', + LoadBalancerNames=['my-lb']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = elb_client.describe_load_balancers( + LoadBalancerNames=['my-lb'] + ) + list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(0) + + response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + list(response['LoadBalancers']).should.have.length_of(0) + + @mock_autoscaling def test_create_autoscaling_group_boto3(): client = boto3.client('autoscaling', region_name='us-east-1') From b8bb6c2dcfe2c241928523a1d0c1aa186fb867fa Mon Sep 17 00:00:00 2001 From: John Kerkstra Date: Tue, 17 Oct 2017 00:04:47 -0500 Subject: [PATCH 06/11] Fix bug with update_attached_elbs (#1266) * fixed bug where we were using elb_backend.describe_load_balancers incorrectly, returning all available 
load balancers when we wanted none. * improve skip, clean up tests --- moto/autoscaling/models.py | 4 ++ tests/test_autoscaling/test_autoscaling.py | 49 +++++++++++++++++++--- 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 377890c40..a921c74ab 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -504,6 +504,10 @@ class AutoScalingBackend(BaseBackend): group_instance_ids = set( state.instance.id for state in group.instance_states) + # skip this if group.load_balancers is empty + # otherwise elb_backend.describe_load_balancers returns all available load balancers + if not group.load_balancers: + return try: elbs = self.elb_backend.describe_load_balancers( names=group.load_balancers) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index def4d7077..b0bbc88a8 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -498,10 +498,10 @@ def test_describe_load_balancers(): ) client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' ) - _ = client.create_auto_scaling_group( + client.create_auto_scaling_group( AutoScalingGroupName='test_asg', LaunchConfigurationName='test_launch_configuration', LoadBalancerNames=['my-lb'], @@ -520,6 +520,43 @@ def test_describe_load_balancers(): list(response['LoadBalancers']).should.have.length_of(1) response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') +@mock_autoscaling +@mock_elb +def test_create_elb_and_autoscaling_group_no_relationship(): + INSTANCE_COUNT = 2 + ELB_NAME = 'my-elb' + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName=ELB_NAME, + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + ) + + # autoscaling group and elb should have no relationship + response = client.describe_load_balancers( + AutoScalingGroupName='test_asg' + ) + list(response['LoadBalancers']).should.have.length_of(0) + response = elb_client.describe_load_balancers( + LoadBalancerNames=[ELB_NAME] + ) + list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(0) + @mock_autoscaling @mock_elb @@ -535,10 +572,10 @@ def test_attach_load_balancer(): ) client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' ) - _ = client.create_auto_scaling_group( + client.create_auto_scaling_group( AutoScalingGroupName='test_asg', LaunchConfigurationName='test_launch_configuration', MinSize=0, @@ -577,10 +614,10 @@ def test_detach_load_balancer(): ) client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' ) - _ = client.create_auto_scaling_group( + 
client.create_auto_scaling_group( AutoScalingGroupName='test_asg', LaunchConfigurationName='test_launch_configuration', LoadBalancerNames=['my-lb'], From 194da53a0edcd38b920318d9a353705232f6f2a4 Mon Sep 17 00:00:00 2001 From: Andrew Miller Date: Tue, 17 Oct 2017 18:42:29 +0100 Subject: [PATCH 07/11] Correct the type of a default attribute in SNS PlatformEndpoint (#1267) The `Enabled` Attribute in the PlatformEndpoint of SNS current returns a boolean, however, the 'enabled' property is expecting a string as `.lower()` is called on the result. This change simply changes the default from `True` to `'True'` so the property works as expected. --- moto/sns/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 5b7277d22..4bab049b4 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -146,7 +146,7 @@ class PlatformEndpoint(BaseModel): if 'Token' not in self.attributes: self.attributes['Token'] = self.token if 'Enabled' not in self.attributes: - self.attributes['Enabled'] = True + self.attributes['Enabled'] = 'True' @property def enabled(self): From 317dbbd1a3d679549f384dc6870155b82a85dd5e Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 17 Oct 2017 18:13:27 -0700 Subject: [PATCH 08/11] requiring minimum botocore (#1268) Boto and Boto3 can be a little old but Moto will throw an error if botocoe doesn't even know about some of the services it supports. As of this commit Polly is new enough some users are running into exceptions. --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 9e423bdd4..e84ac3467 100755 --- a/setup.py +++ b/setup.py @@ -9,6 +9,7 @@ install_requires = [ "Jinja2>=2.8", "boto>=2.36.0", "boto3>=1.2.1", + "botocore>=1.7.12", "cookies", "cryptography>=2.0.0", "requests>=2.5", From b286123425ee59225dcc741b32ae61c02c8c897d Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Tue, 17 Oct 2017 18:33:57 -0700 Subject: [PATCH 09/11] bumping to version 1.1.23 (#1269) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e84ac3467..5cf32ade7 100755 --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ else: setup( name='moto', - version='1.1.22', + version='1.1.23', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From b8a0cfd6f75c266cf5844635007af9f77b469f51 Mon Sep 17 00:00:00 2001 From: John Kerkstra Date: Wed, 18 Oct 2017 16:23:14 -0500 Subject: [PATCH 10/11] filter out already related elbs to prevent duplicates (#1270) --- moto/autoscaling/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index a921c74ab..fd8efd54f 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -545,7 +545,8 @@ class AutoScalingBackend(BaseBackend): def attach_load_balancers(self, group_name, load_balancer_names): group = self.autoscaling_groups[group_name] - group.load_balancers.extend(load_balancer_names) + group.load_balancers.extend( + [x for x in load_balancer_names if x not in group.load_balancers]) self.update_attached_elbs(group_name) def describe_load_balancers(self, group_name): From b40c5e557e93a1cacb87228aadc6083d5ccf9ec3 Mon Sep 17 00:00:00 2001 From: Terry Cain Date: Thu, 19 Oct 2017 00:22:35 +0100 Subject: [PATCH 11/11] Fixed S3 versioning bug + minor cleanup (#1272) * Fixed S3 versioning bug + minor cleanup Fixes 1271 * flake8 --- moto/s3/models.py | 45 
+++++++++++++++------------------------- tests/test_s3/test_s3.py | 23 ++++++++++++++++++++ 2 files changed, 40 insertions(+), 28 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index ae05292f2..91d3c1e2d 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -81,6 +81,9 @@ class FakeKey(BaseModel): def restore(self, days): self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days) + def increment_version(self): + self._version_id += 1 + @property def etag(self): if self._etag is None: @@ -323,19 +326,10 @@ class CorsRule(BaseModel): def __init__(self, allowed_methods, allowed_origins, allowed_headers=None, expose_headers=None, max_age_seconds=None): - # Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`, - # whereas Python 3 is OK with str. This causes issues with the XML parser, which returns - # unicode strings in Python 2. So, need to do this to make it work in both Python 2 and 3: - import sys - if sys.version_info >= (3, 0): - str_type = str - else: - str_type = basestring # noqa - - self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, str_type) else allowed_methods - self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, str_type) else allowed_origins - self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, str_type) else allowed_headers - self.exposed_headers = [expose_headers] if isinstance(expose_headers, str_type) else expose_headers + self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, six.string_types) else allowed_methods + self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, six.string_types) else allowed_origins + self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, six.string_types) else allowed_headers + self.exposed_headers = [expose_headers] if isinstance(expose_headers, six.string_types) else expose_headers self.max_age_seconds = max_age_seconds @@ -389,25 +383,16 @@ class FakeBucket(BaseModel): if len(rules) > 100: raise MalformedXML() - # Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`, - # whereas Python 3 is OK with str. This causes issues with the XML parser, which returns - # unicode strings in Python 2. 
So, need to do this to make it work in both Python 2 and 3: - import sys - if sys.version_info >= (3, 0): - str_type = str - else: - str_type = basestring # noqa - for rule in rules: - assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], str_type) - assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], str_type) + assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], six.string_types) + assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], six.string_types) assert isinstance(rule.get("AllowedHeader", []), list) or isinstance(rule.get("AllowedHeader", ""), - str_type) + six.string_types) assert isinstance(rule.get("ExposedHeader", []), list) or isinstance(rule.get("ExposedHeader", ""), - str_type) - assert isinstance(rule.get("MaxAgeSeconds", "0"), str_type) + six.string_types) + assert isinstance(rule.get("MaxAgeSeconds", "0"), six.string_types) - if isinstance(rule["AllowedMethod"], str_type): + if isinstance(rule["AllowedMethod"], six.string_types): methods = [rule["AllowedMethod"]] else: methods = rule["AllowedMethod"] @@ -745,6 +730,10 @@ class S3Backend(BaseBackend): if dest_key_name != src_key_name: key = key.copy(dest_key_name) dest_bucket.keys[dest_key_name] = key + + # By this point, the destination key must exist, or KeyError + if dest_bucket.is_versioned: + dest_bucket.keys[dest_key_name].increment_version() if storage is not None: key.set_storage_class(storage) if acl is not None: diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index e4cb499b9..87668d8b7 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1364,6 +1364,29 @@ def test_boto3_head_object_with_versioning(): old_head_object['ContentLength'].should.equal(len(old_content)) +@mock_s3 +def test_boto3_copy_object_with_versioning(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + + obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId'] + obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Versions should be the same + obj1_version.should.equal(obj2_version) + + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2') + obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Version should be different to previous version + obj2_version_new.should_not.equal(obj2_version) + + @mock_s3 def test_boto3_head_object_if_modified_since(): s3 = boto3.client('s3', region_name='us-east-1')
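
Note on the response convention these patches introduce: the amz_crc32 and amzn_request_id decorators added to moto/core/utils.py in PATCH 03 (and reused by the SQS, DynamoDB, Lambda and autoscaling responses) accept either a bare string body or a (body, headers) / (status, headers, body) tuple from the wrapped method and always hand back a (status, headers, body) triple, adding the x-amz-crc32 and x-amzn-requestid headers; amzn_request_id also substitutes a literal {{ requestid }} placeholder in the body, which is what lets the SQS CREATE_QUEUE_RESPONSE template return a fresh request id per call. A minimal sketch of how a response class might opt in — the ExampleResponse class and its XML body are illustrative only, not part of these patches:

from moto.core.responses import BaseResponse
from moto.core.utils import amz_crc32, amzn_request_id


class ExampleResponse(BaseResponse):
    # amz_crc32 is the outer decorator on purpose: the CRC must be computed
    # after amzn_request_id has finished editing the body.
    @amz_crc32
    @amzn_request_id
    def call_action(self):
        # The wrapped method may return a plain string; the decorators
        # normalise it into a (status, headers, body) tuple, fill in the
        # {{ requestid }} placeholder and attach the x-amzn-requestid and
        # x-amz-crc32 headers.
        return '<ExampleResult><RequestId>{{ requestid }}</RequestId></ExampleResult>'

The same normalisation is mirrored in BaseResponse.call_action in PATCH 03, which now unpacks either a two-tuple or a three-tuple, so decorated and undecorated handlers can coexist.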