From f37bad0e0070c87c0be5b0077cb8635d88a09c34 Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Thu, 23 Feb 2017 21:37:43 -0500
Subject: [PATCH] Lints.

---
 moto/__init__.py | 2 +-
 moto/apigateway/__init__.py | 2 +-
 moto/apigateway/exceptions.py | 4 +-
 moto/apigateway/models.py | 163 +++---
 moto/apigateway/responses.py | 61 ++-
 moto/autoscaling/__init__.py | 2 +-
 moto/autoscaling/models.py | 67 ++-
 moto/autoscaling/responses.py | 39 +-
 moto/awslambda/__init__.py | 2 +-
 moto/awslambda/models.py | 23 +-
 moto/awslambda/responses.py | 2 -
 moto/cloudformation/__init__.py | 5 +-
 moto/cloudformation/exceptions.py | 4 +-
 moto/cloudformation/models.py | 14 +-
 moto/cloudformation/parsing.py | 58 ++-
 moto/cloudformation/responses.py | 21 +-
 moto/cloudwatch/__init__.py | 2 +-
 moto/cloudwatch/models.py | 12 +-
 moto/cloudwatch/responses.py | 27 +-
 moto/core/exceptions.py | 6 +-
 moto/core/models.py | 20 +-
 moto/core/responses.py | 44 +-
 moto/core/utils.py | 2 +-
 moto/datapipeline/__init__.py | 2 +-
 moto/datapipeline/models.py | 14 +-
 moto/datapipeline/responses.py | 9 +-
 moto/dynamodb/models.py | 14 +-
 moto/dynamodb/responses.py | 18 +-
 moto/dynamodb2/__init__.py | 2 +-
 moto/dynamodb2/comparisons.py | 12 +-
 moto/dynamodb2/models.py | 80 +--
 moto/dynamodb2/responses.py | 79 +--
 moto/ec2/__init__.py | 2 +-
 moto/ec2/exceptions.py | 41 ++
 moto/ec2/models.py | 467 ++++++++++++------
 moto/ec2/responses/__init__.py | 1 +
 moto/ec2/responses/amazon_dev_pay.py | 4 +-
 moto/ec2/responses/amis.py | 31 +-
 .../availability_zones_and_regions.py | 2 +
 moto/ec2/responses/customer_gateways.py | 6 +-
 moto/ec2/responses/dhcp_options.py | 7 +-
 moto/ec2/responses/elastic_block_store.py | 62 ++-
 moto/ec2/responses/elastic_ip_addresses.py | 58 ++-
 .../responses/elastic_network_interfaces.py | 35 +-
 moto/ec2/responses/general.py | 1 +
 moto/ec2/responses/instances.py | 46 +-
 moto/ec2/responses/internet_gateways.py | 7 +-
 moto/ec2/responses/ip_addresses.py | 7 +-
 moto/ec2/responses/key_pairs.py | 6 +-
 moto/ec2/responses/monitoring.py | 7 +-
 moto/ec2/responses/nat_gateways.py | 3 +-
 moto/ec2/responses/network_acls.py | 3 +-
 moto/ec2/responses/placement_groups.py | 10 +-
 moto/ec2/responses/reserved_instances.py | 19 +-
 moto/ec2/responses/route_tables.py | 49 +-
 moto/ec2/responses/security_groups.py | 12 +-
 moto/ec2/responses/spot_fleets.py | 16 +-
 moto/ec2/responses/spot_instances.py | 30 +-
 moto/ec2/responses/subnets.py | 4 +-
 moto/ec2/responses/tags.py | 6 +-
 .../ec2/responses/virtual_private_gateways.py | 2 +
 moto/ec2/responses/vm_export.py | 10 +-
 moto/ec2/responses/vm_import.py | 13 +-
 moto/ec2/responses/vpc_peering_connections.py | 19 +-
 moto/ec2/responses/vpcs.py | 7 +-
 moto/ec2/responses/vpn_connections.py | 10 +-
 moto/ec2/responses/windows.py | 13 +-
 moto/ec2/utils.py | 65 ++-
 moto/ecs/__init__.py | 2 +-
 moto/ecs/models.py | 181 ++++---
 moto/ecs/responses.py | 117 ++---
 moto/elb/__init__.py | 2 +-
 moto/elb/exceptions.py | 6 +-
 moto/elb/models.py | 57 ++-
 moto/elb/responses.py | 90 ++--
 moto/emr/__init__.py | 2 +-
 moto/emr/models.py | 53 +-
 moto/emr/responses.py | 66 ++-
 moto/emr/utils.py | 6 +-
 moto/events/models.py | 16 +-
 moto/events/responses.py | 9 +-
 moto/glacier/__init__.py | 2 +-
 moto/glacier/models.py | 1 +
 moto/glacier/responses.py | 3 +-
 moto/iam/__init__.py | 2 +-
 moto/iam/models.py | 70 ++-
 moto/iam/responses.py | 36 +-
 moto/instance_metadata/__init__.py | 2 +-
 moto/instance_metadata/models.py | 1 +
 moto/instance_metadata/responses.py | 4 +-
 moto/kinesis/__init__.py | 2 +-
 moto/kinesis/exceptions.py | 5 +
 moto/kinesis/models.py | 77 +--
 moto/kinesis/responses.py | 33 +-
 moto/kinesis/utils.py | 3 +-
 moto/kms/__init__.py | 2 +-
 moto/kms/models.py | 5 +-
 moto/kms/responses.py | 30 +-
 moto/opsworks/__init__.py | 2 +-
 moto/opsworks/exceptions.py | 2 +
 moto/opsworks/models.py | 28 +-
 moto/opsworks/responses.py | 36 +-
 moto/packages/httpretty/__init__.py | 1 +
 moto/packages/httpretty/compat.py | 3 +
 moto/packages/httpretty/core.py | 42 +-
 moto/packages/httpretty/errors.py | 1 +
 moto/packages/responses/responses.py | 5 +-
 moto/packages/responses/setup.py | 1 +
 moto/packages/responses/test_responses.py | 1 +
 moto/rds/__init__.py | 2 +-
 moto/rds/exceptions.py | 4 +
 moto/rds/models.py | 27 +-
 moto/rds/responses.py | 27 +-
 moto/rds2/__init__.py | 2 +-
 moto/rds2/exceptions.py | 6 +
 moto/rds2/models.py | 167 ++++---
 moto/rds2/responses.py | 47 +-
 moto/redshift/__init__.py | 2 +-
 moto/redshift/exceptions.py | 6 +
 moto/redshift/models.py | 46 +-
 moto/redshift/responses.py | 21 +-
 moto/route53/models.py | 37 +-
 moto/route53/responses.py | 78 +--
 moto/s3/__init__.py | 2 +-
 moto/s3/exceptions.py | 2 +
 moto/s3/models.py | 50 +-
 moto/s3/responses.py | 63 ++-
 moto/s3/utils.py | 3 +-
 moto/server.py | 15 +-
 moto/ses/__init__.py | 2 +-
 moto/ses/models.py | 5 +
 moto/sns/__init__.py | 2 +-
 moto/sns/models.py | 33 +-
 moto/sns/responses.py | 25 +-
 moto/sqs/__init__.py | 2 +-
 moto/sqs/models.py | 13 +-
 moto/sqs/responses.py | 35 +-
 moto/sqs/utils.py | 19 +-
 moto/sts/models.py | 4 +
 moto/sts/responses.py | 1 +
 moto/swf/__init__.py | 2 +-
 moto/swf/exceptions.py | 23 +-
 moto/swf/models/__init__.py | 37 +-
 moto/swf/models/activity_task.py | 1 +
 moto/swf/models/activity_type.py | 1 +
 moto/swf/models/decision_task.py | 4 +-
 moto/swf/models/domain.py | 1 +
 moto/swf/models/generic_type.py | 1 +
 moto/swf/models/history_event.py | 4 +-
 moto/swf/models/timeout.py | 1 +
 moto/swf/models/workflow_execution.py | 36 +-
 moto/swf/models/workflow_type.py | 1 +
 moto/swf/responses.py | 51 +-
 tests/backport_assert_raises.py | 1 +
 tests/helpers.py | 5 +-
 tests/test_apigateway/test_apigateway.py | 283 ++++++-----
 tests/test_autoscaling/test_autoscaling.py | 122 ++---
 .../test_launch_configurations.py | 18 +-
 tests/test_awslambda/test_lambda.py | 51 +-
 .../rds_mysql_with_db_parameter_group.py | 361 +++++++-------
 .../fixtures/rds_mysql_with_read_replica.py | 355 ++++++-------
 .../test_cloudformation/fixtures/redshift.py | 360 +++++++-------
 .../route53_ec2_instance_with_public_ip.py | 54 +-
 .../fixtures/route53_health_check.py | 48 +-
 .../fixtures/route53_roundrobin.py | 76 +--
 .../test_cloudformation_stack_crud.py | 47 +-
 .../test_cloudformation_stack_crud_boto3.py | 21 +-
 .../test_cloudformation_stack_integration.py | 306 +++++++-----
 tests/test_cloudformation/test_server.py | 11 +-
 .../test_cloudformation/test_stack_parsing.py | 19 +-
 tests/test_cloudwatch/test_cloudwatch.py | 16 +-
 tests/test_core/test_decorator_calls.py | 5 +-
 tests/test_core/test_instance_metadata.py | 6 +-
 tests/test_core/test_responses.py | 24 +-
 tests/test_core/test_server.py | 9 +-
 tests/test_core/test_url_mapping.py | 3 +-
 tests/test_datapipeline/test_datapipeline.py | 9 +-
 tests/test_datapipeline/test_server.py | 7 +-
 tests/test_dynamodb/test_dynamodb.py | 9 +-
 .../test_dynamodb_table_with_range_key.py | 24 +-
 .../test_dynamodb_table_without_range_key.py | 3 +-
 tests/test_dynamodb2/test_dynamodb.py | 15 +-
 .../test_dynamodb_table_with_range_key.py | 130 +++--
 .../test_dynamodb_table_without_range_key.py | 21 +-
 tests/test_ec2/test_amis.py | 129 +++--
 tests/test_ec2/test_customer_gateways.py | 12 +-
 tests/test_ec2/test_dhcp_options.py | 45 +-
 tests/test_ec2/test_elastic_block_store.py | 169 ++++---
 tests/test_ec2/test_elastic_ip_addresses.py | 85 ++--
 .../test_elastic_network_interfaces.py | 90 ++--
 tests/test_ec2/test_instances.py | 251 ++++++----
 tests/test_ec2/test_internet_gateways.py | 39 +-
 tests/test_ec2/test_key_pairs.py | 9 +-
 tests/test_ec2/test_nat_gateway.py | 21 +-
 tests/test_ec2/test_regions.py | 14 +-
 tests/test_ec2/test_route_tables.py | 92 ++--
 tests/test_ec2/test_security_groups.py | 178 ++++---
 tests/test_ec2/test_server.py | 3 +-
 tests/test_ec2/test_spot_fleet.py | 112 +++--
 tests/test_ec2/test_spot_instances.py | 64 +--
 tests/test_ec2/test_subnets.py | 66 ++-
 tests/test_ec2/test_tags.py | 26 +-
 .../test_ec2/test_virtual_private_gateways.py | 1 +
 tests/test_ec2/test_vpc_peering.py | 1 -
 tests/test_ec2/test_vpcs.py | 17 +-
 tests/test_ec2/test_vpn_connections.py | 9 +-
 tests/test_ecs/test_ecs_boto3.py | 245 +++++----
 tests/test_elb/test_elb.py | 243 +++++----
 tests/test_emr/test_emr.py | 24 +-
 tests/test_emr/test_emr_boto3.py | 93 ++--
 tests/test_glacier/test_glacier_jobs.py | 18 +-
 tests/test_glacier/test_glacier_server.py | 3 +-
 tests/test_iam/test_iam.py | 93 ++--
 tests/test_iam/test_iam_groups.py | 6 +-
 tests/test_iam/test_server.py | 5 +-
 tests/test_kinesis/test_firehose.py | 10 +-
 tests/test_kinesis/test_kinesis.py | 107 ++--
 tests/test_kms/test_kms.py | 155 ++++--
 tests/test_opsworks/test_instances.py | 8 +-
 tests/test_opsworks/test_layers.py | 4 +-
 tests/test_opsworks/test_stack.py | 2 -
 tests/test_rds/test_rds.py | 49 +-
 tests/test_rds2/test_rds2.py | 302 +++++++----
 tests/test_rds2/test_server.py | 2 +-
 tests/test_redshift/test_redshift.py | 120 +++--
 tests/test_redshift/test_server.py | 3 +-
 tests/test_route53/test_route53.py | 112 +++--
 tests/test_s3/test_s3.py | 146 ++++--
 tests/test_s3/test_s3_lifecycle.py | 4 +-
 tests/test_s3/test_s3_utils.py | 7 +-
 tests/test_s3/test_server.py | 6 +-
 .../test_bucket_path_server.py | 3 +-
 .../test_s3bucket_path/test_s3bucket_path.py | 25 +-
 .../test_s3bucket_path_utils.py | 3 +-
 tests/test_ses/test_ses.py | 29 +-
 tests/test_sns/test_application.py | 89 ++--
 tests/test_sns/test_application_boto3.py | 36 +-
 tests/test_sns/test_publishing.py | 18 +-
 tests/test_sns/test_publishing_boto3.py | 3 +-
 tests/test_sns/test_server.py | 6 +-
 tests/test_sns/test_subscriptions.py | 45 +-
 tests/test_sns/test_subscriptions_boto3.py | 18 +-
 tests/test_sns/test_topics.py | 42 +-
 tests/test_sns/test_topics_boto3.py | 18 +-
 tests/test_sqs/test_server.py | 9 +-
 tests/test_sqs/test_sqs.py | 43 +-
 tests/test_sts/test_sts.py | 25 +-
 tests/test_swf/models/test_activity_task.py | 3 +-
 tests/test_swf/models/test_decision_task.py | 3 +-
 tests/test_swf/models/test_domain.py | 27 +-
 tests/test_swf/models/test_generic_type.py | 10 +-
 .../models/test_workflow_execution.py | 33 +-
 .../test_swf/responses/test_activity_tasks.py | 78 ++-
 .../test_swf/responses/test_activity_types.py | 15 +-
 .../test_swf/responses/test_decision_tasks.py | 51 +-
 tests/test_swf/responses/test_domains.py | 3 +-
 tests/test_swf/responses/test_timeouts.py | 27 +-
 .../responses/test_workflow_executions.py | 27 +-
 .../test_swf/responses/test_workflow_types.py | 18 +-
 tests/test_swf/utils.py | 6 +-
 260 files changed, 6370 insertions(+), 3773 deletions(-)

diff --git a/moto/__init__.py b/moto/__init__.py
index 5a16a0a8e..546603b00 100644
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 import logging
-#logging.getLogger('boto').setLevel(logging.CRITICAL)
+# logging.getLogger('boto').setLevel(logging.CRITICAL)
 
 __title__ = 'moto'
 __version__ = '0.4.31'
diff --git a/moto/apigateway/__init__.py b/moto/apigateway/__init__.py
index c6ea9a3bc..98b2058d9 100644
--- a/moto/apigateway/__init__.py
+++ b/moto/apigateway/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import apigateway_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 apigateway_backend = apigateway_backends['us-east-1']
 mock_apigateway = base_decorator(apigateway_backends)
diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py
index 77a1c932a..d4cf8d1c7 100644
--- a/moto/apigateway/exceptions.py
+++ b/moto/apigateway/exceptions.py
@@ -4,9 +4,7 @@ from moto.core.exceptions import RESTError
 
 class StageNotFoundException(RESTError):
     code = 404
+
     def __init__(self):
         super(StageNotFoundException, self).__init__(
             "NotFoundException", "Invalid stage identifier specified")
-
-
-
diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py
index 4b09f44bc..6585d19f5 100644
--- a/moto/apigateway/models.py
+++ b/moto/apigateway/models.py
@@ -14,15 +14,18 @@ STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_nam
 
 
 class Deployment(dict):
+
     def __init__(self, deployment_id, name, description=""):
         super(Deployment, self).__init__()
         self['id'] = deployment_id
         self['stageName'] = name
         self['description'] = description
-        self['createdDate'] = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
+        self['createdDate'] = iso_8601_datetime_with_milliseconds(
+            datetime.datetime.now())
 
 
 class IntegrationResponse(dict):
+
     def __init__(self, status_code, selection_pattern=None):
         self['responseTemplates'] = {"application/json": None}
         self['statusCode'] = status_code
@@ -31,6 +34,7 @@
 
 
 class Integration(dict):
+
     def __init__(self, integration_type, uri, http_method, request_templates=None):
         super(Integration, self).__init__()
         self['type'] = integration_type
@@ -42,7 +46,8 @@
         }
 
     def create_integration_response(self, status_code, selection_pattern):
-        integration_response = IntegrationResponse(status_code, selection_pattern)
+        integration_response = IntegrationResponse(
+            status_code, selection_pattern)
         self["integrationResponses"][status_code] = integration_response
         return integration_response
@@ -54,12 +59,14 @@
 
 
 class MethodResponse(dict):
+
     def __init__(self, status_code):
         super(MethodResponse, self).__init__()
         self['statusCode'] = status_code
 
 
 class Method(dict):
+
     def __init__(self, method_type, authorization_type):
         super(Method, self).__init__()
         self.update(dict(
@@ -86,6 +93,7 @@ class Method(dict):
 
 
 class Resource(object):
+
     def __init__(self, id, region_name, api_id, path_part, parent_id):
         self.id = id
         self.region_name = region_name
@@ -127,14 +135,17 @@
 
         if integration_type == 'HTTP':
             uri = integration['uri']
-            requests_func = getattr(requests, integration['httpMethod'].lower())
+            requests_func = getattr(requests, integration[
+                'httpMethod'].lower())
             response = requests_func(uri)
         else:
-            raise NotImplementedError("The {0} type has not been implemented".format(integration_type))
+            raise NotImplementedError(
+                "The {0} type has not been implemented".format(integration_type))
 
         return response.status_code, response.text
 
     def add_method(self, method_type, authorization_type):
-        method = Method(method_type=method_type, authorization_type=authorization_type)
+        method = Method(method_type=method_type,
+                        authorization_type=authorization_type)
         self.resource_methods[method_type] = method
         return method
@@ -142,7 +153,8 @@
         return self.resource_methods[method_type]
 
     def add_integration(self, method_type, integration_type, uri, request_templates=None):
-        integration = Integration(integration_type, uri, method_type, request_templates=request_templates)
+        integration = Integration(
+            integration_type, uri, method_type, request_templates=request_templates)
         self.resource_methods[method_type]['methodIntegration'] = integration
         return integration
 
@@ -155,9 +167,8 @@
 
 
 class Stage(dict):
-
     def __init__(self, name=None, deployment_id=None, variables=None,
-                 description='',cacheClusterEnabled=False,cacheClusterSize=None):
+                 description='', cacheClusterEnabled=False, cacheClusterSize=None):
         super(Stage, self).__init__()
         if variables is None:
             variables = {}
@@ -190,21 +201,24 @@
             elif op['op'] == 'replace':
                 # Method Settings drop into here
                 # (e.g., path could be '/*/*/logging/loglevel')
-                split_path = op['path'].split('/',3)
-                if len(split_path)!=4:
+                split_path = op['path'].split('/', 3)
+                if len(split_path) != 4:
                     continue
-                self._patch_method_setting('/'.join(split_path[1:3]),split_path[3],op['value'])
+                self._patch_method_setting(
+                    '/'.join(split_path[1:3]), split_path[3], op['value'])
             else:
-                raise Exception('Patch operation "%s" not implemented' % op['op'])
+                raise Exception(
+                    'Patch operation "%s" not implemented' % op['op'])
         return self
 
-    def _patch_method_setting(self,resource_path_and_method,key,value):
+    def _patch_method_setting(self, resource_path_and_method, key, value):
         updated_key = self._method_settings_translations(key)
         if updated_key is not None:
             if resource_path_and_method not in self['methodSettings']:
-                self['methodSettings'][resource_path_and_method] = self._get_default_method_settings()
-            self['methodSettings'][resource_path_and_method][updated_key] = self._convert_to_type(updated_key,value)
-
+                self['methodSettings'][
+                    resource_path_and_method] = self._get_default_method_settings()
+            self['methodSettings'][resource_path_and_method][
+                updated_key] = self._convert_to_type(updated_key, value)
 
     def _get_default_method_settings(self):
         return {
@@ -219,18 +233,18 @@
             "requireAuthorizationForCacheControl": True
         }
 
-    def _method_settings_translations(self,key):
+    def _method_settings_translations(self, key):
         mappings = {
-            'metrics/enabled' :'metricsEnabled',
-            'logging/loglevel' : 'loggingLevel',
-            'logging/dataTrace' : 'dataTraceEnabled' ,
-            'throttling/burstLimit' : 'throttlingBurstLimit',
-            'throttling/rateLimit' : 'throttlingRateLimit',
-            'caching/enabled' : 'cachingEnabled',
-            'caching/ttlInSeconds' : 'cacheTtlInSeconds',
-            'caching/dataEncrypted' : 'cacheDataEncrypted',
-            'caching/requireAuthorizationForCacheControl' : 'requireAuthorizationForCacheControl',
-            'caching/unauthorizedCacheControlHeaderStrategy' : 'unauthorizedCacheControlHeaderStrategy'
+            'metrics/enabled': 'metricsEnabled',
+            'logging/loglevel': 'loggingLevel',
+            'logging/dataTrace': 'dataTraceEnabled',
+            'throttling/burstLimit': 'throttlingBurstLimit',
+            'throttling/rateLimit': 'throttlingRateLimit',
+            'caching/enabled': 'cachingEnabled',
+            'caching/ttlInSeconds': 'cacheTtlInSeconds',
+            'caching/dataEncrypted': 'cacheDataEncrypted',
+            'caching/requireAuthorizationForCacheControl': 'requireAuthorizationForCacheControl',
+            'caching/unauthorizedCacheControlHeaderStrategy': 'unauthorizedCacheControlHeaderStrategy'
         }
 
         if key in mappings:
@@ -238,21 +252,21 @@
         else:
             None
 
-    def _str2bool(self,v):
+    def _str2bool(self, v):
         return v.lower() == "true"
 
-    def _convert_to_type(self,key,val):
+    def _convert_to_type(self, key, val):
         type_mappings = {
-            'metricsEnabled' : 'bool',
-            'loggingLevel' : 'str',
-            'dataTraceEnabled' : 'bool',
-            'throttlingBurstLimit' : 'int',
-            'throttlingRateLimit' : 'float',
-            'cachingEnabled' : 'bool',
-            'cacheTtlInSeconds' : 'int',
-            'cacheDataEncrypted' : 'bool',
-            'requireAuthorizationForCacheControl' :'bool',
-            'unauthorizedCacheControlHeaderStrategy' : 'str'
+            'metricsEnabled': 'bool',
+            'loggingLevel': 'str',
+            'dataTraceEnabled': 'bool',
+            'throttlingBurstLimit': 'int',
+            'throttlingRateLimit': 'float',
+            'cachingEnabled': 'bool',
+            'cacheTtlInSeconds': 'int',
+            'cacheDataEncrypted': 'bool',
+            'requireAuthorizationForCacheControl': 'bool',
+            'unauthorizedCacheControlHeaderStrategy': 'str'
         }
 
         if key in type_mappings:
@@ -261,7 +275,7 @@
             if type_value == 'bool':
                 return self._str2bool(val)
             elif type_value == 'int':
-                return int(val)
+                return int(val)
             elif type_value == 'float':
                 return float(val)
             else:
@@ -269,10 +283,8 @@
         else:
             return str(val)
 
-
-
-    def _apply_operation_to_variables(self,op):
-        key = op['path'][op['path'].rindex("variables/")+10:]
+    def _apply_operation_to_variables(self, op):
+        key = op['path'][op['path'].rindex("variables/") + 10:]
         if op['op'] == 'remove':
             self['variables'].pop(key, None)
         elif op['op'] == 'replace':
@@ -281,8 +293,8 @@
             raise Exception('Patch operation "%s" not implemented' % op['op'])
 
 
-
 class RestAPI(object):
+
     def __init__(self, id, region_name, name, description):
         self.id = id
         self.region_name = region_name
@@ -306,7 +318,8 @@
 
     def add_child(self, path, parent_id=None):
         child_id = create_id()
-        child = Resource(id=child_id, region_name=self.region_name, api_id=self.id, path_part=path, parent_id=parent_id)
+        child = Resource(id=child_id, region_name=self.region_name,
+                         api_id=self.id, path_part=path, parent_id=parent_id)
         self.resources[child_id] = child
         return child
@@ -326,25 +339,28 @@
         return status_code, {}, response
 
     def update_integration_mocks(self, stage_name):
-        stage_url = STAGE_URL.format(api_id=self.id.upper(), region_name=self.region_name, stage_name=stage_name)
-        responses.add_callback(responses.GET, stage_url, callback=self.resource_callback)
+        stage_url = STAGE_URL.format(api_id=self.id.upper(),
+                                     region_name=self.region_name, stage_name=stage_name)
+        responses.add_callback(responses.GET, stage_url,
+                               callback=self.resource_callback)
 
-    def create_stage(self, name, deployment_id,variables=None,description='',cacheClusterEnabled=None,cacheClusterSize=None):
+    def create_stage(self, name, deployment_id, variables=None, description='', cacheClusterEnabled=None, cacheClusterSize=None):
        if variables is None:
             variables = {}
-        stage = Stage(name=name, deployment_id=deployment_id,variables=variables,
-            description=description,cacheClusterSize=cacheClusterSize,cacheClusterEnabled=cacheClusterEnabled)
+        stage = Stage(name=name, deployment_id=deployment_id, variables=variables,
+                      description=description, cacheClusterSize=cacheClusterSize, cacheClusterEnabled=cacheClusterEnabled)
         self.stages[name] = stage
         self.update_integration_mocks(name)
         return stage
 
-    def create_deployment(self, name, description="",stage_variables=None):
+    def create_deployment(self, name, description="", stage_variables=None):
         if stage_variables is None:
             stage_variables = {}
         deployment_id = create_id()
         deployment = Deployment(deployment_id, name, description)
         self.deployments[deployment_id] = deployment
-        self.stages[name] = Stage(name=name, deployment_id=deployment_id,variables=stage_variables)
+        self.stages[name] = Stage(
+            name=name, deployment_id=deployment_id, variables=stage_variables)
         self.update_integration_mocks(name)
         return deployment
 
@@ -353,7 +369,7 @@
         return self.deployments[deployment_id]
 
     def get_stages(self):
-        return list(self.stages.values())
+        return list(self.stages.values())
 
     def get_deployments(self):
         return list(self.deployments.values())
@@ -363,6 +379,7 @@
 
 
 class APIGatewayBackend(BaseBackend):
+
     def __init__(self, region_name):
         super(APIGatewayBackend, self).__init__()
         self.apis = {}
@@ -429,19 +446,17 @@
         else:
             return stage
 
-
     def get_stages(self, function_id):
         api = self.get_rest_api(function_id)
         return api.get_stages()
 
-
     def create_stage(self, function_id, stage_name, deploymentId,
-            variables=None,description='',cacheClusterEnabled=None,cacheClusterSize=None):
+                     variables=None, description='', cacheClusterEnabled=None, cacheClusterSize=None):
         if variables is None:
             variables = {}
         api = self.get_rest_api(function_id)
-        api.create_stage(stage_name,deploymentId,variables=variables,
-            description=description,cacheClusterEnabled=cacheClusterEnabled,cacheClusterSize=cacheClusterSize)
+        api.create_stage(stage_name, deploymentId, variables=variables,
+                         description=description, cacheClusterEnabled=cacheClusterEnabled, cacheClusterSize=cacheClusterSize)
         return api.stages.get(stage_name)
 
     def update_stage(self, function_id, stage_name, patch_operations):
@@ -467,10 +482,10 @@
         return method_response
 
     def create_integration(self, function_id, resource_id, method_type, integration_type, uri,
-            request_templates=None):
+                           request_templates=None):
         resource = self.get_resource(function_id, resource_id)
         integration = resource.add_integration(method_type, integration_type, uri,
-                request_templates=request_templates)
+                                               request_templates=request_templates)
         return integration
 
     def get_integration(self, function_id, resource_id, method_type):
@@ -482,25 +497,31 @@
         return resource.delete_integration(method_type)
 
     def create_integration_response(self, function_id, resource_id, method_type, status_code, selection_pattern):
-        integration = self.get_integration(function_id, resource_id, method_type)
-        integration_response = integration.create_integration_response(status_code, selection_pattern)
+        integration = self.get_integration(
+            function_id, resource_id, method_type)
+        integration_response = integration.create_integration_response(
+            status_code, selection_pattern)
         return integration_response
 
     def get_integration_response(self, function_id, resource_id, method_type, status_code):
-        integration = self.get_integration(function_id, resource_id, method_type)
-        integration_response = integration.get_integration_response(status_code)
+        integration = self.get_integration(
+            function_id, resource_id, method_type)
+        integration_response = integration.get_integration_response(
+            status_code)
         return integration_response
 
     def delete_integration_response(self, function_id, resource_id, method_type, status_code):
-        integration = self.get_integration(function_id, resource_id, method_type)
-        integration_response = integration.delete_integration_response(status_code)
+        integration = self.get_integration(
+            function_id, resource_id, method_type)
+        integration_response = integration.delete_integration_response(
+            status_code)
         return integration_response
 
-    def create_deployment(self, function_id, name, description ="", stage_variables=None):
+    def create_deployment(self, function_id, name, description="", stage_variables=None):
         if stage_variables is None:
             stage_variables = {}
         api = self.get_rest_api(function_id)
-        deployment = api.create_deployment(name, description,stage_variables)
+        deployment = api.create_deployment(name, description, stage_variables)
         return deployment
 
     def get_deployment(self, function_id, deployment_id):
@@ -515,6 +536,8 @@
         api = self.get_rest_api(function_id)
         return api.delete_deployment(deployment_id)
 
+
 apigateway_backends = {}
-for region_name in ['us-east-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1']: # Not available in boto yet
+# Not available in boto yet
+for region_name in ['us-east-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1']:
     apigateway_backends[region_name] = APIGatewayBackend(region_name)
diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py
index a7bb28c6e..443fd4060 100644
--- a/moto/apigateway/responses.py
+++ b/moto/apigateway/responses.py
@@ -12,7 +12,6 @@ class APIGatewayResponse(BaseResponse):
 
     def _get_param(self, key):
         return json.loads(self.body).get(key)
-
 
     def _get_param_with_default_value(self, key, default):
         jsonbody = json.loads(self.body)
@@ -69,7 +68,8 @@
             resource = self.backend.get_resource(function_id, resource_id)
         elif self.method == 'POST':
             path_part = self._get_param("pathPart")
-            resource = self.backend.create_resource(function_id, resource_id, path_part)
+            resource = self.backend.create_resource(
+                function_id, resource_id, path_part)
         elif self.method == 'DELETE':
             resource = self.backend.delete_resource(function_id, resource_id)
         return 200, {}, json.dumps(resource.to_dict())
@@ -82,11 +82,13 @@
         method_type = url_path_parts[6]
 
         if self.method == 'GET':
-            method = self.backend.get_method(function_id, resource_id, method_type)
+            method = self.backend.get_method(
+                function_id, resource_id, method_type)
             return 200, {}, json.dumps(method)
         elif self.method == 'PUT':
             authorization_type = self._get_param("authorizationType")
-            method = self.backend.create_method(function_id, resource_id, method_type, authorization_type)
+            method = self.backend.create_method(
+                function_id, resource_id, method_type, authorization_type)
             return 200, {}, json.dumps(method)
 
     def resource_method_responses(self, request, full_url, headers):
@@ -98,11 +100,14 @@
         response_code = url_path_parts[8]
 
         if self.method == 'GET':
-            method_response = self.backend.get_method_response(function_id, resource_id, method_type, response_code)
+            method_response = self.backend.get_method_response(
+                function_id, resource_id, method_type, response_code)
         elif self.method == 'PUT':
-            method_response = self.backend.create_method_response(function_id, resource_id, method_type, response_code)
+            method_response = self.backend.create_method_response(
+                function_id, resource_id, method_type, response_code)
         elif self.method == 'DELETE':
-            method_response = self.backend.delete_method_response(function_id, resource_id, method_type, response_code)
+            method_response = self.backend.delete_method_response(
+                function_id, resource_id, method_type, response_code)
         return 200, {}, json.dumps(method_response)
 
     def restapis_stages(self, request, full_url, headers):
@@ -113,10 +118,13 @@
 
         if self.method == 'POST':
             stage_name = self._get_param("stageName")
             deployment_id = self._get_param("deploymentId")
-            stage_variables = self._get_param_with_default_value('variables',{})
-            description = self._get_param_with_default_value('description','')
-            cacheClusterEnabled = self._get_param_with_default_value('cacheClusterEnabled',False)
-            cacheClusterSize = self._get_param_with_default_value('cacheClusterSize',None)
+            stage_variables = self._get_param_with_default_value(
+                'variables', {})
+            description = self._get_param_with_default_value('description', '')
+            cacheClusterEnabled = self._get_param_with_default_value(
+                'cacheClusterEnabled', False)
+            cacheClusterSize = self._get_param_with_default_value(
+                'cacheClusterSize', None)
             stage_response = self.backend.create_stage(function_id, stage_name, deployment_id,
                                                        variables=stage_variables, description=description,
@@ -135,12 +143,14 @@
 
         if self.method == 'GET':
             try:
-                stage_response = self.backend.get_stage(function_id, stage_name)
+                stage_response = self.backend.get_stage(
+                    function_id, stage_name)
             except StageNotFoundException as error:
-                return error.code, {},'{{"message":"{0}","code":"{1}"}}'.format(error.message,error.error_type)
+                return error.code, {}, '{{"message":"{0}","code":"{1}"}}'.format(error.message, error.error_type)
         elif self.method == 'PATCH':
             patch_operations = self._get_param('patchOperations')
-            stage_response = self.backend.update_stage(function_id, stage_name, patch_operations)
+            stage_response = self.backend.update_stage(
+                function_id, stage_name, patch_operations)
         return 200, {}, json.dumps(stage_response)
 
     def integrations(self, request, full_url, headers):
@@ -151,14 +161,17 @@
         method_type = url_path_parts[6]
 
         if self.method == 'GET':
-            integration_response = self.backend.get_integration(function_id, resource_id, method_type)
+            integration_response = self.backend.get_integration(
+                function_id, resource_id, method_type)
         elif self.method == 'PUT':
             integration_type = self._get_param('type')
             uri = self._get_param('uri')
             request_templates = self._get_param('requestTemplates')
-            integration_response = self.backend.create_integration(function_id, resource_id, method_type, integration_type, uri, request_templates=request_templates)
+            integration_response = self.backend.create_integration(
+                function_id, resource_id, method_type, integration_type, uri, request_templates=request_templates)
         elif self.method == 'DELETE':
-            integration_response = self.backend.delete_integration(function_id, resource_id, method_type)
+            integration_response = self.backend.delete_integration(
+                function_id, resource_id, method_type)
         return 200, {}, json.dumps(integration_response)
 
     def integration_responses(self, request, full_url, headers):
@@ -193,9 +206,11 @@
             return 200, {}, json.dumps({"item": deployments})
         elif self.method == 'POST':
             name = self._get_param("stageName")
-            description = self._get_param_with_default_value("description","")
-            stage_variables = self._get_param_with_default_value('variables',{})
-            deployment = self.backend.create_deployment(function_id, name, description,stage_variables)
+            description = self._get_param_with_default_value("description", "")
+            stage_variables = self._get_param_with_default_value(
+                'variables', {})
+            deployment = self.backend.create_deployment(
+                function_id, name, description, stage_variables)
             return 200, {}, json.dumps(deployment)
 
     def individual_deployment(self, request, full_url, headers):
@@ -205,7 +220,9 @@
         deployment_id = url_path_parts[4]
 
         if self.method == 'GET':
-            deployment = self.backend.get_deployment(function_id, deployment_id)
+            deployment = self.backend.get_deployment(
+                function_id, deployment_id)
         elif self.method == 'DELETE':
-            deployment = self.backend.delete_deployment(function_id, deployment_id)
+            deployment = self.backend.delete_deployment(
+                function_id, deployment_id)
         return 200, {}, json.dumps(deployment)
diff --git a/moto/autoscaling/__init__.py b/moto/autoscaling/__init__.py
index 9b5842788..b2b8b0bae 100644
--- a/moto/autoscaling/__init__.py
+++ b/moto/autoscaling/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import autoscaling_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 autoscaling_backend = autoscaling_backends['us-east-1']
 mock_autoscaling = base_decorator(autoscaling_backends)
diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py
index 53a0f62df..18dfcb5fe 100644
--- a/moto/autoscaling/models.py
+++ b/moto/autoscaling/models.py
@@ -10,12 +10,14 @@ DEFAULT_COOLDOWN = 300
 
 
 class InstanceState(object):
+
     def __init__(self, instance, lifecycle_state="InService"):
         self.instance = instance
         self.lifecycle_state = lifecycle_state
 
 
 class FakeScalingPolicy(object):
+
     def __init__(self, name, policy_type, adjustment_type, as_name,
                  scaling_adjustment, cooldown, autoscaling_backend):
         self.name = name
@@ -31,14 +33,18 @@
 
     def execute(self):
         if self.adjustment_type == 'ExactCapacity':
-            self.autoscaling_backend.set_desired_capacity(self.as_name, self.scaling_adjustment)
+            self.autoscaling_backend.set_desired_capacity(
+                self.as_name, self.scaling_adjustment)
         elif self.adjustment_type == 'ChangeInCapacity':
-            self.autoscaling_backend.change_capacity(self.as_name, self.scaling_adjustment)
+            self.autoscaling_backend.change_capacity(
+                self.as_name, self.scaling_adjustment)
         elif self.adjustment_type == 'PercentChangeInCapacity':
-            self.autoscaling_backend.change_capacity_percent(self.as_name, self.scaling_adjustment)
+            self.autoscaling_backend.change_capacity_percent(
+                self.as_name, self.scaling_adjustment)
 
 
 class FakeLaunchConfiguration(object):
+
     def __init__(self, name, image_id, key_name, ramdisk_id, kernel_id,
                  security_groups, user_data, instance_type, instance_monitoring,
                  instance_profile_name, spot_price, ebs_optimized, associate_public_ip_address, block_device_mapping_dict):
@@ -77,14 +83,16 @@
             instance_profile_name=instance_profile_name,
             spot_price=properties.get("SpotPrice"),
             ebs_optimized=properties.get("EbsOptimized"),
-            associate_public_ip_address=properties.get("AssociatePublicIpAddress"),
+            associate_public_ip_address=properties.get(
+                "AssociatePublicIpAddress"),
             block_device_mappings=properties.get("BlockDeviceMapping.member")
         )
         return config
 
     @classmethod
     def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
-        cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name)
+        cls.delete_from_cloudformation_json(
+            original_resource.name, cloudformation_json, region_name)
         return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
 
     @classmethod
@@ -126,7 +134,8 @@
             else:
                 block_type.volume_type = mapping.get('ebs._volume_type')
                 block_type.snapshot_id = mapping.get('ebs._snapshot_id')
-                block_type.delete_on_termination = mapping.get('ebs._delete_on_termination')
+                block_type.delete_on_termination = mapping.get(
+                    'ebs._delete_on_termination')
                 block_type.size = mapping.get('ebs._volume_size')
                 block_type.iops = mapping.get('ebs._iops')
                 block_device_map[mount_point] = block_type
@@ -134,6 +143,7 @@
 
 
 class FakeAutoScalingGroup(object):
+
     def __init__(self, name, availability_zones, desired_capacity, max_size,
                  min_size, launch_config_name, vpc_zone_identifier,
                  default_cooldown, health_check_period, health_check_type,
@@ -145,7 +155,8 @@
         self.max_size = max_size
         self.min_size = min_size
 
-        self.launch_config = self.autoscaling_backend.launch_configurations[launch_config_name]
+        self.launch_config = self.autoscaling_backend.launch_configurations[
+            launch_config_name]
         self.launch_config_name = launch_config_name
         self.vpc_zone_identifier = vpc_zone_identifier
 
@@ -175,7 +186,8 @@
             max_size=properties.get("MaxSize"),
             min_size=properties.get("MinSize"),
             launch_config_name=launch_config_name,
-            vpc_zone_identifier=(','.join(properties.get("VPCZoneIdentifier", [])) or None),
+            vpc_zone_identifier=(
+                ','.join(properties.get("VPCZoneIdentifier", [])) or None),
             default_cooldown=properties.get("Cooldown"),
             health_check_period=properties.get("HealthCheckGracePeriod"),
             health_check_type=properties.get("HealthCheckType"),
@@ -188,7 +200,8 @@
 
     @classmethod
     def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
-        cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name)
+        cls.delete_from_cloudformation_json(
+            original_resource.name, cloudformation_json, region_name)
         return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
 
     @classmethod
@@ -219,7 +232,8 @@
         self.min_size = min_size
 
         if launch_config_name:
-            self.launch_config = self.autoscaling_backend.launch_configurations[launch_config_name]
+            self.launch_config = self.autoscaling_backend.launch_configurations[
+                launch_config_name]
             self.launch_config_name = launch_config_name
         if vpc_zone_identifier is not None:
             self.vpc_zone_identifier = vpc_zone_identifier
@@ -244,7 +258,8 @@
 
         if self.desired_capacity > curr_instance_count:
             # Need more instances
-            count_needed = int(self.desired_capacity) - int(curr_instance_count)
+            count_needed = int(self.desired_capacity) - \
+                int(curr_instance_count)
             reservation = self.autoscaling_backend.ec2_backend.add_instances(
                 self.launch_config.image_id,
                 count_needed,
@@ -259,8 +274,10 @@
             # Need to remove some instances
             count_to_remove = curr_instance_count - self.desired_capacity
             instances_to_remove = self.instance_states[:count_to_remove]
-            instance_ids_to_remove = [instance.instance.id for instance in instances_to_remove]
-            self.autoscaling_backend.ec2_backend.terminate_instances(instance_ids_to_remove)
+            instance_ids_to_remove = [
+                instance.instance.id for instance in instances_to_remove]
+            self.autoscaling_backend.ec2_backend.terminate_instances(
+                instance_ids_to_remove)
             self.instance_states = self.instance_states[count_to_remove:]
 
 
@@ -419,8 +436,8 @@ class AutoScalingBackend(BaseBackend):
     def describe_policies(self, autoscaling_group_name=None, policy_names=None, policy_types=None):
         return [policy for policy in self.policies.values()
                 if (not autoscaling_group_name or policy.as_name == autoscaling_group_name) and
-            (not policy_names or policy.name in policy_names) and
-            (not policy_types or policy.policy_type in policy_types)]
+                (not policy_names or policy.name in policy_names) and
+                (not policy_types or policy.policy_type in policy_types)]
 
     def delete_policy(self, group_name):
         self.policies.pop(group_name, None)
@@ -431,18 +448,22 @@
 
     def update_attached_elbs(self, group_name):
         group = self.autoscaling_groups[group_name]
-        group_instance_ids = set(state.instance.id for state in group.instance_states)
+        group_instance_ids = set(
+            state.instance.id for state in group.instance_states)
 
         try:
-            elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers)
+            elbs = self.elb_backend.describe_load_balancers(
+                names=group.load_balancers)
         except LoadBalancerNotFoundError:
             # ELBs can be deleted before their autoscaling group
             return
 
         for elb in elbs:
             elb_instace_ids = set(elb.instance_ids)
-            self.elb_backend.register_instances(elb.name, group_instance_ids - elb_instace_ids)
-            self.elb_backend.deregister_instances(elb.name, elb_instace_ids - group_instance_ids)
+            self.elb_backend.register_instances(
+                elb.name, group_instance_ids - elb_instace_ids)
+            self.elb_backend.deregister_instances(
+                elb.name, elb_instace_ids - group_instance_ids)
 
 
     def create_or_update_tags(self, tags):
@@ -452,19 +473,21 @@
             old_tags = group.tags
 
             new_tags = []
-            #if key was in old_tags, update old tag
+            # if key was in old_tags, update old tag
             for old_tag in old_tags:
                 if old_tag["key"] == tag["key"]:
                     new_tags.append(tag)
                 else:
                     new_tags.append(old_tag)
 
-            #if key was never in old_tag's add it (create tag)
+            # if key was never in old_tag's add it (create tag)
             if not any(new_tag['key'] == tag['key'] for new_tag in new_tags):
                 new_tags.append(tag)
 
             group.tags = new_tags
 
+
 autoscaling_backends = {}
 for region, ec2_backend in ec2_backends.items():
-    autoscaling_backends[region] = AutoScalingBackend(ec2_backend, elb_backends[region])
+    autoscaling_backends[region] = AutoScalingBackend(
+        ec2_backend, elb_backends[region])
diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py
index 976199131..b1d160320 100644
--- a/moto/autoscaling/responses.py
+++ b/moto/autoscaling/responses.py
@@ -11,7 +11,8 @@ class AutoScalingResponse(BaseResponse):
         return autoscaling_backends[self.region]
 
     def create_launch_configuration(self):
-        instance_monitoring_string = self._get_param('InstanceMonitoring.Enabled')
+        instance_monitoring_string = self._get_param(
+            'InstanceMonitoring.Enabled')
         if instance_monitoring_string == 'true':
             instance_monitoring = True
         else:
@@ -29,28 +30,35 @@
             instance_profile_name=self._get_param('IamInstanceProfile'),
             spot_price=self._get_param('SpotPrice'),
             ebs_optimized=self._get_param('EbsOptimized'),
-            associate_public_ip_address=self._get_param("AssociatePublicIpAddress"),
-            block_device_mappings=self._get_list_prefix('BlockDeviceMappings.member')
+            associate_public_ip_address=self._get_param(
+                "AssociatePublicIpAddress"),
+            block_device_mappings=self._get_list_prefix(
+                'BlockDeviceMappings.member')
         )
         template = self.response_template(CREATE_LAUNCH_CONFIGURATION_TEMPLATE)
         return template.render()
 
     def describe_launch_configurations(self):
         names = self._get_multi_param('LaunchConfigurationNames.member')
-        launch_configurations = self.autoscaling_backend.describe_launch_configurations(names)
-        template = self.response_template(DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE)
+        launch_configurations = self.autoscaling_backend.describe_launch_configurations(
+            names)
+        template = self.response_template(
+            DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE)
         return template.render(launch_configurations=launch_configurations)
 
     def delete_launch_configuration(self):
-        launch_configurations_name = self.querystring.get('LaunchConfigurationName')[0]
-        self.autoscaling_backend.delete_launch_configuration(launch_configurations_name)
+        launch_configurations_name = self.querystring.get(
+            'LaunchConfigurationName')[0]
+        self.autoscaling_backend.delete_launch_configuration(
+            launch_configurations_name)
         template = self.response_template(DELETE_LAUNCH_CONFIGURATION_TEMPLATE)
         return template.render()
 
     def create_auto_scaling_group(self):
         self.autoscaling_backend.create_autoscaling_group(
             name=self._get_param('AutoScalingGroupName'),
-            availability_zones=self._get_multi_param('AvailabilityZones.member'),
+            availability_zones=self._get_multi_param(
+                'AvailabilityZones.member'),
             desired_capacity=self._get_int_param('DesiredCapacity'),
             max_size=self._get_int_param('MaxSize'),
             min_size=self._get_int_param('MinSize'),
@@ -61,7 +69,8 @@
             health_check_type=self._get_param('HealthCheckType'),
             load_balancers=self._get_multi_param('LoadBalancerNames.member'),
             placement_group=self._get_param('PlacementGroup'),
-            termination_policies=self._get_multi_param('TerminationPolicies.member'),
+            termination_policies=self._get_multi_param(
+                'TerminationPolicies.member'),
             tags=self._get_list_prefix('Tags.member'),
         )
         template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE)
@@ -76,7 +85,8 @@
     def update_auto_scaling_group(self):
         self.autoscaling_backend.update_autoscaling_group(
             name=self._get_param('AutoScalingGroupName'),
-            availability_zones=self._get_multi_param('AvailabilityZones.member'),
+            availability_zones=self._get_multi_param(
+                'AvailabilityZones.member'),
             desired_capacity=self._get_int_param('DesiredCapacity'),
             max_size=self._get_int_param('MaxSize'),
             min_size=self._get_int_param('MinSize'),
@@ -87,7 +97,8 @@
             health_check_type=self._get_param('HealthCheckType'),
             load_balancers=self._get_multi_param('LoadBalancerNames.member'),
             placement_group=self._get_param('PlacementGroup'),
-            termination_policies=self._get_multi_param('TerminationPolicies.member'),
+            termination_policies=self._get_multi_param(
+                'TerminationPolicies.member'),
         )
         template = self.response_template(UPDATE_AUTOSCALING_GROUP_TEMPLATE)
         return template.render()
@@ -101,7 +112,8 @@
 
     def set_desired_capacity(self):
         group_name = self._get_param('AutoScalingGroupName')
         desired_capacity = self._get_int_param('DesiredCapacity')
-        self.autoscaling_backend.set_desired_capacity(group_name, desired_capacity)
+        self.autoscaling_backend.set_desired_capacity(
+            group_name, desired_capacity)
         template = self.response_template(SET_DESIRED_CAPACITY_TEMPLATE)
         return template.render()
@@ -114,7 +126,8 @@
 
     def describe_auto_scaling_instances(self):
         instance_states = self.autoscaling_backend.describe_autoscaling_instances()
-        template = self.response_template(DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE)
+        template = self.response_template(
+            DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE)
         return template.render(instance_states=instance_states)
 
     def put_scaling_policy(self):
diff --git a/moto/awslambda/__init__.py b/moto/awslambda/__init__.py
index 46bc90fbd..f0d694654 100644
--- a/moto/awslambda/__init__.py
+++ b/moto/awslambda/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import lambda_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 lambda_backend = lambda_backends['us-east-1']
 mock_lambda = base_decorator(lambda_backends)
diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py
index 1fc139eb7..46d227300 100644
--- a/moto/awslambda/models.py
+++ b/moto/awslambda/models.py
@@ -32,19 +32,22 @@ class LambdaFunction(object):
         # optional
         self.description = spec.get('Description', '')
         self.memory_size = spec.get('MemorySize', 128)
-        self.publish = spec.get('Publish', False) # this is ignored currently
+        self.publish = spec.get('Publish', False)  # this is ignored currently
         self.timeout = spec.get('Timeout', 3)
 
         # this isn't finished yet. it needs to find out the VpcId value
-        self._vpc_config = spec.get('VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []})
+        self._vpc_config = spec.get(
+            'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []})
 
         # auto-generated
         self.version = '$LATEST'
         self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
 
         if 'ZipFile' in self.code:
-            # more hackery to handle unicode/bytes/str in python3 and python2 - argh!
+            # more hackery to handle unicode/bytes/str in python3 and python2 -
+            # argh!
             try:
-                to_unzip_code = base64.b64decode(bytes(self.code['ZipFile'], 'utf-8'))
+                to_unzip_code = base64.b64decode(
+                    bytes(self.code['ZipFile'], 'utf-8'))
             except Exception:
                 to_unzip_code = base64.b64decode(self.code['ZipFile'])
@@ -58,7 +61,8 @@
             # validate s3 bucket
             try:
                 # FIXME: does not validate bucket region
-                key = s3_backend.get_key(self.code['S3Bucket'], self.code['S3Key'])
+                key = s3_backend.get_key(
+                    self.code['S3Bucket'], self.code['S3Key'])
             except MissingBucket:
                 raise ValueError(
                     "InvalidParameterValueException",
@@ -72,7 +76,8 @@
             else:
                 self.code_size = key.size
                 self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
-        self.function_arn = 'arn:aws:lambda:123456789012:function:{0}'.format(self.function_name)
+        self.function_arn = 'arn:aws:lambda:123456789012:function:{0}'.format(
+            self.function_name)
 
     @property
     def vpc_config(self):
@@ -130,7 +135,6 @@
                 self.convert(self.code),
                 self.convert('print(json.dumps(lambda_handler(%s, %s)))' %
                              (self.is_json(self.convert(event)), context))])
-            #print("moto_lambda_debug: ", mycode)
         except Exception as ex:
             print("Exception %s", ex)
 
@@ -182,7 +186,8 @@
             'Runtime': properties['Runtime'],
         }
         optional_properties = 'Description MemorySize Publish Timeout VpcConfig'.split()
-        # NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the default logic
+        # NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the
+        # default logic
         for prop in optional_properties:
             if prop in properties:
                 spec[prop] = properties[prop]
@@ -219,6 +224,6 @@ lambda_backends = {}
 for region in boto.awslambda.regions():
     lambda_backends[region.name] = LambdaBackend()
 
-# Handle us forgotten regions, unless Lambda truly only runs out of US and EU?????
+# Handle us forgotten regions, unless Lambda truly only runs out of US and
 for region in ['ap-southeast-2']:
     lambda_backends[region] = LambdaBackend()
diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py
index 3fc756efa..b7664c314 100644
--- a/moto/awslambda/responses.py
+++ b/moto/awslambda/responses.py
@@ -2,10 +2,8 @@ from __future__ import unicode_literals
 
 import json
 import re
-import uuid
 
 from moto.core.responses import BaseResponse
-from .models import lambda_backends
 
 
 class LambdaResponse(BaseResponse):
diff --git a/moto/cloudformation/__init__.py b/moto/cloudformation/__init__.py
index 47e840ec6..b73e3ab6c 100644
--- a/moto/cloudformation/__init__.py
+++ b/moto/cloudformation/__init__.py
@@ -1,7 +1,8 @@
 from __future__ import unicode_literals
 from .models import cloudformation_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 cloudformation_backend = cloudformation_backends['us-east-1']
 mock_cloudformation = base_decorator(cloudformation_backends)
-mock_cloudformation_deprecated = deprecated_base_decorator(cloudformation_backends)
+mock_cloudformation_deprecated = deprecated_base_decorator(
+    cloudformation_backends)
diff --git a/moto/cloudformation/exceptions.py b/moto/cloudformation/exceptions.py
index ed2856826..56a95382a 100644
--- a/moto/cloudformation/exceptions.py
+++ b/moto/cloudformation/exceptions.py
@@ -9,9 +9,10 @@ class UnformattedGetAttTemplateException(Exception):
 
 
 class ValidationError(BadRequest):
+
     def __init__(self, name_or_id, message=None):
         if message is None:
-            message="Stack with id {0} does not exist".format(name_or_id)
+            message = "Stack with id {0} does not exist".format(name_or_id)
 
         template = Template(ERROR_RESPONSE)
         super(ValidationError, self).__init__()
@@ -22,6 +23,7 @@ class ValidationError(BadRequest):
 
 
 class MissingParameterError(BadRequest):
+
     def __init__(self, parameter_name):
         template = Template(ERROR_RESPONSE)
         super(MissingParameterError, self).__init__()
diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py
index 1f091251b..0a3dcc62d 100644
--- a/moto/cloudformation/models.py
+++ b/moto/cloudformation/models.py
@@ -11,6 +11,7 @@ from .exceptions import ValidationError
 
 
 class FakeStack(object):
+
     def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):
         self.stack_id = stack_id
         self.name = name
@@ -22,7 +23,8 @@
         self.role_arn = role_arn
         self.tags = tags if tags else {}
         self.events = []
-        self._add_stack_event("CREATE_IN_PROGRESS", resource_status_reason="User Initiated")
+        self._add_stack_event("CREATE_IN_PROGRESS",
+                              resource_status_reason="User Initiated")
 
         self.description = self.template_dict.get('Description')
         self.resource_map = self._create_resource_map()
@@ -31,7 +33,8 @@
         self.status = 'CREATE_COMPLETE'
 
     def _create_resource_map(self):
-        resource_map = ResourceMap(self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict)
+        resource_map = ResourceMap(
+            self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict)
         resource_map.create()
         return resource_map
 
@@ -79,7 +82,8 @@
         return self.output_map.values()
 
     def update(self, template, role_arn=None):
-        self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated")
+        self._add_stack_event("UPDATE_IN_PROGRESS",
resource_status_reason="User Initiated") self.template = template self.resource_map.update(json.loads(template)) self.output_map = self._create_output_map() @@ -88,13 +92,15 @@ class FakeStack(object): self.role_arn = role_arn def delete(self): - self._add_stack_event("DELETE_IN_PROGRESS", resource_status_reason="User Initiated") + self._add_stack_event("DELETE_IN_PROGRESS", + resource_status_reason="User Initiated") self.resource_map.delete() self._add_stack_event("DELETE_COMPLETE") self.status = "DELETE_COMPLETE" class FakeEvent(object): + def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None): self.stack_id = stack_id self.stack_name = stack_name diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 521658cee..f2ba08522 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -94,6 +94,7 @@ logger = logging.getLogger("moto") class LazyDict(dict): + def __getitem__(self, key): val = dict.__getitem__(self, key) if callable(val): @@ -133,7 +134,8 @@ def clean_json(resource_json, resources_map): try: return resource.get_cfn_attribute(resource_json['Fn::GetAtt'][1]) except NotImplementedError as n: - logger.warning(n.message.format(resource_json['Fn::GetAtt'][0])) + logger.warning(n.message.format( + resource_json['Fn::GetAtt'][0])) except UnformattedGetAttTemplateException: raise BotoServerError( UnformattedGetAttTemplateException.status_code, @@ -152,7 +154,8 @@ def clean_json(resource_json, resources_map): join_list = [] for val in resource_json['Fn::Join'][1]: cleaned_val = clean_json(val, resources_map) - join_list.append('{0}'.format(cleaned_val) if cleaned_val else '{0}'.format(val)) + join_list.append('{0}'.format(cleaned_val) + if cleaned_val else '{0}'.format(val)) return resource_json['Fn::Join'][0].join(join_list) cleaned_json = {} @@ -215,14 +218,16 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n if not resource_tuple: return None resource_class, resource_json, resource_name = resource_tuple - resource = resource_class.create_from_cloudformation_json(resource_name, resource_json, region_name) + resource = resource_class.create_from_cloudformation_json( + resource_name, resource_json, region_name) resource.type = resource_type resource.logical_resource_id = logical_id return resource def parse_and_update_resource(logical_id, resource_json, resources_map, region_name): - resource_class, new_resource_json, new_resource_name = parse_resource(logical_id, resource_json, resources_map) + resource_class, new_resource_json, new_resource_name = parse_resource( + logical_id, resource_json, resources_map) original_resource = resources_map[logical_id] new_resource = resource_class.update_from_cloudformation_json( original_resource=original_resource, @@ -236,8 +241,10 @@ def parse_and_update_resource(logical_id, resource_json, resources_map, region_n def parse_and_delete_resource(logical_id, resource_json, resources_map, region_name): - resource_class, resource_json, resource_name = parse_resource(logical_id, resource_json, resources_map) - resource_class.delete_from_cloudformation_json(resource_name, resource_json, region_name) + resource_class, resource_json, resource_name = parse_resource( + logical_id, resource_json, resources_map) + resource_class.delete_from_cloudformation_json( + resource_name, resource_json, region_name) def parse_condition(condition, resources_map, 
diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py
index 521658cee..f2ba08522 100644
--- a/moto/cloudformation/parsing.py
+++ b/moto/cloudformation/parsing.py
@@ -94,6 +94,7 @@ logger = logging.getLogger("moto")
 
 
 class LazyDict(dict):
+
     def __getitem__(self, key):
         val = dict.__getitem__(self, key)
         if callable(val):
@@ -133,7 +134,8 @@ def clean_json(resource_json, resources_map):
             try:
                 return resource.get_cfn_attribute(resource_json['Fn::GetAtt'][1])
             except NotImplementedError as n:
-                logger.warning(n.message.format(resource_json['Fn::GetAtt'][0]))
+                logger.warning(n.message.format(
+                    resource_json['Fn::GetAtt'][0]))
             except UnformattedGetAttTemplateException:
                 raise BotoServerError(
                     UnformattedGetAttTemplateException.status_code,
@@ -152,7 +154,8 @@ def clean_json(resource_json, resources_map):
             join_list = []
             for val in resource_json['Fn::Join'][1]:
                 cleaned_val = clean_json(val, resources_map)
-                join_list.append('{0}'.format(cleaned_val) if cleaned_val else '{0}'.format(val))
+                join_list.append('{0}'.format(cleaned_val)
+                                 if cleaned_val else '{0}'.format(val))
             return resource_json['Fn::Join'][0].join(join_list)
 
         cleaned_json = {}
@@ -215,14 +218,16 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n
     if not resource_tuple:
         return None
     resource_class, resource_json, resource_name = resource_tuple
-    resource = resource_class.create_from_cloudformation_json(resource_name, resource_json, region_name)
+    resource = resource_class.create_from_cloudformation_json(
+        resource_name, resource_json, region_name)
     resource.type = resource_type
     resource.logical_resource_id = logical_id
     return resource
 
 
 def parse_and_update_resource(logical_id, resource_json, resources_map, region_name):
-    resource_class, new_resource_json, new_resource_name = parse_resource(logical_id, resource_json, resources_map)
+    resource_class, new_resource_json, new_resource_name = parse_resource(
+        logical_id, resource_json, resources_map)
     original_resource = resources_map[logical_id]
     new_resource = resource_class.update_from_cloudformation_json(
         original_resource=original_resource,
@@ -236,8 +241,10 @@ def parse_and_update_resource(logical_id, resource_json, resources_map, region_n
 
 
 def parse_and_delete_resource(logical_id, resource_json, resources_map, region_name):
-    resource_class, resource_json, resource_name = parse_resource(logical_id, resource_json, resources_map)
-    resource_class.delete_from_cloudformation_json(resource_name, resource_json, region_name)
+    resource_class, resource_json, resource_name = parse_resource(
+        logical_id, resource_json, resources_map)
+    resource_class.delete_from_cloudformation_json(
+        resource_name, resource_json, region_name)
 
 
 def parse_condition(condition, resources_map, condition_map):
@@ -312,7 +319,8 @@ class ResourceMap(collections.Mapping):
             resource_json = self._resource_json_map.get(resource_logical_id)
             if not resource_json:
                 raise KeyError(resource_logical_id)
-            new_resource = parse_and_create_resource(resource_logical_id, resource_json, self, self._region_name)
+            new_resource = parse_and_create_resource(
+                resource_logical_id, resource_json, self, self._region_name)
             if new_resource is not None:
                 self._parsed_resources[resource_logical_id] = new_resource
             return new_resource
@@ -343,7 +351,8 @@ class ResourceMap(collections.Mapping):
                 value = value.split(',')
             self.resolved_parameters[key] = value
 
-        # Check if there are any non-default params that were not passed input params
+        # Check if there are any non-default params that were not passed input
+        # params
        for key, value in self.resolved_parameters.items():
             if value is None:
                 raise MissingParameterError(key)
@@ -355,10 +364,11 @@ class ResourceMap(collections.Mapping):
         lazy_condition_map = LazyDict()
         for condition_name, condition in conditions.items():
             lazy_condition_map[condition_name] = functools.partial(parse_condition,
-                condition, self._parsed_resources, lazy_condition_map)
+                                                                   condition, self._parsed_resources, lazy_condition_map)
 
         for condition_name in lazy_condition_map:
-            self._parsed_resources[condition_name] = lazy_condition_map[condition_name]
+            self._parsed_resources[
+                condition_name] = lazy_condition_map[condition_name]
 
     def create(self):
         self.load_mapping()
@@ -368,11 +378,12 @@ class ResourceMap(collections.Mapping):
         # Since this is a lazy map, to create every object we just need to
         # iterate through self.
         self.tags.update({'aws:cloudformation:stack-name': self.get('AWS::StackName'),
-            'aws:cloudformation:stack-id': self.get('AWS::StackId')})
+                          'aws:cloudformation:stack-id': self.get('AWS::StackId')})
         for resource in self.resources:
             if isinstance(self[resource], ec2_models.TaggedEC2Resource):
                 self.tags['aws:cloudformation:logical-id'] = resource
-                ec2_models.ec2_backends[self._region_name].create_tags([self[resource].physical_resource_id], self.tags)
+                ec2_models.ec2_backends[self._region_name].create_tags(
+                    [self[resource].physical_resource_id], self.tags)
 
     def update(self, template):
         self.load_mapping()
@@ -386,24 +397,29 @@ class ResourceMap(collections.Mapping):
         new_resource_names = set(new_template) - set(old_template)
         for resource_name in new_resource_names:
             resource_json = new_template[resource_name]
-            new_resource = parse_and_create_resource(resource_name, resource_json, self, self._region_name)
+            new_resource = parse_and_create_resource(
+                resource_name, resource_json, self, self._region_name)
             self._parsed_resources[resource_name] = new_resource
 
         removed_resource_names = set(old_template) - set(new_template)
         for resource_name in removed_resource_names:
             resource_json = old_template[resource_name]
-            parse_and_delete_resource(resource_name, resource_json, self, self._region_name)
+            parse_and_delete_resource(
+                resource_name, resource_json, self, self._region_name)
             self._parsed_resources.pop(resource_name)
 
-        resources_to_update = set(name for name in new_template if name in old_template and new_template[name] != old_template[name])
+        resources_to_update = set(name for name in new_template if name in old_template and new_template[
+                                  name] != old_template[name])
         tries = 1
         while resources_to_update and tries < 5:
             for resource_name in resources_to_update.copy():
                 resource_json = new_template[resource_name]
                 try:
-                    changed_resource = parse_and_update_resource(resource_name, resource_json, self, self._region_name)
+                    changed_resource = parse_and_update_resource(
+                        resource_name, resource_json, self, self._region_name)
                 except Exception as e:
-                    # skip over dependency violations, and try again in a second pass
+                    # skip over dependency violations, and try again in a
+                    # second pass
                     last_exception = e
                 else:
                     self._parsed_resources[resource_name] = changed_resource
@@ -422,7 +438,8 @@ class ResourceMap(collections.Mapping):
                     if parsed_resource and hasattr(parsed_resource, 'delete'):
                         parsed_resource.delete(self._region_name)
                 except Exception as e:
-                    # skip over dependency violations, and try again in a second pass
+                    # skip over dependency violations, and try again in a
+                    # second pass
                     last_exception = e
                 else:
                     remaining_resources.remove(resource)
@@ -430,7 +447,9 @@ class ResourceMap(collections.Mapping):
         if tries == 5:
             raise last_exception
 
+
 class OutputMap(collections.Mapping):
+
     def __init__(self, resources, template):
         self._template = template
         self._output_json_map = template.get('Outputs')
@@ -446,7 +465,8 @@ class OutputMap(collections.Mapping):
             return self._parsed_outputs[output_logical_id]
         else:
             output_json = self._output_json_map.get(output_logical_id)
-            new_output = parse_output(output_logical_id, output_json, self._resource_map)
+            new_output = parse_output(
+                output_logical_id, output_json, self._resource_map)
             self._parsed_outputs[output_logical_id] = new_output
             return new_output
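The LazyDict introduced at the top of this file is what lets conditions reference each other without an explicit evaluation order: values stay callables until first read, then the resolved result is memoized. The same idea in isolation:

    class LazyDict(dict):
        def __getitem__(self, key):
            val = dict.__getitem__(self, key)
            if callable(val):
                val = val()       # resolve on first access
                self[key] = val   # memoize the resolved value
            return val

    conditions = LazyDict()
    conditions['IsProd'] = lambda: True
    # Safe even though this entry reads another one: both resolve lazily.
    conditions['UseBigBox'] = lambda: conditions['IsProd']
    assert conditions['UseBigBox'] is True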
diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py
index 3b8f53895..272310d27 100644
--- a/moto/cloudformation/responses.py
+++ b/moto/cloudformation/responses.py
@@ -18,7 +18,8 @@ class CloudFormationResponse(BaseResponse):
     def _get_stack_from_s3_url(self, template_url):
         template_url_parts = urlparse(template_url)
         if "localhost" in template_url:
-            bucket_name, key_name = template_url_parts.path.lstrip("/").split("/")
+            bucket_name, key_name = template_url_parts.path.lstrip(
+                "/").split("/")
         else:
             bucket_name = template_url_parts.netloc.split(".")[0]
             key_name = template_url_parts.path.lstrip("/")
@@ -32,7 +33,8 @@ class CloudFormationResponse(BaseResponse):
         template_url = self._get_param('TemplateURL')
         role_arn = self._get_param('RoleARN')
         parameters_list = self._get_list_prefix("Parameters.member")
-        tags = dict((item['key'], item['value']) for item in self._get_list_prefix("Tags.member"))
+        tags = dict((item['key'], item['value'])
+                    for item in self._get_list_prefix("Tags.member"))
 
         # Hack dict-comprehension
         parameters = dict([
@@ -42,7 +44,8 @@ class CloudFormationResponse(BaseResponse):
         ])
         if template_url:
             stack_body = self._get_stack_from_s3_url(template_url)
-        stack_notification_arns = self._get_multi_param('NotificationARNs.member')
+        stack_notification_arns = self._get_multi_param(
+            'NotificationARNs.member')
 
         stack = self.cloudformation_backend.create_stack(
             name=stack_name,
@@ -86,7 +89,8 @@ class CloudFormationResponse(BaseResponse):
         else:
             raise ValidationError(logical_resource_id)
 
-        template = self.response_template(DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE)
+        template = self.response_template(
+            DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE)
         return template.render(stack=stack, resource=resource)
 
     def describe_stack_resources(self):
@@ -110,7 +114,8 @@ class CloudFormationResponse(BaseResponse):
 
     def list_stack_resources(self):
         stack_name_or_id = self._get_param('StackName')
-        resources = self.cloudformation_backend.list_stack_resources(stack_name_or_id)
+        resources = self.cloudformation_backend.list_stack_resources(
+            stack_name_or_id)
 
         template = self.response_template(LIST_STACKS_RESOURCES_RESPONSE)
         return template.render(resources=resources)
@@ -138,13 +143,15 @@ class CloudFormationResponse(BaseResponse):
         stack_name = self._get_param('StackName')
         role_arn = self._get_param('RoleARN')
         if self._get_param('UsePreviousTemplate') == "true":
-            stack_body = self.cloudformation_backend.get_stack(stack_name).template
+            stack_body = self.cloudformation_backend.get_stack(
+                stack_name).template
         else:
             stack_body = self._get_param('TemplateBody')
 
         stack = self.cloudformation_backend.get_stack(stack_name)
         if stack.status == 'ROLLBACK_COMPLETE':
-            raise ValidationError(stack.stack_id, message="Stack:{0} is in ROLLBACK_COMPLETE state and can not be updated.".format(stack.stack_id))
+            raise ValidationError(
+                stack.stack_id, message="Stack:{0} is in ROLLBACK_COMPLETE state and can not be updated.".format(stack.stack_id))
 
         stack = self.cloudformation_backend.update_stack(
             name=stack_name,
diff --git a/moto/cloudwatch/__init__.py b/moto/cloudwatch/__init__.py
index 17d1c0c50..861fb703a 100644
--- a/moto/cloudwatch/__init__.py
+++ b/moto/cloudwatch/__init__.py
@@ -1,5 +1,5 @@
 from .models import cloudwatch_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 cloudwatch_backend = cloudwatch_backends['us-east-1']
 mock_cloudwatch = base_decorator(cloudwatch_backends)
diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py
index 320bc476f..7257286ba 100644
--- a/moto/cloudwatch/models.py
+++ b/moto/cloudwatch/models.py
@@ -4,12 +4,14 @@ import datetime
 
 
 class Dimension(object):
+
     def __init__(self, name, value):
         self.name = name
         self.value = value
 
 
 class FakeAlarm(object):
+
     def __init__(self, name, namespace, metric_name, comparison_operator, evaluation_periods,
                  period, threshold, statistic, description, dimensions, alarm_actions,
                  ok_actions, insufficient_data_actions, unit):
@@ -22,7 +24,8 @@ class FakeAlarm(object):
         self.threshold = threshold
         self.statistic = statistic
         self.description = description
-        self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions]
+        self.dimensions = [Dimension(dimension['name'], dimension[
+                           'value']) for dimension in dimensions]
         self.alarm_actions = alarm_actions
         self.ok_actions = ok_actions
         self.insufficient_data_actions = insufficient_data_actions
@@ -32,11 +35,13 @@ class FakeAlarm(object):
 
 
 class MetricDatum(object):
+
     def __init__(self, namespace, name, value, dimensions):
         self.namespace = namespace
         self.name = name
         self.value = value
-        self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions]
+        self.dimensions = [Dimension(dimension['name'], dimension[
+                           'value']) for dimension in dimensions]
 
 
 class CloudWatchBackend(BaseBackend):
@@ -99,7 +104,8 @@ class CloudWatchBackend(BaseBackend):
 
     def put_metric_data(self, namespace, metric_data):
         for name, value, dimensions in metric_data:
-            self.metric_data.append(MetricDatum(namespace, name, value, dimensions))
+            self.metric_data.append(MetricDatum(
+                namespace, name, value, dimensions))
 
     def get_all_metrics(self):
         return self.metric_data
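put_metric_data expects (name, value, dimensions) triples and stores one MetricDatum per entry; a quick sketch of driving the backend directly (namespace and dimension values are illustrative):

    from moto.cloudwatch.models import cloudwatch_backends

    backend = cloudwatch_backends['us-east-1']
    backend.put_metric_data('MyApp', [
        ('PageLoad', 1.2, [{'name': 'Server', 'value': 'web-1'}]),
    ])
    assert backend.get_all_metrics()[0].namespace == 'MyApp'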
diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py
index 0d2cfacf5..d06fe21d7 100644
--- a/moto/cloudwatch/responses.py
+++ b/moto/cloudwatch/responses.py
@@ -1,6 +1,5 @@
 from moto.core.responses import BaseResponse
 from .models import cloudwatch_backends
-import logging
 
 
 class CloudWatchResponse(BaseResponse):
@@ -18,7 +17,8 @@ class CloudWatchResponse(BaseResponse):
         dimensions = self._get_list_prefix('Dimensions.member')
         alarm_actions = self._get_multi_param('AlarmActions.member')
         ok_actions = self._get_multi_param('OKActions.member')
-        insufficient_data_actions = self._get_multi_param("InsufficientDataActions.member")
+        insufficient_data_actions = self._get_multi_param(
+            "InsufficientDataActions.member")
         unit = self._get_param('Unit')
         cloudwatch_backend = cloudwatch_backends[self.region]
         alarm = cloudwatch_backend.put_metric_alarm(name, namespace, metric_name,
@@ -40,14 +40,16 @@ class CloudWatchResponse(BaseResponse):
         cloudwatch_backend = cloudwatch_backends[self.region]
 
         if action_prefix:
-            alarms = cloudwatch_backend.get_alarms_by_action_prefix(action_prefix)
+            alarms = cloudwatch_backend.get_alarms_by_action_prefix(
+                action_prefix)
         elif alarm_name_prefix:
-            alarms = cloudwatch_backend.get_alarms_by_alarm_name_prefix(alarm_name_prefix)
+            alarms = cloudwatch_backend.get_alarms_by_alarm_name_prefix(
+                alarm_name_prefix)
         elif alarm_names:
             alarms = cloudwatch_backend.get_alarms_by_alarm_names(alarm_names)
         elif state_value:
             alarms = cloudwatch_backend.get_alarms_by_state_value(state_value)
-        else :
+        else:
             alarms = cloudwatch_backend.get_all_alarms()
 
         template = self.response_template(DESCRIBE_ALARMS_TEMPLATE)
@@ -66,19 +68,24 @@ class CloudWatchResponse(BaseResponse):
         metric_index = 1
         while True:
             try:
-                metric_name = self.querystring['MetricData.member.{0}.MetricName'.format(metric_index)][0]
+                metric_name = self.querystring[
+                    'MetricData.member.{0}.MetricName'.format(metric_index)][0]
             except KeyError:
                 break
-            value = self.querystring.get('MetricData.member.{0}.Value'.format(metric_index), [None])[0]
+            value = self.querystring.get(
+                'MetricData.member.{0}.Value'.format(metric_index), [None])[0]
             dimensions = []
             dimension_index = 1
             while True:
                 try:
-                    dimension_name = self.querystring['MetricData.member.{0}.Dimensions.member.{1}.Name'.format(metric_index, dimension_index)][0]
+                    dimension_name = self.querystring[
+                        'MetricData.member.{0}.Dimensions.member.{1}.Name'.format(metric_index, dimension_index)][0]
                 except KeyError:
                     break
-                dimension_value = self.querystring['MetricData.member.{0}.Dimensions.member.{1}.Value'.format(metric_index, dimension_index)][0]
-                dimensions.append({'name': dimension_name, 'value': dimension_value})
+                dimension_value = self.querystring[
+                    'MetricData.member.{0}.Dimensions.member.{1}.Value'.format(metric_index, dimension_index)][0]
+                dimensions.append(
+                    {'name': dimension_name, 'value': dimension_value})
                 dimension_index += 1
             metric_data.append([metric_name, value, dimensions])
             metric_index += 1
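The nested while loops above walk the numbered MetricData.member.N... keys until the first gap raises KeyError; the querystring they consume looks roughly like this (values are illustrative; each value is a single-element list, as parse_qs produces):

    querystring = {
        'MetricData.member.1.MetricName': ['PageLoad'],
        'MetricData.member.1.Value': ['1.2'],
        'MetricData.member.1.Dimensions.member.1.Name': ['Server'],
        'MetricData.member.1.Dimensions.member.1.Value': ['web-1'],
        # 'MetricData.member.2....' keys would continue the outer loop
    }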
diff --git a/moto/core/exceptions.py b/moto/core/exceptions.py
index d3a87e299..5474707d6 100644
--- a/moto/core/exceptions.py
+++ b/moto/core/exceptions.py
@@ -2,7 +2,6 @@ from __future__ import unicode_literals
 
 from werkzeug.exceptions import HTTPException
 from jinja2 import DictLoader, Environment
-from six import text_type
 
 
 SINGLE_ERROR_RESPONSE = u"""
@@ -33,6 +32,7 @@ ERROR_JSON_RESPONSE = u"""{
 }
 """
 
+
 class RESTError(HTTPException):
     templates = {
         'single_error': SINGLE_ERROR_RESPONSE,
@@ -54,8 +54,10 @@ class DryRunClientError(RESTError):
 
 
 class JsonRESTError(RESTError):
+
     def __init__(self, error_type, message, template='error_json', **kwargs):
-        super(JsonRESTError, self).__init__(error_type, message, template, **kwargs)
+        super(JsonRESTError, self).__init__(
+            error_type, message, template, **kwargs)
 
     def get_headers(self, *args, **kwargs):
         return [('Content-Type', 'application/json')]
diff --git a/moto/core/models.py b/moto/core/models.py
index 04ff709e0..492a0e2ff 100644
--- a/moto/core/models.py
+++ b/moto/core/models.py
@@ -3,7 +3,6 @@ from __future__ import absolute_import
 
 import functools
 import inspect
-import os
 import re
 
 from moto import settings
@@ -15,6 +14,7 @@ from .utils import (
     convert_flask_to_responses_response,
 )
 
+
 class BaseMockAWS(object):
     nested_count = 0
 
@@ -58,7 +58,6 @@ class BaseMockAWS(object):
         if self.__class__.nested_count < 0:
             raise RuntimeError('Called stop() before start().')
 
-
         if self.__class__.nested_count == 0:
             self.disable_patching()
 
@@ -96,6 +95,7 @@ class BaseMockAWS(object):
 
 
 class HttprettyMockAWS(BaseMockAWS):
+
     def reset(self):
         HTTPretty.reset()
 
@@ -118,10 +118,11 @@ class HttprettyMockAWS(BaseMockAWS):
 
 
 RESPONSES_METHODS = [responses.GET, responses.DELETE, responses.HEAD,
-    responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT]
+                     responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT]
 
 
 class ResponsesMockAWS(BaseMockAWS):
+
     def reset(self):
         responses.reset()
 
@@ -146,6 +147,7 @@ class ResponsesMockAWS(BaseMockAWS):
                 pass
         responses.reset()
 
+
 MockAWS = ResponsesMockAWS
 
 
@@ -167,12 +169,14 @@ class ServerModeMockAWS(BaseMockAWS):
             if 'endpoint_url' not in kwargs:
                 kwargs['endpoint_url'] = "http://localhost:8086"
             return real_boto3_client(*args, **kwargs)
+
         def fake_boto3_resource(*args, **kwargs):
             if 'endpoint_url' not in kwargs:
                 kwargs['endpoint_url'] = "http://localhost:8086"
             return real_boto3_resource(*args, **kwargs)
         self._client_patcher = mock.patch('boto3.client', fake_boto3_client)
-        self._resource_patcher = mock.patch('boto3.resource', fake_boto3_resource)
+        self._resource_patcher = mock.patch(
+            'boto3.resource', fake_boto3_resource)
         self._client_patcher.start()
         self._resource_patcher.start()
 
@@ -181,7 +185,9 @@ class ServerModeMockAWS(BaseMockAWS):
             self._client_patcher.stop()
             self._resource_patcher.stop()
 
+
 class Model(type):
+
     def __new__(self, clsname, bases, namespace):
         cls = super(Model, self).__new__(self, clsname, bases, namespace)
         cls.__models__ = {}
@@ -203,6 +209,7 @@ class Model(type):
 
 
 class BaseBackend(object):
+
     def reset(self):
         self.__dict__ = {}
         self.__init__()
@@ -211,7 +218,8 @@ class BaseBackend(object):
     def _url_module(self):
         backend_module = self.__class__.__module__
         backend_urls_module_name = backend_module.replace("models", "urls")
-        backend_urls_module = __import__(backend_urls_module_name, fromlist=['url_bases', 'url_paths'])
+        backend_urls_module = __import__(backend_urls_module_name, fromlist=[
+                                         'url_bases', 'url_paths'])
         return backend_urls_module
 
     @property
@@ -306,6 +314,7 @@ class deprecated_base_decorator(base_decorator):
 
 
 class MotoAPIBackend(BaseBackend):
+
     def reset(self):
         from moto.backends import BACKENDS
         for name, backends in BACKENDS.items():
@@ -315,4 +324,5 @@ class MotoAPIBackend(BaseBackend):
                 backend.reset()
         self.__init__()
 
+
 moto_api_backend = MotoAPIBackend()
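BaseBackend.reset clears state by dropping the instance __dict__ and re-running __init__, which is why every backend can be wiped between tests without re-registering URLs; the same trick on a toy class:

    class CounterBackend(object):
        def __init__(self):
            self.items = []

        def reset(self):
            # Same pattern as BaseBackend.reset(): drop all state, rebuild
            self.__dict__ = {}
            self.__init__()

    backend = CounterBackend()
    backend.items.append('x')
    backend.reset()
    assert backend.items == []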
""" + def get_source(self, environment, template): if template in self.mapping: source = self.mapping[template] @@ -77,7 +78,8 @@ class _TemplateEnvironmentMixin(object): def __init__(self): super(_TemplateEnvironmentMixin, self).__init__() self.loader = DynamicDictLoader({}) - self.environment = Environment(loader=self.loader, autoescape=self.should_autoescape) + self.environment = Environment( + loader=self.loader, autoescape=self.should_autoescape) @property def should_autoescape(self): @@ -127,12 +129,14 @@ class BaseResponse(_TemplateEnvironmentMixin): self.body = self.body.decode('utf-8') if not querystring: - querystring.update(parse_qs(urlparse(full_url).query, keep_blank_values=True)) + querystring.update( + parse_qs(urlparse(full_url).query, keep_blank_values=True)) if not querystring: if 'json' in request.headers.get('content-type', []) and self.aws_service_spec: decoded = json.loads(self.body) - target = request.headers.get('x-amz-target') or request.headers.get('X-Amz-Target') + target = request.headers.get( + 'x-amz-target') or request.headers.get('X-Amz-Target') service, method = target.split('.') input_spec = self.aws_service_spec.input_spec(method) flat = flatten_json_request_body('', decoded, input_spec) @@ -161,7 +165,8 @@ class BaseResponse(_TemplateEnvironmentMixin): if match: region = match.group(1) elif 'Authorization' in request.headers: - region = request.headers['Authorization'].split(",")[0].split("/")[2] + region = request.headers['Authorization'].split(",")[ + 0].split("/")[2] else: region = self.default_region return region @@ -175,7 +180,8 @@ class BaseResponse(_TemplateEnvironmentMixin): action = self.querystring.get('Action', [""])[0] if not action: # Some services use a header for the action # Headers are case-insensitive. Probably a better way to do this. 
@@ -175,7 +180,8 @@ class BaseResponse(_TemplateEnvironmentMixin):
         action = self.querystring.get('Action', [""])[0]
         if not action:  # Some services use a header for the action
             # Headers are case-insensitive. Probably a better way to do this.
-            match = self.headers.get('x-amz-target') or self.headers.get('X-Amz-Target')
+            match = self.headers.get(
+                'x-amz-target') or self.headers.get('X-Amz-Target')
             if match:
                 action = match.split(".")[-1]
 
@@ -198,7 +204,8 @@ class BaseResponse(_TemplateEnvironmentMixin):
                     headers['status'] = str(headers['status'])
                 return status, headers, body
 
-        raise NotImplementedError("The {0} action has not been implemented".format(action))
+        raise NotImplementedError(
+            "The {0} action has not been implemented".format(action))
 
     def _get_param(self, param_name, if_none=None):
         val = self.querystring.get(param_name)
@@ -258,7 +265,8 @@ class BaseResponse(_TemplateEnvironmentMixin):
         params = {}
         for key, value in self.querystring.items():
             if key.startswith(param_prefix):
-                params[camelcase_to_underscores(key.replace(param_prefix, ""))] = value[0]
+                params[camelcase_to_underscores(
+                    key.replace(param_prefix, ""))] = value[0]
         return params
 
     def _get_list_prefix(self, param_prefix):
@@ -291,7 +299,8 @@ class BaseResponse(_TemplateEnvironmentMixin):
             new_items = {}
             for key, value in self.querystring.items():
                 if key.startswith(index_prefix):
-                    new_items[camelcase_to_underscores(key.replace(index_prefix, ""))] = value[0]
+                    new_items[camelcase_to_underscores(
+                        key.replace(index_prefix, ""))] = value[0]
             if not new_items:
                 break
             results.append(new_items)
@@ -327,7 +336,8 @@ class BaseResponse(_TemplateEnvironmentMixin):
     def is_not_dryrun(self, action):
         if 'true' in self.querystring.get('DryRun', ['false']):
             message = 'An error occurred (DryRunOperation) when calling the %s operation: Request would have succeeded, but DryRun flag is set' % action
-            raise DryRunClientError(error_type="DryRunOperation", message=message)
+            raise DryRunClientError(
+                error_type="DryRunOperation", message=message)
         return True
 
 
@@ -343,6 +353,7 @@ class MotoAPIResponse(BaseResponse):
 
 class _RecursiveDictRef(object):
     """Store a recursive reference to dict."""
+
     def __init__(self):
         self.key = None
         self.dic = {}
@@ -502,12 +513,15 @@ def flatten_json_request_body(prefix, dict_body, spec):
         if node_type == 'list':
             for idx, v in enumerate(value, 1):
                 pref = key + '.member.' + str(idx)
-                flat.update(flatten_json_request_body(pref, v, spec[key]['member']))
+                flat.update(flatten_json_request_body(
+                    pref, v, spec[key]['member']))
         elif node_type == 'map':
             for idx, (k, v) in enumerate(value.items(), 1):
                 pref = key + '.entry.' + str(idx)
-                flat.update(flatten_json_request_body(pref + '.key', k, spec[key]['key']))
-                flat.update(flatten_json_request_body(pref + '.value', v, spec[key]['value']))
+                flat.update(flatten_json_request_body(
+                    pref + '.key', k, spec[key]['key']))
+                flat.update(flatten_json_request_body(
+                    pref + '.value', v, spec[key]['value']))
         else:
             flat.update(flatten_json_request_body(key, value, spec[key]))
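flatten_json_request_body turns a nested JSON body into the dotted, 1-indexed query keys that the XML-style handlers already parse; a simplified sketch that ignores the botocore spec argument:

    def flatten(prefix, value):
        flat = {}
        if isinstance(value, dict):
            for k, v in value.items():
                flat.update(flatten(prefix + '.' + k if prefix else k, v))
        elif isinstance(value, list):
            # list members become 1-indexed '.member.N' keys
            for idx, v in enumerate(value, 1):
                flat.update(flatten('{0}.member.{1}'.format(prefix, idx), v))
        else:
            flat[prefix] = value
        return flat

    assert flatten('', {'Tags': [{'Key': 'env'}]}) == {'Tags.member.1.Key': 'env'}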
@@ -542,7 +556,8 @@ def xml_to_json_response(service_spec, operation, xml, result_node=None):
                 # this can happen with an older version of
                 # botocore for which the node in XML template is not
                 # defined in service spec.
-                log.warning('Field %s is not defined by the botocore version in use', k)
+                log.warning(
+                    'Field %s is not defined by the botocore version in use', k)
                 continue
 
             if spec[k]['type'] == 'list':
@@ -554,7 +569,8 @@ def xml_to_json_response(service_spec, operation, xml, result_node=None):
                 else:
                     od[k] = [transform(v['member'], spec[k]['member'])]
             elif isinstance(v['member'], list):
-                od[k] = [transform(o, spec[k]['member']) for o in v['member']]
+                od[k] = [transform(o, spec[k]['member'])
+                         for o in v['member']]
             elif isinstance(v['member'], OrderedDict):
                 od[k] = [transform(v['member'], spec[k]['member'])]
             else:
diff --git a/moto/core/utils.py b/moto/core/utils.py
index 11aafbb89..d26694014 100644
--- a/moto/core/utils.py
+++ b/moto/core/utils.py
@@ -98,7 +98,7 @@ class convert_httpretty_response(object):
         result = self.callback(request, url, headers)
         status, headers, response = result
         if 'server' not in headers:
-            headers["server"] = "amazon.com"
+            headers["server"] = "amazon.com"
         return status, headers, response
diff --git a/moto/datapipeline/__init__.py b/moto/datapipeline/__init__.py
index cebcf22bf..2565ddd5a 100644
--- a/moto/datapipeline/__init__.py
+++ b/moto/datapipeline/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import datapipeline_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 datapipeline_backend = datapipeline_backends['us-east-1']
 mock_datapipeline = base_decorator(datapipeline_backends)
diff --git a/moto/datapipeline/models.py b/moto/datapipeline/models.py
index b6a70b5f1..0cb33e4ed 100644
--- a/moto/datapipeline/models.py
+++ b/moto/datapipeline/models.py
@@ -7,6 +7,7 @@ from .utils import get_random_pipeline_id, remove_capitalization_of_dict_keys
 
 
 class PipelineObject(object):
+
     def __init__(self, object_id, name, fields):
         self.object_id = object_id
         self.name = name
@@ -21,6 +22,7 @@ class PipelineObject(object):
 
 
 class Pipeline(object):
+
     def __init__(self, name, unique_id):
         self.name = name
         self.unique_id = unique_id
@@ -82,7 +84,8 @@ class Pipeline(object):
 
     def set_pipeline_objects(self, pipeline_objects):
         self.objects = [
-            PipelineObject(pipeline_object['id'], pipeline_object['name'], pipeline_object['fields'])
+            PipelineObject(pipeline_object['id'], pipeline_object[
+                           'name'], pipeline_object['fields'])
             for pipeline_object in remove_capitalization_of_dict_keys(pipeline_objects)
         ]
 
@@ -95,8 +98,10 @@ class Pipeline(object):
         properties = cloudformation_json["Properties"]
 
         cloudformation_unique_id = "cf-" + properties["Name"]
-        pipeline = datapipeline_backend.create_pipeline(properties["Name"], cloudformation_unique_id)
-        datapipeline_backend.put_pipeline_definition(pipeline.pipeline_id, properties["PipelineObjects"])
+        pipeline = datapipeline_backend.create_pipeline(
+            properties["Name"], cloudformation_unique_id)
+        datapipeline_backend.put_pipeline_definition(
+            pipeline.pipeline_id, properties["PipelineObjects"])
 
         if properties["Activate"]:
             pipeline.activate()
@@ -117,7 +122,8 @@ class DataPipelineBackend(BaseBackend):
         return self.pipelines.values()
 
     def describe_pipelines(self, pipeline_ids):
-        pipelines = [pipeline for pipeline in self.pipelines.values() if pipeline.pipeline_id in pipeline_ids]
+        pipelines = [pipeline for pipeline in self.pipelines.values(
+        ) if pipeline.pipeline_id in pipeline_ids]
         return pipelines
 
     def get_pipeline(self, pipeline_id):
diff --git a/moto/datapipeline/responses.py b/moto/datapipeline/responses.py
index 2607f685d..f3644fd5c 100644
--- a/moto/datapipeline/responses.py
+++ b/moto/datapipeline/responses.py
@@ -52,12 +52,14 @@ class DataPipelineResponse(BaseResponse):
         pipeline_id = self.parameters["pipelineId"]
         pipeline_objects = self.parameters["pipelineObjects"]
 
-        self.datapipeline_backend.put_pipeline_definition(pipeline_id, pipeline_objects)
+        self.datapipeline_backend.put_pipeline_definition(
+            pipeline_id, pipeline_objects)
         return json.dumps({"errored": False})
 
     def get_pipeline_definition(self):
         pipeline_id = self.parameters["pipelineId"]
-        pipeline_definition = self.datapipeline_backend.get_pipeline_definition(pipeline_id)
+        pipeline_definition = self.datapipeline_backend.get_pipeline_definition(
+            pipeline_id)
         return json.dumps({
             "pipelineObjects": [pipeline_object.to_json() for pipeline_object in pipeline_definition]
         })
@@ -66,7 +68,8 @@ class DataPipelineResponse(BaseResponse):
         pipeline_id = self.parameters["pipelineId"]
         object_ids = self.parameters["objectIds"]
 
-        pipeline_objects = self.datapipeline_backend.describe_objects(object_ids, pipeline_id)
+        pipeline_objects = self.datapipeline_backend.describe_objects(
+            object_ids, pipeline_id)
         return json.dumps({
             "hasMoreResults": False,
             "marker": None,
diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py
index dd58eb4de..db50dbcc6 100644
--- a/moto/dynamodb/models.py
+++ b/moto/dynamodb/models.py
@@ -10,6 +10,7 @@ from .comparisons import get_comparison_func
 
 
 class DynamoJsonEncoder(json.JSONEncoder):
+
     def default(self, obj):
         if hasattr(obj, 'to_json'):
             return obj.to_json()
@@ -53,6 +54,7 @@ class DynamoType(object):
 
 
 class Item(object):
+
     def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):
         self.hash_key = hash_key
         self.hash_key_type = hash_key_type
@@ -157,7 +159,8 @@ class Table(object):
         else:
             range_value = None
 
-        item = Item(hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs)
+        item = Item(hash_value, self.hash_key_type, range_value,
+                    self.range_key_type, item_attrs)
 
         if range_value:
             self.items[hash_value][range_value] = item
@@ -167,7 +170,8 @@ class Table(object):
 
     def get_item(self, hash_key, range_key):
         if self.has_range_key and not range_key:
-            raise ValueError("Table has a range key, but no range key was passed into get_item")
+            raise ValueError(
+                "Table has a range key, but no range key was passed into get_item")
         try:
             if range_key:
                 return self.items[hash_key][range_key]
@@ -222,7 +226,8 @@ class Table(object):
                         # Comparison is NULL and we don't have the attribute
                         continue
                     else:
-                        # No attribute found and comparison is no NULL. This item fails
+                        # No attribute found and comparison is not NULL. This
+                        # item fails
                         passes_all_conditions = False
                         break
 
@@ -283,7 +288,8 @@ class DynamoDBBackend(BaseBackend):
             return None, None
 
         hash_key = DynamoType(hash_key_dict)
-        range_values = [DynamoType(range_value) for range_value in range_value_dicts]
+        range_values = [DynamoType(range_value)
+                        for range_value in range_value_dicts]
 
         return table.query(hash_key, range_comparison, range_values)
diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py
index 59cff0395..0da3e5045 100644
--- a/moto/dynamodb/responses.py
+++ b/moto/dynamodb/responses.py
@@ -130,7 +130,8 @@ class DynamoHandler(BaseResponse):
         throughput = self.body["ProvisionedThroughput"]
         new_read_units = throughput["ReadCapacityUnits"]
         new_write_units = throughput["WriteCapacityUnits"]
-        table = dynamodb_backend.update_table_throughput(name, new_read_units, new_write_units)
+        table = dynamodb_backend.update_table_throughput(
+            name, new_read_units, new_write_units)
         return dynamo_json_dump(table.describe)
 
     def describe_table(self):
@@ -169,7 +170,8 @@ class DynamoHandler(BaseResponse):
                     key = request['Key']
                     hash_key = key['HashKeyElement']
                     range_key = key.get('RangeKeyElement')
-                    item = dynamodb_backend.delete_item(table_name, hash_key, range_key)
+                    item = dynamodb_backend.delete_item(
+                        table_name, hash_key, range_key)
 
         response = {
             "Responses": {
@@ -221,11 +223,13 @@ class DynamoHandler(BaseResponse):
             for key in keys:
                 hash_key = key["HashKeyElement"]
                 range_key = key.get("RangeKeyElement")
-                item = dynamodb_backend.get_item(table_name, hash_key, range_key)
+                item = dynamodb_backend.get_item(
+                    table_name, hash_key, range_key)
                 if item:
                     item_describe = item.describe_attrs(attributes_to_get)
                     items.append(item_describe)
-            results["Responses"][table_name] = {"Items": items, "ConsumedCapacityUnits": 1}
+            results["Responses"][table_name] = {
+                "Items": items, "ConsumedCapacityUnits": 1}
 
         return dynamo_json_dump(results)
 
     def query(self):
@@ -239,7 +243,8 @@ class DynamoHandler(BaseResponse):
             range_comparison = None
             range_values = []
 
-        items, last_page = dynamodb_backend.query(name, hash_key, range_comparison, range_values)
+        items, last_page = dynamodb_backend.query(
+            name, hash_key, range_comparison, range_values)
 
         if items is None:
             er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
@@ -265,7 +270,8 @@ class DynamoHandler(BaseResponse):
         filters = {}
         scan_filters = self.body.get('ScanFilter', {})
         for attribute_name, scan_filter in scan_filters.items():
-            # Keys are attribute names. Values are tuples of (comparison, comparison_value)
+            # Keys are attribute names. Values are tuples of (comparison,
+            # comparison_value)
             comparison_operator = scan_filter["ComparisonOperator"]
             comparison_values = scan_filter.get("AttributeValueList", [])
             filters[attribute_name] = (comparison_operator, comparison_values)
diff --git a/moto/dynamodb2/__init__.py b/moto/dynamodb2/__init__.py
index 7a1f07352..ad3f042d2 100644
--- a/moto/dynamodb2/__init__.py
+++ b/moto/dynamodb2/__init__.py
@@ -3,4 +3,4 @@ from .models import dynamodb_backend2
 dynamodb_backends2 = {"global": dynamodb_backend2}
 
 mock_dynamodb2 = dynamodb_backend2.decorator
-mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator
\ No newline at end of file
+mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator
diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py
index 1dc723df0..0b323ecd5 100644
--- a/moto/dynamodb2/comparisons.py
+++ b/moto/dynamodb2/comparisons.py
@@ -1,12 +1,12 @@
 from __future__ import unicode_literals
 
 # TODO add tests for all of these
-EQ_FUNCTION = lambda item_value, test_value: item_value == test_value
-NE_FUNCTION = lambda item_value, test_value: item_value != test_value
-LE_FUNCTION = lambda item_value, test_value: item_value <= test_value
-LT_FUNCTION = lambda item_value, test_value: item_value < test_value
-GE_FUNCTION = lambda item_value, test_value: item_value >= test_value
-GT_FUNCTION = lambda item_value, test_value: item_value > test_value
+EQ_FUNCTION = lambda item_value, test_value: item_value == test_value  # flake8: noqa
+NE_FUNCTION = lambda item_value, test_value: item_value != test_value  # flake8: noqa
+LE_FUNCTION = lambda item_value, test_value: item_value <= test_value  # flake8: noqa
+LT_FUNCTION = lambda item_value, test_value: item_value < test_value  # flake8: noqa
+GE_FUNCTION = lambda item_value, test_value: item_value >= test_value  # flake8: noqa
+GT_FUNCTION = lambda item_value, test_value: item_value > test_value  # flake8: noqa
 
 COMPARISON_FUNCS = {
     'EQ': EQ_FUNCTION,
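COMPARISON_FUNCS maps DynamoDB operator names to these two-argument lambdas, and the scan/query paths call one per candidate value; the same machinery in miniature:

    COMPARISONS = {
        'EQ': lambda item_value, test_value: item_value == test_value,
        'GT': lambda item_value, test_value: item_value > test_value,
    }

    def passes(item, filters):
        # filters: attribute name -> (comparison_operator, comparison_values)
        for attr, (op, values) in filters.items():
            if attr not in item or not COMPARISONS[op](item[attr], values[0]):
                return False
        return True

    assert passes({'age': 30}, {'age': ('GT', [21])})
    assert not passes({'age': 30}, {'name': ('EQ', ['bob'])})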
DynamoType({"N": str( - decimal.Decimal(existing.value) + - decimal.Decimal(new_value) + decimal.Decimal(existing.value) + + decimal.Decimal(new_value) )}) else: # TODO: implement other data types - raise NotImplementedError('ADD not supported for %s' % ', '.join(update_action['Value'].keys())) + raise NotImplementedError( + 'ADD not supported for %s' % ', '.join(update_action['Value'].keys())) class Table(object): @@ -186,7 +191,8 @@ class Table(object): self.range_key_attr = elem["AttributeName"] self.range_key_type = elem["KeyType"] if throughput is None: - self.throughput = {'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10} + self.throughput = { + 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10} else: self.throughput = throughput self.throughput["NumberOfDecreasesToday"] = 0 @@ -250,14 +256,16 @@ class Table(object): else: range_value = None - item = Item(hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs) + item = Item(hash_value, self.hash_key_type, range_value, + self.range_key_type, item_attrs) if not overwrite: if expected is None: expected = {} lookup_range_value = range_value else: - expected_range_value = expected.get(self.range_key_attr, {}).get("Value") + expected_range_value = expected.get( + self.range_key_attr, {}).get("Value") if(expected_range_value is None): lookup_range_value = range_value else: @@ -281,8 +289,10 @@ class Table(object): elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value: raise ValueError("The conditional request failed") elif 'ComparisonOperator' in val: - comparison_func = get_comparison_func(val['ComparisonOperator']) - dynamo_types = [DynamoType(ele) for ele in val["AttributeValueList"]] + comparison_func = get_comparison_func( + val['ComparisonOperator']) + dynamo_types = [DynamoType(ele) for ele in val[ + "AttributeValueList"]] for t in dynamo_types: if not comparison_func(current_attr[key].value, t.value): raise ValueError('The conditional request failed') @@ -304,7 +314,8 @@ class Table(object): def get_item(self, hash_key, range_key=None): if self.has_range_key and not range_key: - raise ValueError("Table has a range key, but no range key was passed into get_item") + raise ValueError( + "Table has a range key, but no range key was passed into get_item") try: if range_key: return self.items[hash_key][range_key] @@ -339,9 +350,11 @@ class Table(object): index = indexes_by_name[index_name] try: - index_hash_key = [key for key in index['KeySchema'] if key['KeyType'] == 'HASH'][0] + index_hash_key = [key for key in index[ + 'KeySchema'] if key['KeyType'] == 'HASH'][0] except IndexError: - raise ValueError('Missing Hash Key. KeySchema: %s' % index['KeySchema']) + raise ValueError('Missing Hash Key. 
KeySchema: %s' % + index['KeySchema']) possible_results = [] for item in self.all_items(): @@ -351,17 +364,20 @@ class Table(object): if item_hash_key and item_hash_key == hash_key: possible_results.append(item) else: - possible_results = [item for item in list(self.all_items()) if isinstance(item, Item) and item.hash_key == hash_key] + possible_results = [item for item in list(self.all_items()) if isinstance( + item, Item) and item.hash_key == hash_key] if index_name: try: - index_range_key = [key for key in index['KeySchema'] if key['KeyType'] == 'RANGE'][0] + index_range_key = [key for key in index[ + 'KeySchema'] if key['KeyType'] == 'RANGE'][0] except IndexError: index_range_key = None if range_comparison: if index_name and not index_range_key: - raise ValueError('Range Key comparison but no range key found for index: %s' % index_name) + raise ValueError( + 'Range Key comparison but no range key found for index: %s' % index_name) elif index_name: for result in possible_results: @@ -375,19 +391,21 @@ class Table(object): if filter_kwargs: for result in possible_results: for field, value in filter_kwargs.items(): - dynamo_types = [DynamoType(ele) for ele in value["AttributeValueList"]] + dynamo_types = [DynamoType(ele) for ele in value[ + "AttributeValueList"]] if result.attrs.get(field).compare(value['ComparisonOperator'], dynamo_types): results.append(result) if not range_comparison and not filter_kwargs: - # If we're not filtering on range key or on an index return all values + # If we're not filtering on range key or on an index return all + # values results = possible_results if index_name: if index_range_key: results.sort(key=lambda item: item.attrs[index_range_key['AttributeName']].value - if item.attrs.get(index_range_key['AttributeName']) else None) + if item.attrs.get(index_range_key['AttributeName']) else None) else: results.sort(key=lambda item: item.range_key) @@ -427,7 +445,8 @@ class Table(object): # Comparison is NULL and we don't have the attribute continue else: - # No attribute found and comparison is no NULL. This item fails + # No attribute found and comparison is no NULL. 
This item + # fails passes_all_conditions = False break @@ -460,7 +479,6 @@ class Table(object): return results, last_evaluated_key - def lookup(self, *args, **kwargs): if not self.schema: self.describe() @@ -517,7 +535,8 @@ class DynamoDBBackend(BaseBackend): if gsi_to_create: if gsi_to_create['IndexName'] in gsis_by_name: - raise ValueError('Global Secondary Index already exists: %s' % gsi_to_create['IndexName']) + raise ValueError( + 'Global Secondary Index already exists: %s' % gsi_to_create['IndexName']) gsis_by_name[gsi_to_create['IndexName']] = gsi_to_create @@ -555,9 +574,11 @@ class DynamoDBBackend(BaseBackend): def get_keys_value(self, table, keys): if table.hash_key_attr not in keys or (table.has_range_key and table.range_key_attr not in keys): - raise ValueError("Table has a range key, but no range key was passed into get_item") + raise ValueError( + "Table has a range key, but no range key was passed into get_item") hash_key = DynamoType(keys[table.hash_key_attr]) - range_key = DynamoType(keys[table.range_key_attr]) if table.has_range_key else None + range_key = DynamoType( + keys[table.range_key_attr]) if table.has_range_key else None return hash_key, range_key def get_table(self, table_name): @@ -577,7 +598,8 @@ class DynamoDBBackend(BaseBackend): return None, None hash_key = DynamoType(hash_key_dict) - range_values = [DynamoType(range_value) for range_value in range_value_dicts] + range_values = [DynamoType(range_value) + for range_value in range_value_dicts] return table.query(hash_key, range_comparison, range_values, limit, exclusive_start_key, scan_index_forward, index_name, **filter_kwargs) @@ -598,7 +620,8 @@ class DynamoDBBackend(BaseBackend): table = self.get_table(table_name) if all([table.hash_key_attr in key, table.range_key_attr in key]): - # Covers cases where table has hash and range keys, ``key`` param will be a dict + # Covers cases where table has hash and range keys, ``key`` param + # will be a dict hash_value = DynamoType(key[table.hash_key_attr]) range_value = DynamoType(key[table.range_key_attr]) elif table.hash_key_attr in key: @@ -629,7 +652,8 @@ class DynamoDBBackend(BaseBackend): item = table.get_item(hash_value, range_value) if update_expression: - item.update(update_expression, expression_attribute_names, expression_attribute_values) + item.update(update_expression, expression_attribute_names, + expression_attribute_values) else: item.update_with_attribute_updates(attribute_updates) return item diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 0957bfa89..3ceda0be1 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -104,11 +104,11 @@ class DynamoHandler(BaseResponse): local_secondary_indexes = body.get("LocalSecondaryIndexes", []) table = dynamodb_backend2.create_table(table_name, - schema=key_schema, - throughput=throughput, - attr=attr, - global_indexes=global_indexes, - indexes=local_secondary_indexes) + schema=key_schema, + throughput=throughput, + attr=attr, + global_indexes=global_indexes, + indexes=local_secondary_indexes) if table is not None: return dynamo_json_dump(table.describe()) else: @@ -127,7 +127,8 @@ class DynamoHandler(BaseResponse): def update_table(self): name = self.body['TableName'] if 'GlobalSecondaryIndexUpdates' in self.body: - table = dynamodb_backend2.update_table_global_indexes(name, self.body['GlobalSecondaryIndexUpdates']) + table = dynamodb_backend2.update_table_global_indexes( + name, self.body['GlobalSecondaryIndexUpdates']) if 'ProvisionedThroughput' in 
diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py
index 0957bfa89..3ceda0be1 100644
--- a/moto/dynamodb2/responses.py
+++ b/moto/dynamodb2/responses.py
@@ -104,11 +104,11 @@ class DynamoHandler(BaseResponse):
         local_secondary_indexes = body.get("LocalSecondaryIndexes", [])
 
         table = dynamodb_backend2.create_table(table_name,
-            schema=key_schema,
-            throughput=throughput,
-            attr=attr,
-            global_indexes=global_indexes,
-            indexes=local_secondary_indexes)
+                                               schema=key_schema,
+                                               throughput=throughput,
+                                               attr=attr,
+                                               global_indexes=global_indexes,
+                                               indexes=local_secondary_indexes)
         if table is not None:
             return dynamo_json_dump(table.describe())
         else:
@@ -127,7 +127,8 @@ class DynamoHandler(BaseResponse):
     def update_table(self):
         name = self.body['TableName']
         if 'GlobalSecondaryIndexUpdates' in self.body:
-            table = dynamodb_backend2.update_table_global_indexes(name, self.body['GlobalSecondaryIndexUpdates'])
+            table = dynamodb_backend2.update_table_global_indexes(
+                name, self.body['GlobalSecondaryIndexUpdates'])
         if 'ProvisionedThroughput' in self.body:
             throughput = self.body["ProvisionedThroughput"]
             table = dynamodb_backend2.update_table_throughput(name, throughput)
@@ -151,17 +152,20 @@ class DynamoHandler(BaseResponse):
         else:
             expected = None
 
-        # Attempt to parse simple ConditionExpressions into an Expected expression
+        # Attempt to parse simple ConditionExpressions into an Expected
+        # expression
         if not expected:
             condition_expression = self.body.get('ConditionExpression')
             if condition_expression and 'OR' not in condition_expression:
-                cond_items = [c.strip() for c in condition_expression.split('AND')]
+                cond_items = [c.strip()
+                              for c in condition_expression.split('AND')]
 
                 if cond_items:
                     expected = {}
                     overwrite = False
                     exists_re = re.compile('^attribute_exists\((.*)\)$')
-                    not_exists_re = re.compile('^attribute_not_exists\((.*)\)$')
+                    not_exists_re = re.compile(
+                        '^attribute_not_exists\((.*)\)$')
 
                 for cond in cond_items:
                     exists_m = exists_re.match(cond)
@@ -172,7 +176,8 @@ class DynamoHandler(BaseResponse):
                         expected[not_exists_m.group(1)] = {'Exists': False}
 
         try:
-            result = dynamodb_backend2.put_item(name, item, expected, overwrite)
+            result = dynamodb_backend2.put_item(
+                name, item, expected, overwrite)
         except Exception:
             er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
             return self.error(er)
@@ -249,7 +254,8 @@ class DynamoHandler(BaseResponse):
                 item = dynamodb_backend2.get_item(table_name, key)
                 if item:
                     item_describe = item.describe_attrs(attributes_to_get)
-                    results["Responses"][table_name].append(item_describe["Item"])
+                    results["Responses"][table_name].append(
+                        item_describe["Item"])
 
             results["ConsumedCapacity"].append({
                 "CapacityUnits": len(keys),
@@ -268,8 +274,10 @@ class DynamoHandler(BaseResponse):
         table = dynamodb_backend2.get_table(name)
         index_name = self.body.get('IndexName')
         if index_name:
-            all_indexes = (table.global_indexes or []) + (table.indexes or [])
-            indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
+            all_indexes = (table.global_indexes or []) + \
+                (table.indexes or [])
+            indexes_by_name = dict((i['IndexName'], i)
+                                   for i in all_indexes)
             if index_name not in indexes_by_name:
                 raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
                     index_name, name, ', '.join(indexes_by_name.keys())
@@ -279,16 +287,21 @@ class DynamoHandler(BaseResponse):
         else:
             index = table.schema
 
-        key_map = [column for _, column in sorted((k, v) for k, v in self.body['ExpressionAttributeNames'].items())]
+        key_map = [column for _, column in sorted(
+            (k, v) for k, v in self.body['ExpressionAttributeNames'].items())]
 
         if " AND " in key_condition_expression:
             expressions = key_condition_expression.split(" AND ", 1)
 
-            index_hash_key = [key for key in index if key['KeyType'] == 'HASH'][0]
-            hash_key_index_in_key_map = key_map.index(index_hash_key['AttributeName'])
+            index_hash_key = [
+                key for key in index if key['KeyType'] == 'HASH'][0]
+            hash_key_index_in_key_map = key_map.index(
+                index_hash_key['AttributeName'])
 
-            hash_key_expression = expressions.pop(hash_key_index_in_key_map).strip('()')
-            # TODO implement more than one range expression and OR operators
+            hash_key_expression = expressions.pop(
+                hash_key_index_in_key_map).strip('()')
+            # TODO implement more than one range expression and OR
+            # operators
             range_key_expression = expressions[0].strip('()')
             range_key_expression_components = range_key_expression.split()
             range_comparison = range_key_expression_components[1]
@@ -304,7 +317,8 @@ class DynamoHandler(BaseResponse):
                     value_alias_map[range_key_expression_components[1]],
                 ]
             else:
-                range_values = [value_alias_map[range_key_expression_components[2]]]
+                range_values = [value_alias_map[
+                    range_key_expression_components[2]]]
         else:
             hash_key_expression = key_condition_expression
             range_comparison = None
@@ -316,14 +330,16 @@ class DynamoHandler(BaseResponse):
             # 'KeyConditions': {u'forum_name': {u'ComparisonOperator': u'EQ', u'AttributeValueList': [{u'S': u'the-key'}]}}
             key_conditions = self.body.get('KeyConditions')
             if key_conditions:
-                hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(name, key_conditions.keys())
+                hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(
+                    name, key_conditions.keys())
                 for key, value in key_conditions.items():
                     if key not in (hash_key_name, range_key_name):
                         filter_kwargs[key] = value
                 if hash_key_name is None:
                     er = "'com.amazonaws.dynamodb.v20120810#ResourceNotFoundException"
                     return self.error(er)
-                hash_key = key_conditions[hash_key_name]['AttributeValueList'][0]
+                hash_key = key_conditions[hash_key_name][
+                    'AttributeValueList'][0]
                 if len(key_conditions) == 1:
                     range_comparison = None
                     range_values = []
@@ -334,8 +350,10 @@ class DynamoHandler(BaseResponse):
                 else:
                     range_condition = key_conditions.get(range_key_name)
                     if range_condition:
-                        range_comparison = range_condition['ComparisonOperator']
-                        range_values = range_condition['AttributeValueList']
+                        range_comparison = range_condition[
+                            'ComparisonOperator']
+                        range_values = range_condition[
+                            'AttributeValueList']
                     else:
                         range_comparison = None
                        range_values = []
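The maxsplit=1 in key_condition_expression.split(" AND ", 1) is what keeps a BETWEEN clause in one piece; a worked example with made-up aliases (simplified: the real handler locates the hash clause via the ExpressionAttributeNames ordering rather than assuming it comes first):

    expr = 'forum_name = :name AND subject BETWEEN :low AND :high'
    hash_part, range_part = expr.split(" AND ", 1)
    assert hash_part == 'forum_name = :name'
    assert range_part == 'subject BETWEEN :low AND :high'
    # The handler then splits the range clause on whitespace:
    assert range_part.split()[1] == 'BETWEEN'   # becomes range_comparison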
@@ -369,7 +387,8 @@ class DynamoHandler(BaseResponse):
         filters = {}
         scan_filters = self.body.get('ScanFilter', {})
         for attribute_name, scan_filter in scan_filters.items():
-            # Keys are attribute names. Values are tuples of (comparison, comparison_value)
+            # Keys are attribute names. Values are tuples of (comparison,
+            # comparison_value)
             comparison_operator = scan_filter["ComparisonOperator"]
             comparison_values = scan_filter.get("AttributeValueList", [])
             filters[attribute_name] = (comparison_operator, comparison_values)
@@ -416,16 +435,20 @@ class DynamoHandler(BaseResponse):
         key = self.body['Key']
         update_expression = self.body.get('UpdateExpression')
         attribute_updates = self.body.get('AttributeUpdates')
-        expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
-        expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
+        expression_attribute_names = self.body.get(
+            'ExpressionAttributeNames', {})
+        expression_attribute_values = self.body.get(
+            'ExpressionAttributeValues', {})
         existing_item = dynamodb_backend2.get_item(name, key)
 
         # Support spaces between operators in an update expression
         # E.g. `a = b + c` -> `a=b+c`
         if update_expression:
-            update_expression = re.sub('\s*([=\+-])\s*', '\\1', update_expression)
+            update_expression = re.sub(
+                '\s*([=\+-])\s*', '\\1', update_expression)
 
-        item = dynamodb_backend2.update_item(name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values)
+        item = dynamodb_backend2.update_item(
+            name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values)
 
         item_dict = item.to_json()
         item_dict['ConsumedCapacityUnits'] = 0.5
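The whitespace-stripping substitution normalizes spacing around =, + and - before the update expression is parsed; a quick check of what it does:

    import re

    update_expression = 'SET counter = counter + :inc'
    normalized = re.sub('\s*([=\+-])\s*', '\\1', update_expression)
    assert normalized == 'SET counter=counter+:inc'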
InvalidVPCIdError(EC2ClientError):
 class InvalidSubnetIdError(EC2ClientError):
+
     def __init__(self, subnet_id):
         super(InvalidSubnetIdError, self).__init__(
             "InvalidSubnetID.NotFound",
@@ -69,6 +77,7 @@ class InvalidSubnetIdError(EC2ClientError):
 class InvalidNetworkAclIdError(EC2ClientError):
+
     def __init__(self, network_acl_id):
         super(InvalidNetworkAclIdError, self).__init__(
             "InvalidNetworkAclID.NotFound",
@@ -77,6 +86,7 @@ class InvalidNetworkAclIdError(EC2ClientError):
 class InvalidVpnGatewayIdError(EC2ClientError):
+
     def __init__(self, network_acl_id):
         super(InvalidVpnGatewayIdError, self).__init__(
             "InvalidVpnGatewayID.NotFound",
@@ -85,6 +95,7 @@ class InvalidVpnGatewayIdError(EC2ClientError):
 class InvalidVpnConnectionIdError(EC2ClientError):
+
     def __init__(self, network_acl_id):
         super(InvalidVpnConnectionIdError, self).__init__(
             "InvalidVpnConnectionID.NotFound",
@@ -93,6 +104,7 @@ class InvalidVpnConnectionIdError(EC2ClientError):
 class InvalidCustomerGatewayIdError(EC2ClientError):
+
     def __init__(self, customer_gateway_id):
         super(InvalidCustomerGatewayIdError, self).__init__(
             "InvalidCustomerGatewayID.NotFound",
@@ -101,6 +113,7 @@ class InvalidCustomerGatewayIdError(EC2ClientError):
 class InvalidNetworkInterfaceIdError(EC2ClientError):
+
     def __init__(self, eni_id):
         super(InvalidNetworkInterfaceIdError, self).__init__(
             "InvalidNetworkInterfaceID.NotFound",
@@ -109,6 +122,7 @@ class InvalidNetworkInterfaceIdError(EC2ClientError):
 class InvalidNetworkAttachmentIdError(EC2ClientError):
+
     def __init__(self, attachment_id):
         super(InvalidNetworkAttachmentIdError, self).__init__(
             "InvalidAttachmentID.NotFound",
@@ -117,6 +131,7 @@ class InvalidNetworkAttachmentIdError(EC2ClientError):
 class InvalidSecurityGroupDuplicateError(EC2ClientError):
+
     def __init__(self, name):
         super(InvalidSecurityGroupDuplicateError, self).__init__(
             "InvalidGroup.Duplicate",
@@ -125,6 +140,7 @@ class InvalidSecurityGroupDuplicateError(EC2ClientError):
 class InvalidSecurityGroupNotFoundError(EC2ClientError):
+
     def __init__(self, name):
         super(InvalidSecurityGroupNotFoundError, self).__init__(
             "InvalidGroup.NotFound",
@@ -133,6 +149,7 @@ class InvalidSecurityGroupNotFoundError(EC2ClientError):
 class InvalidPermissionNotFoundError(EC2ClientError):
+
     def __init__(self):
         super(InvalidPermissionNotFoundError, self).__init__(
             "InvalidPermission.NotFound",
@@ -140,6 +157,7 @@ class InvalidPermissionNotFoundError(EC2ClientError):
 class InvalidRouteTableIdError(EC2ClientError):
+
     def __init__(self, route_table_id):
         super(InvalidRouteTableIdError, self).__init__(
             "InvalidRouteTableID.NotFound",
@@ -148,6 +166,7 @@ class InvalidRouteTableIdError(EC2ClientError):
 class InvalidRouteError(EC2ClientError):
+
     def __init__(self, route_table_id, cidr):
         super(InvalidRouteError, self).__init__(
             "InvalidRoute.NotFound",
@@ -156,6 +175,7 @@ class InvalidRouteError(EC2ClientError):
 class InvalidInstanceIdError(EC2ClientError):
+
     def __init__(self, instance_id):
         super(InvalidInstanceIdError, self).__init__(
             "InvalidInstanceID.NotFound",
@@ -164,6 +184,7 @@ class InvalidInstanceIdError(EC2ClientError):
 class InvalidAMIIdError(EC2ClientError):
+
     def __init__(self, ami_id):
         super(InvalidAMIIdError, self).__init__(
             "InvalidAMIID.NotFound",
@@ -172,6 +193,7 @@ class InvalidAMIIdError(EC2ClientError):
 class InvalidAMIAttributeItemValueError(EC2ClientError):
+
     def __init__(self, attribute, value):
         super(InvalidAMIAttributeItemValueError, self).__init__(
             "InvalidAMIAttributeItemValue",
@@ -180,6 +202,7 @@ class InvalidAMIAttributeItemValueError(EC2ClientError):
 class MalformedAMIIdError(EC2ClientError):
+
     def __init__(self, ami_id):
         super(MalformedAMIIdError, self).__init__(
             "InvalidAMIID.Malformed",
@@ -188,6 +211,7 @@ class MalformedAMIIdError(EC2ClientError):
 class InvalidSnapshotIdError(EC2ClientError):
+
     def __init__(self, snapshot_id):
         super(InvalidSnapshotIdError, self).__init__(
             "InvalidSnapshot.NotFound",
@@ -195,6 +219,7 @@ class InvalidSnapshotIdError(EC2ClientError):
 class InvalidVolumeIdError(EC2ClientError):
+
     def __init__(self, volume_id):
         super(InvalidVolumeIdError, self).__init__(
             "InvalidVolume.NotFound",
@@ -203,6 +228,7 @@ class InvalidVolumeIdError(EC2ClientError):
 class InvalidVolumeAttachmentError(EC2ClientError):
+
     def __init__(self, volume_id, instance_id):
         super(InvalidVolumeAttachmentError, self).__init__(
             "InvalidAttachment.NotFound",
@@ -211,6 +237,7 @@ class InvalidVolumeAttachmentError(EC2ClientError):
 class InvalidDomainError(EC2ClientError):
+
     def __init__(self, domain):
         super(InvalidDomainError, self).__init__(
             "InvalidParameterValue",
@@ -219,6 +246,7 @@ class InvalidDomainError(EC2ClientError):
 class InvalidAddressError(EC2ClientError):
+
     def __init__(self, ip):
         super(InvalidAddressError, self).__init__(
             "InvalidAddress.NotFound",
@@ -227,6 +255,7 @@ class InvalidAddressError(EC2ClientError):
 class InvalidAllocationIdError(EC2ClientError):
+
     def __init__(self, allocation_id):
         super(InvalidAllocationIdError, self).__init__(
             "InvalidAllocationID.NotFound",
@@ -235,6 +264,7 @@ class InvalidAllocationIdError(EC2ClientError):
 class InvalidAssociationIdError(EC2ClientError):
+
     def __init__(self, association_id):
         super(InvalidAssociationIdError, self).__init__(
             "InvalidAssociationID.NotFound",
@@ -243,6 +273,7 @@ class InvalidAssociationIdError(EC2ClientError):
 class InvalidVPCPeeringConnectionIdError(EC2ClientError):
+
     def __init__(self, vpc_peering_connection_id):
         super(InvalidVPCPeeringConnectionIdError, self).__init__(
             "InvalidVpcPeeringConnectionId.NotFound",
@@ -251,6 +282,7 @@ class InvalidVPCPeeringConnectionIdError(EC2ClientError):
 class InvalidVPCPeeringConnectionStateTransitionError(EC2ClientError):
+
     def __init__(self, vpc_peering_connection_id):
         super(InvalidVPCPeeringConnectionStateTransitionError, self).__init__(
             "InvalidStateTransition",
@@ -259,6 +291,7 @@ class InvalidVPCPeeringConnectionStateTransitionError(EC2ClientError):
 class InvalidParameterValueError(EC2ClientError):
+
     def __init__(self, parameter_value):
         super(InvalidParameterValueError, self).__init__(
             "InvalidParameterValue",
@@ -267,6 +300,7 @@ class InvalidParameterValueError(EC2ClientError):
 class InvalidParameterValueErrorTagNull(EC2ClientError):
+
     def __init__(self):
         super(InvalidParameterValueErrorTagNull, self).__init__(
             "InvalidParameterValue",
@@ -274,6 +308,7 @@ class InvalidParameterValueErrorTagNull(EC2ClientError):
 class InvalidInternetGatewayIdError(EC2ClientError):
+
     def __init__(self, internet_gateway_id):
         super(InvalidInternetGatewayIdError, self).__init__(
             "InvalidInternetGatewayID.NotFound",
@@ -282,6 +317,7 @@ class InvalidInternetGatewayIdError(EC2ClientError):
 class GatewayNotAttachedError(EC2ClientError):
+
     def __init__(self, internet_gateway_id, vpc_id):
         super(GatewayNotAttachedError, self).__init__(
             "Gateway.NotAttached",
@@ -290,6 +326,7 @@ class GatewayNotAttachedError(EC2ClientError):
 class ResourceAlreadyAssociatedError(EC2ClientError):
+
     def __init__(self, resource_id):
         super(ResourceAlreadyAssociatedError, self).__init__(
             "Resource.AlreadyAssociated",
@@ -298,6 +335,7 @@ class ResourceAlreadyAssociatedError(EC2ClientError):
 class TagLimitExceeded(EC2ClientError):
+
     def __init__(self):
         super(TagLimitExceeded, self).__init__(
             "TagLimitExceeded",
@@ -305,6 +343,7 @@ class TagLimitExceeded(EC2ClientError):
 class InvalidID(EC2ClientError):
+
     def __init__(self, resource_id):
         super(InvalidID, self).__init__(
             "InvalidID",
@@ -313,6 +352,7 @@ class InvalidID(EC2ClientError):
 class InvalidCIDRSubnetError(EC2ClientError):
+
     def __init__(self, cidr):
         super(InvalidCIDRSubnetError, self).__init__(
             "InvalidParameterValue",
@@ -321,6 +361,7 @@ class InvalidCIDRSubnetError(EC2ClientError):
 class RulesPerSecurityGroupLimitExceededError(EC2ClientError):
+
     def __init__(self):
         super(RulesPerSecurityGroupLimitExceededError, self).__init__(
             "RulesPerSecurityGroupLimitExceeded",
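Every exception in this file follows the same two-argument pattern: an AWS error code plus a human-readable message interpolated with the offending ID. A minimal sketch of the pattern, assuming a simplified EC2ClientError base that only stores the code and message (moto's real base also renders an XML error body, and the message text here is illustrative):

    class EC2ClientError(Exception):
        # Simplified stand-in: stores the AWS error code and message.
        def __init__(self, code, message):
            super(EC2ClientError, self).__init__("{0}: {1}".format(code, message))
            self.code = code
            self.message = message

    class InvalidSubnetIdError(EC2ClientError):
        def __init__(self, subnet_id):
            super(InvalidSubnetIdError, self).__init__(
                "InvalidSubnetID.NotFound",
                "The subnet ID '{0}' does not exist".format(subnet_id))

    try:
        raise InvalidSubnetIdError('subnet-1a2b3c4d')
    except EC2ClientError as e:
        print(e.code, '-', e.message)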
diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index 30769fd7e..2e6b5e5b6 100755
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -1,6 +1,5 @@
 from __future__ import unicode_literals
-import boto
 import copy
 import itertools
 import re
@@ -117,20 +116,24 @@ def validate_resource_ids(resource_ids):
 class InstanceState(object):
+
     def __init__(self, name='pending', code=0):
         self.name = name
         self.code = code
 class StateReason(object):
+
     def __init__(self, message="", code=""):
         self.message = message
         self.code = code
 class TaggedEC2Resource(object):
+
     def get_tags(self, *args, **kwargs):
-        tags = self.ec2_backend.describe_tags(filters={'resource-id': [self.id]})
+        tags = self.ec2_backend.describe_tags(
+            filters={'resource-id': [self.id]})
         return tags
     def add_tag(self, key, value):
@@ -155,8 +158,9 @@ class TaggedEC2Resource(object):
 class NetworkInterface(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, subnet, private_ip_address, device_index=0,
-            public_ip_auto_assign=True, group_ids=None):
+                 public_ip_auto_assign=True, group_ids=None):
         self.ec2_backend = ec2_backend
         self.id = random_eni_id()
         self.device_index = device_index
@@ -181,7 +185,8 @@ class NetworkInterface(TaggedEC2Resource):
                 group = self.ec2_backend.get_security_group_from_id(group_id)
                 if not group:
                     # Create with specific group ID.
-                    group = SecurityGroup(self.ec2_backend, group_id, group_id, group_id, vpc_id=subnet.vpc_id)
+                    group = SecurityGroup(
+                        self.ec2_backend, group_id, group_id, group_id, vpc_id=subnet.vpc_id)
                     self.ec2_backend.groups[subnet.vpc_id][group_id] = group
                 if group:
                     self._group_set.append(group)
@@ -231,7 +236,8 @@ class NetworkInterface(TaggedEC2Resource):
         if attribute_name == 'PrimaryPrivateIpAddress':
             return self.private_ip_address
         elif attribute_name == 'SecondaryPrivateIpAddresses':
-            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "SecondaryPrivateIpAddresses" ]"')
+            raise NotImplementedError(
+                '"Fn::GetAtt" : [ "{0}" , "SecondaryPrivateIpAddresses" ]"')
         raise UnformattedGetAttTemplateException()
     @property
@@ -250,23 +256,27 @@ class NetworkInterface(TaggedEC2Resource):
         elif filter_name == 'group-id':
             return [group.id for group in self._group_set]
-        filter_value = super(NetworkInterface, self).get_filter_value(filter_name)
+        filter_value = super(
+            NetworkInterface, self).get_filter_value(filter_name)
         if filter_value is None:
             self.ec2_backend.raise_not_implemented_error(
-                "The filter '{0}' for DescribeNetworkInterfaces".format(filter_name)
+                "The filter '{0}' for DescribeNetworkInterfaces".format(
+                    filter_name)
             )
         return filter_value
 class NetworkInterfaceBackend(object):
+
     def __init__(self):
         self.enis = {}
         super(NetworkInterfaceBackend, self).__init__()
     def create_network_interface(self, subnet, private_ip_address, group_ids=None, **kwargs):
-        eni = NetworkInterface(self, subnet, private_ip_address, group_ids=group_ids, **kwargs)
+        eni = NetworkInterface(
+            self, subnet, private_ip_address, group_ids=group_ids, **kwargs)
         self.enis[eni.id] = eni
         return eni
@@ -289,7 +299,8 @@ class NetworkInterfaceBackend(object):
             for (_filter, _filter_value) in filters.items():
                 if _filter == 'network-interface-id':
                     _filter = 'id'
-                    enis = [eni for eni in enis if getattr(eni, _filter) in _filter_value]
+                    enis = [eni for eni in enis if getattr(
+                        eni, _filter) in _filter_value]
                 elif _filter == 'group-id':
                     original_enis = enis
                     enis = []
                     for eni in original_enis:
                             enis.append(eni)
                             break
                 else:
-                    self.raise_not_implemented_error("The filter '{0}' for DescribeNetworkInterfaces".format(_filter))
+                    self.raise_not_implemented_error(
+                        "The filter '{0}' for DescribeNetworkInterfaces".format(_filter))
         return enis
     def attach_network_interface(self, eni_id, instance_id, device_index):
@@ -330,13 +342,15 @@ class NetworkInterfaceBackend(object):
         if eni_ids:
             enis = [eni for eni in enis if eni.id in eni_ids]
             if len(enis) != len(eni_ids):
-                invalid_id = list(set(eni_ids).difference(set([eni.id for eni in enis])))[0]
+                invalid_id = list(set(eni_ids).difference(
+                    set([eni.id for eni in enis])))[0]
                 raise InvalidNetworkInterfaceIdError(invalid_id)
         return generic_filter(filters, enis)
 class Instance(BotoInstance, TaggedEC2Resource):
+
     def __init__(self, ec2_backend, image_id, user_data, security_groups, **kwargs):
         super(Instance, self).__init__()
         self.ec2_backend = ec2_backend
@@ -367,7 +381,8 @@ class Instance(BotoInstance, TaggedEC2Resource):
         self.virtualization_type = ami.virtualization_type if ami else 'paravirtual'
         self.architecture = ami.architecture if ami else 'x86_64'
-        # handle weird bug around user_data -- something grabs the repr(), so it must be clean
+        # handle weird bug around user_data -- something grabs the repr(), so
+        # it must be clean
         if isinstance(self.user_data, list) and len(self.user_data) > 0:
             if six.PY3 and isinstance(self.user_data[0], six.binary_type):
                 # string will have a "b" prefix -- need to get rid of it
@@ -393,7 +408,8 @@ class Instance(BotoInstance, TaggedEC2Resource):
                            associate_public_ip=associate_public_ip)
     def setup_defaults(self):
-        # Default have an instance with root volume should you not wish to override with attach volume cmd.
+        # By default an instance gets a root volume, should you not wish to
+        # override it with an attach-volume command.
        volume = self.ec2_backend.create_volume(8, 'us-east-1a')
        self.ec2_backend.attach_volume(volume.id, self.id, '/dev/sda1')
@@ -429,7 +445,8 @@ class Instance(BotoInstance, TaggedEC2Resource):
         ec2_backend = ec2_backends[region_name]
         security_group_ids = properties.get('SecurityGroups', [])
-        group_names = [ec2_backend.get_security_group_from_id(group_id).name for group_id in security_group_ids]
+        group_names = [ec2_backend.get_security_group_from_id(
+            group_id).name for group_id in security_group_ids]
         reservation = ec2_backend.add_instances(
             image_id=properties['ImageId'],
@@ -464,7 +481,8 @@ class Instance(BotoInstance, TaggedEC2Resource):
         self._state.name = "stopped"
         self._state.code = 80
-        self._reason = "User initiated ({0})".format(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
+        self._reason = "User initiated ({0})".format(
+            datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
         self._state_reason = StateReason("Client.UserInitiatedShutdown: User initiated shutdown",
                                          "Client.UserInitiatedShutdown")
@@ -480,7 +498,8 @@ class Instance(BotoInstance, TaggedEC2Resource):
         self._state.name = "terminated"
         self._state.code = 48
-        self._reason = "User initiated ({0})".format(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
+        self._reason = "User initiated ({0})".format(
+            datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
         self._state_reason = StateReason("Client.UserInitiatedShutdown: User initiated shutdown",
                                          "Client.UserInitiatedShutdown")
@@ -514,7 +533,8 @@ class Instance(BotoInstance, TaggedEC2Resource):
                        'AssociatePublicIpAddress': associate_public_ip}
         primary_nic = dict((k, v) for k, v in primary_nic.items() if v)
-        # If empty NIC spec but primary NIC values provided, create NIC from them.
+        # If empty NIC spec but primary NIC values provided, create NIC from
+        # them.
         if primary_nic and not nic_spec:
             nic_spec[0] = primary_nic
             nic_spec[0]['DeviceIndex'] = 0
@@ -544,10 +564,12 @@ class Instance(BotoInstance, TaggedEC2Resource):
                 group_ids = [group_id] if group_id else []
                 use_nic = self.ec2_backend.create_network_interface(subnet,
-                                                                    nic.get('PrivateIpAddress'),
-                                                                    device_index=device_index,
-                                                                    public_ip_auto_assign=nic.get('AssociatePublicIpAddress', False),
-                                                                    group_ids=group_ids)
+                                                                    nic.get(
+                                                                        'PrivateIpAddress'),
+                                                                    device_index=device_index,
+                                                                    public_ip_auto_assign=nic.get(
+                                                                        'AssociatePublicIpAddress', False),
+                                                                    group_ids=group_ids)
             self.attach_eni(use_nic, device_index)
@@ -559,7 +581,8 @@ class Instance(BotoInstance, TaggedEC2Resource):
         device_index = int(device_index)
         self.nics[device_index] = eni
-        eni.instance = self  # This is used upon associate/disassociate public IP.
+        # This is used upon associate/disassociate public IP.
+        eni.instance = self
         eni.attachment_id = random_eni_attach_id()
         eni.device_index = device_index
@@ -639,7 +662,8 @@ class InstanceBackend(object):
     def terminate_instances(self, instance_ids):
         terminated_instances = []
         if not instance_ids:
-            raise EC2ClientError("InvalidParameterCombination", "No instances specified")
+            raise EC2ClientError(
+                "InvalidParameterCombination", "No instances specified")
         for instance in self.get_multi_instances_by_id(instance_ids):
             instance.terminate()
             terminated_instances.append(instance)
@@ -716,16 +740,21 @@ class InstanceBackend(object):
         """
         reservations = []
         for reservation in self.all_reservations(make_copy=True):
-            reservation_instance_ids = [instance.id for instance in reservation.instances]
-            matching_reservation = any(instance_id in reservation_instance_ids for instance_id in instance_ids)
+            reservation_instance_ids = [
+                instance.id for instance in reservation.instances]
+            matching_reservation = any(
+                instance_id in reservation_instance_ids for instance_id in instance_ids)
             if matching_reservation:
                 # We need to make a copy of the reservation because we have to modify the
                 # instances to limit to those requested
-                reservation.instances = [instance for instance in reservation.instances if instance.id in instance_ids]
+                reservation.instances = [
+                    instance for instance in reservation.instances if instance.id in instance_ids]
                 reservations.append(reservation)
-        found_instance_ids = [instance.id for reservation in reservations for instance in reservation.instances]
+        found_instance_ids = [
+            instance.id for reservation in reservations for instance in reservation.instances]
         if len(found_instance_ids) != len(instance_ids):
-            invalid_id = list(set(instance_ids).difference(set(found_instance_ids)))[0]
+            invalid_id = list(set(instance_ids).difference(
+                set(found_instance_ids)))[0]
             raise InvalidInstanceIdError(invalid_id)
         if filters is not None:
             reservations = filter_reservations(reservations, filters)
@@ -735,9 +764,11 @@ class InstanceBackend(object):
         if make_copy:
             # Return copies so that other functions can modify them without changing
            # the originals
-            reservations = [copy.deepcopy(reservation) for reservation in self.reservations.values()]
+            reservations = [copy.deepcopy(reservation)
+                            for reservation in self.reservations.values()]
         else:
-            reservations = [reservation for reservation in self.reservations.values()]
+            reservations = [
+                reservation for reservation in self.reservations.values()]
         if filters is not None:
             reservations = filter_reservations(reservations, filters)
         return reservations
@@ -848,16 +879,19 @@ class TagBackend(object):
                 if tag_filter in self.VALID_TAG_FILTERS:
                     if tag_filter == 'key':
                         for value in filters[tag_filter]:
-                            key_filters.append(re.compile(simple_aws_filter_to_re(value)))
+                            key_filters.append(re.compile(
+                                simple_aws_filter_to_re(value)))
                     if tag_filter == 'resource-id':
                         for value in filters[tag_filter]:
-                            resource_id_filters.append(re.compile(simple_aws_filter_to_re(value)))
+                            resource_id_filters.append(
+                                re.compile(simple_aws_filter_to_re(value)))
                     if tag_filter == 'resource-type':
                         for value in filters[tag_filter]:
                             resource_type_filters.append(value)
                     if tag_filter == 'value':
                        for value in filters[tag_filter]:
-                            value_filters.append(re.compile(simple_aws_filter_to_re(value)))
+                            value_filters.append(re.compile(
+                                simple_aws_filter_to_re(value)))
         for resource_id, tags in self.tags.items():
             for key, value in tags.items():
                 add_result = False
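The tag filters above are compiled into regexes via simple_aws_filter_to_re. Assuming that helper behaves like the hypothetical stand-in below (escape regex metacharacters, then let the AWS wildcards * and ? through), the matching works like this:

    import re

    def aws_filter_to_re(filter_value):
        # Hypothetical stand-in for moto's simple_aws_filter_to_re:
        # escape everything, then re-enable AWS-style wildcards.
        tmp = re.escape(filter_value)
        return '^' + tmp.replace('\\*', '.*').replace('\\?', '.') + '$'

    pattern = re.compile(aws_filter_to_re('web-*'))
    print(bool(pattern.match('web-frontend')))  # True
    print(bool(pattern.match('db-primary')))    # False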
@@ -907,8 +941,9 @@ class TagBackend(object):
 class Ami(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None,
-            name=None, description=None):
+                 name=None, description=None):
         self.ec2_backend = ec2_backend
         self.id = ami_id
         self.state = "available"
@@ -948,7 +983,8 @@ class Ami(TaggedEC2Resource):
         # AWS auto-creates these, we should reflect the same.
         volume = self.ec2_backend.create_volume(15, "us-east-1a")
-        self.ebs_snapshot = self.ec2_backend.create_snapshot(volume.id, "Auto-created snapshot for AMI %s" % self.id)
+        self.ebs_snapshot = self.ec2_backend.create_snapshot(
+            volume.id, "Auto-created snapshot for AMI %s" % self.id)
     @property
     def is_public(self):
@@ -977,12 +1013,14 @@ class Ami(TaggedEC2Resource):
         filter_value = super(Ami, self).get_filter_value(filter_name)
         if filter_value is None:
-            self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeImages".format(filter_name))
+            self.ec2_backend.raise_not_implemented_error(
+                "The filter '{0}' for DescribeImages".format(filter_name))
         return filter_value
 class AmiBackend(object):
+
     def __init__(self):
         self.amis = {}
         super(AmiBackend, self).__init__()
@@ -991,14 +1029,17 @@ class AmiBackend(object):
         # TODO: check that instance exists and pull info from it.
         ami_id = random_ami_id()
         instance = self.get_instance(instance_id)
-        ami = Ami(self, ami_id, instance=instance, source_ami=None, name=name, description=description)
+        ami = Ami(self, ami_id, instance=instance, source_ami=None,
+                  name=name, description=description)
         self.amis[ami_id] = ami
         return ami
     def copy_image(self, source_image_id, source_region, name=None, description=None):
-        source_ami = ec2_backends[source_region].describe_images(ami_ids=[source_image_id])[0]
+        source_ami = ec2_backends[source_region].describe_images(
+            ami_ids=[source_image_id])[0]
         ami_id = random_ami_id()
-        ami = Ami(self, ami_id, instance=None, source_ami=source_ami, name=name, description=description)
+        ami = Ami(self, ami_id, instance=None, source_ami=source_ami,
+                  name=name, description=description)
         self.amis[ami_id] = ami
         return ami
@@ -1074,12 +1115,14 @@ class AmiBackend(object):
 class Region(object):
+
     def __init__(self, name, endpoint):
         self.name = name
         self.endpoint = endpoint
 class Zone(object):
+
     def __init__(self, name, region_name):
         self.name = name
         self.region_name = region_name
@@ -1122,6 +1165,7 @@ class RegionsAndZonesBackend(object):
 class SecurityRule(object):
+
     def __init__(self, ip_protocol, from_port, to_port, ip_ranges, source_groups):
         self.ip_protocol = ip_protocol
         self.from_port = from_port
@@ -1144,6 +1188,7 @@ class SecurityRule(object):
 class SecurityGroup(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, group_id, name, description, vpc_id=None):
         self.ec2_backend = ec2_backend
         self.id = group_id
@@ -1189,19 +1234,22 @@ class SecurityGroup(TaggedEC2Resource):
     @classmethod
     def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
-        cls._delete_security_group_given_vpc_id(original_resource.name, original_resource.vpc_id, region_name)
+        cls._delete_security_group_given_vpc_id(
+            original_resource.name, original_resource.vpc_id, region_name)
         return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
     @classmethod
     def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
         properties = cloudformation_json['Properties']
         vpc_id = properties.get('VpcId')
-        cls._delete_security_group_given_vpc_id(resource_name, vpc_id, region_name)
+        cls._delete_security_group_given_vpc_id(
+            resource_name, vpc_id, region_name)
     @classmethod
     def _delete_security_group_given_vpc_id(cls, resource_name, vpc_id, region_name):
         ec2_backend = ec2_backends[region_name]
-        security_group = ec2_backend.get_security_group_from_name(resource_name, vpc_id)
+        security_group = ec2_backend.get_security_group_from_name(
+            resource_name, vpc_id)
         if security_group:
             security_group.delete(region_name)
@@ -1304,13 +1352,14 @@ class SecurityGroupBackend(object):
         return group
     def describe_security_groups(self, group_ids=None, groupnames=None, filters=None):
-        all_groups = itertools.chain(*[x.values() for x in self.groups.values()])
+        all_groups = itertools.chain(*[x.values()
+                                       for x in self.groups.values()])
         groups = []
         if group_ids or groupnames or filters:
             for group in all_groups:
-                if ((group_ids and not group.id in group_ids) or
-                        (groupnames and not group.name in groupnames)):
+                if ((group_ids and group.id not in group_ids) or
+                        (groupnames and group.name not in groupnames)):
                     continue
                 if filters and not group.matches_filters(filters):
                     continue
@@ -1322,7 +1371,8 @@ class SecurityGroupBackend(object):
     def _delete_security_group(self, vpc_id, group_id):
         if self.groups[vpc_id][group_id].enis:
-            raise DependencyViolationError("{0} is being utilized by {1}".format(group_id, 'ENIs'))
+            raise DependencyViolationError(
+                "{0} is being utilized by {1}".format(group_id, 'ENIs'))
         return self.groups[vpc_id].pop(group_id)
     def delete_security_group(self, name=None, group_id=None):
@@ -1333,7 +1383,8 @@ class SecurityGroupBackend(object):
                 return self._delete_security_group(vpc_id, group_id)
             raise InvalidSecurityGroupNotFoundError(group_id)
         elif name:
-            # Group Name.  Has to be in standard EC2, VPC needs to be identified by group_id
+            # Group Name.  Has to be in standard EC2, VPC needs to be
+            # identified by group_id
             group = self.get_security_group_from_name(name)
             if group:
                 return self._delete_security_group(None, group.id)
@@ -1341,7 +1392,8 @@ class SecurityGroupBackend(object):
     def get_security_group_from_id(self, group_id):
         # 2 levels of chaining necessary since it's a complex structure
-        all_groups = itertools.chain.from_iterable([x.values() for x in self.groups.values()])
+        all_groups = itertools.chain.from_iterable(
+            [x.values() for x in self.groups.values()])
         for group in all_groups:
             if group.id == group_id:
                 return group
@@ -1384,7 +1436,8 @@ class SecurityGroupBackend(object):
         source_groups = []
         for source_group_name in source_group_names:
-            source_group = self.get_security_group_from_name(source_group_name, vpc_id)
+            source_group = self.get_security_group_from_name(
+                source_group_name, vpc_id)
             if source_group:
                 source_groups.append(source_group)
             if source_group:
                 source_groups.append(source_group)
-        security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups)
+        security_rule = SecurityRule(
+            ip_protocol, from_port, to_port, ip_ranges, source_groups)
         group.add_ingress_rule(security_rule)
     def revoke_security_group_ingress(self,
@@ -1411,7 +1465,8 @@ class SecurityGroupBackend(object):
         source_groups = []
         for source_group_name in source_group_names:
-            source_group = self.get_security_group_from_name(source_group_name, vpc_id)
+            source_group = self.get_security_group_from_name(
+                source_group_name, vpc_id)
             if source_group:
                 source_groups.append(source_group)
@@ -1420,7 +1475,8 @@ class SecurityGroupBackend(object):
             if source_group:
                 source_groups.append(source_group)
-        security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups)
+        security_rule = SecurityRule(
+            ip_protocol, from_port, to_port, ip_ranges, source_groups)
         if security_rule in group.ingress_rules:
             group.ingress_rules.remove(security_rule)
             return security_rule
@@ -1453,7 +1509,8 @@ class SecurityGroupBackend(object):
         source_groups = []
         for source_group_name in source_group_names:
-            source_group = self.get_security_group_from_name(source_group_name, vpc_id)
+            source_group = self.get_security_group_from_name(
+                source_group_name, vpc_id)
             if source_group:
                 source_groups.append(source_group)
@@ -1463,7 +1520,8 @@ class SecurityGroupBackend(object):
             if source_group:
                 source_groups.append(source_group)
-        security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups)
+        security_rule = SecurityRule(
+            ip_protocol, from_port, to_port, ip_ranges, source_groups)
         group.add_egress_rule(security_rule)
     def revoke_security_group_egress(self,
@@ -1480,7 +1538,8 @@ class SecurityGroupBackend(object):
         source_groups = []
         for source_group_name in source_group_names:
-            source_group = self.get_security_group_from_name(source_group_name, vpc_id)
+            source_group = self.get_security_group_from_name(
+                source_group_name, vpc_id)
             if source_group:
                 source_groups.append(source_group)
@@ -1489,7 +1548,8 @@ class SecurityGroupBackend(object):
             if source_group:
                 source_groups.append(source_group)
-        security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups)
+        security_rule = SecurityRule(
+            ip_protocol, from_port, to_port, ip_ranges, source_groups)
         if security_rule in group.egress_rules:
             group.egress_rules.remove(security_rule)
             return security_rule
@@ -1528,7 +1588,8 @@ class SecurityGroupIngress(object):
         from_port = properties.get("FromPort")
         source_security_group_id = properties.get("SourceSecurityGroupId")
         source_security_group_name = properties.get("SourceSecurityGroupName")
-        # source_security_owner_id = properties.get("SourceSecurityGroupOwnerId")  # IGNORED AT THE MOMENT
+        # source_security_owner_id =
+        # properties.get("SourceSecurityGroupOwnerId")  # IGNORED AT THE MOMENT
         to_port = properties.get("ToPort")
         assert group_id or group_name
@@ -1549,9 +1610,11 @@ class SecurityGroupIngress(object):
             ip_ranges = []
         if group_id:
-            security_group = ec2_backend.describe_security_groups(group_ids=[group_id])[0]
+            security_group = ec2_backend.describe_security_groups(group_ids=[group_id])[
+                0]
         else:
-            security_group = ec2_backend.describe_security_groups(groupnames=[group_name])[0]
+            security_group = ec2_backend.describe_security_groups(
+                groupnames=[group_name])[0]
         ec2_backend.authorize_security_group_ingress(
             group_name_or_id=security_group.id,
@@ -1567,6 +1630,7 @@ class SecurityGroupIngress(object):
 class VolumeAttachment(object):
+
     def __init__(self, volume, instance, device, status):
         self.volume = volume
         self.attach_time = utc_date_and_time()
@@ -1591,6 +1655,7 @@ class VolumeAttachment(object):
 class Volume(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, volume_id, size, zone, snapshot_id=None, encrypted=False):
         self.id = volume_id
         self.size = size
@@ -1657,12 +1722,14 @@ class Volume(TaggedEC2Resource):
         filter_value = super(Volume, self).get_filter_value(filter_name)
         if filter_value is None:
-            self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeVolumes".format(filter_name))
+            self.ec2_backend.raise_not_implemented_error(
+                "The filter '{0}' for DescribeVolumes".format(filter_name))
         return filter_value
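The revoke paths above construct a fresh SecurityRule from the request parameters and then remove it from the group's rule list by value, which only works because SecurityRule compares equal by its fields. A simplified stand-in for that contract (moto's actual equality goes through a unique-representation property; the exact form here is assumed):

    class SecurityRule(object):
        def __init__(self, ip_protocol, from_port, to_port, ip_ranges, source_groups):
            self.ip_protocol = ip_protocol
            self.from_port = from_port
            self.to_port = to_port
            self.ip_ranges = ip_ranges or []
            self.source_groups = source_groups

        @property
        def unique_representation(self):
            # Tuple of all identifying fields; two rules with the same
            # fields are the same rule.
            return (self.ip_protocol, self.from_port, self.to_port,
                    tuple(self.ip_ranges), tuple(self.source_groups))

        def __eq__(self, other):
            return self.unique_representation == other.unique_representation

    rules = [SecurityRule('tcp', 80, 80, ['0.0.0.0/0'], [])]
    candidate = SecurityRule('tcp', 80, 80, ['0.0.0.0/0'], [])
    if candidate in rules:          # value equality, not identity
        rules.remove(candidate)
    print(rules)                    # []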
 class Snapshot(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False):
         self.id = snapshot_id
         self.volume = volume
@@ -1696,12 +1763,14 @@ class Snapshot(TaggedEC2Resource):
         filter_value = super(Snapshot, self).get_filter_value(filter_name)
         if filter_value is None:
-            self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeSnapshots".format(filter_name))
+            self.ec2_backend.raise_not_implemented_error(
+                "The filter '{0}' for DescribeSnapshots".format(filter_name))
         return filter_value
 class EBSBackend(object):
+
     def __init__(self):
         self.volumes = {}
         self.attachments = {}
@@ -1745,7 +1814,8 @@ class EBSBackend(object):
         if not volume or not instance:
             return False
-        volume.attachment = VolumeAttachment(volume, instance, device_path, 'attached')
+        volume.attachment = VolumeAttachment(
+            volume, instance, device_path, 'attached')
         # Modify instance to capture mount of block device.
         bdt = BlockDeviceType(volume_id=volume_id, status=volume.status, size=volume.size,
                               attach_time=utc_date_and_time())
@@ -1767,7 +1837,8 @@ class EBSBackend(object):
     def create_snapshot(self, volume_id, description):
         snapshot_id = random_snapshot_id()
         volume = self.get_volume(volume_id)
-        snapshot = Snapshot(self, snapshot_id, volume, description, volume.encrypted)
+        snapshot = Snapshot(self, snapshot_id, volume,
+                            description, volume.encrypted)
         self.snapshots[snapshot_id] = snapshot
         return snapshot
@@ -1794,7 +1865,8 @@ class EBSBackend(object):
     def add_create_volume_permission(self, snapshot_id, user_id=None, group=None):
         if user_id:
-            self.raise_not_implemented_error("The UserId parameter for ModifySnapshotAttribute")
+            self.raise_not_implemented_error(
+                "The UserId parameter for ModifySnapshotAttribute")
         if group != 'all':
             raise InvalidAMIAttributeItemValueError("UserGroup", group)
@@ -1804,7 +1876,8 @@ class EBSBackend(object):
     def remove_create_volume_permission(self, snapshot_id, user_id=None, group=None):
         if user_id:
-            self.raise_not_implemented_error("The UserId parameter for ModifySnapshotAttribute")
+            self.raise_not_implemented_error(
+                "The UserId parameter for ModifySnapshotAttribute")
         if group != 'all':
             raise InvalidAMIAttributeItemValueError("UserGroup", group)
@@ -1814,6 +1887,7 @@ class EBSBackend(object):
 class VPC(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default'):
         self.ec2_backend = ec2_backend
         self.id = vpc_id
@@ -1862,19 +1936,22 @@ class VPC(TaggedEC2Resource):
         filter_value = super(VPC, self).get_filter_value(filter_name)
         if filter_value is None:
-            self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeVPCs".format(filter_name))
+            self.ec2_backend.raise_not_implemented_error(
+                "The filter '{0}' for DescribeVPCs".format(filter_name))
         return filter_value
 class VPCBackend(object):
+
     def __init__(self):
         self.vpcs = {}
         super(VPCBackend, self).__init__()
     def create_vpc(self, cidr_block, instance_tenancy='default'):
         vpc_id = random_vpc_id()
-        vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy)
+        vpc = VPC(self, vpc_id, cidr_block, len(
+            self.vpcs) == 0, instance_tenancy)
         self.vpcs[vpc_id] = vpc
         # AWS creates a default main route table and security group.
@@ -1885,7 +1962,8 @@ class VPCBackend(object):
         default = self.get_security_group_from_name('default', vpc_id=vpc_id)
         if not default:
-            self.create_security_group('default', 'default VPC security group', vpc_id=vpc_id)
+            self.create_security_group(
+                'default', 'default VPC security group', vpc_id=vpc_id)
         return vpc
@@ -1945,6 +2023,7 @@ class VPCBackend(object):
 class VPCPeeringConnectionStatus(object):
+
     def __init__(self, code='initiating-request', message=''):
         self.code = code
         self.message = message
@@ -1967,6 +2046,7 @@ class VPCPeeringConnectionStatus(object):
 class VPCPeeringConnection(TaggedEC2Resource):
+
     def __init__(self, vpc_pcx_id, vpc, peer_vpc):
         self.id = vpc_pcx_id
         self.vpc = vpc
@@ -1991,6 +2071,7 @@ class VPCPeeringConnection(TaggedEC2Resource):
 class VPCPeeringConnectionBackend(object):
+
     def __init__(self):
         self.vpc_pcxs = {}
         super(VPCPeeringConnectionBackend, self).__init__()
@@ -2032,6 +2113,7 @@ class VPCPeeringConnectionBackend(object):
 class Subnet(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, subnet_id, vpc_id, cidr_block, availability_zone,
                  default_for_az, map_public_ip_on_launch):
         self.ec2_backend = ec2_backend
@@ -2101,18 +2183,21 @@ class Subnet(TaggedEC2Resource):
         filter_value = super(Subnet, self).get_filter_value(filter_name)
         if filter_value is None:
-            self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeSubnets".format(filter_name))
+            self.ec2_backend.raise_not_implemented_error(
+                "The filter '{0}' for DescribeSubnets".format(filter_name))
         return filter_value
     def get_cfn_attribute(self, attribute_name):
         from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
         if attribute_name == 'AvailabilityZone':
-            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"')
+            raise NotImplementedError(
+                '"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"')
         raise UnformattedGetAttTemplateException()
 class SubnetBackend(object):
+
     def __init__(self):
         # maps availability zone to dict of (subnet_id, subnet)
         self.subnets = defaultdict(dict)
@@ -2126,7 +2211,7 @@ class SubnetBackend(object):
     def create_subnet(self, vpc_id, cidr_block, availability_zone):
         subnet_id = random_subnet_id()
-        vpc = self.get_vpc(vpc_id)  # Validate VPC exists
+        self.get_vpc(vpc_id)  # Validate VPC exists
         # if this is the first subnet for an availability zone,
         # consider it the default
@@ -2166,6 +2251,7 @@ class SubnetBackend(object):
 class SubnetRouteTableAssociation(object):
+
     def __init__(self, route_table_id, subnet_id):
         self.route_table_id = route_table_id
         self.subnet_id = subnet_id
@@ -2186,17 +2272,21 @@ class SubnetRouteTableAssociation(object):
 class SubnetRouteTableAssociationBackend(object):
+
     def __init__(self):
         self.subnet_associations = {}
         super(SubnetRouteTableAssociationBackend, self).__init__()
     def create_subnet_association(self, route_table_id, subnet_id):
-        subnet_association = SubnetRouteTableAssociation(route_table_id, subnet_id)
-        self.subnet_associations["{0}:{1}".format(route_table_id, subnet_id)] = subnet_association
+        subnet_association = SubnetRouteTableAssociation(
+            route_table_id, subnet_id)
+        self.subnet_associations["{0}:{1}".format(
+            route_table_id, subnet_id)] = subnet_association
         return subnet_association
 class RouteTable(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, route_table_id, vpc_id, main=False):
         self.ec2_backend = ec2_backend
         self.id = route_table_id
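As the create_vpc hunk above shows, the backend mirrors AWS by provisioning a main route table and a default security group alongside every new VPC. A quick usage sketch against the mocked API (boto3 calls of this era of moto; treat the exact response shapes as illustrative):

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_vpc_defaults():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
        # The backend should have created a 'default' group for this VPC.
        groups = ec2.describe_security_groups(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])['SecurityGroups']
        print([g['GroupName'] for g in groups])  # expect ['default']

    check_vpc_defaults()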
@@ -2242,12 +2332,14 @@ class RouteTable(TaggedEC2Resource):
         filter_value = super(RouteTable, self).get_filter_value(filter_name)
         if filter_value is None:
-            self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeRouteTables".format(filter_name))
+            self.ec2_backend.raise_not_implemented_error(
+                "The filter '{0}' for DescribeRouteTables".format(filter_name))
         return filter_value
 class RouteTableBackend(object):
+
     def __init__(self):
         self.route_tables = {}
         super(RouteTableBackend, self).__init__()
@@ -2273,9 +2365,11 @@ class RouteTableBackend(object):
         route_tables = self.route_tables.values()
         if route_table_ids:
-            route_tables = [route_table for route_table in route_tables if route_table.id in route_table_ids]
+            route_tables = [
+                route_table for route_table in route_tables if route_table.id in route_table_ids]
             if len(route_tables) != len(route_table_ids):
-                invalid_id = list(set(route_table_ids).difference(set([route_table.id for route_table in route_tables])))[0]
+                invalid_id = list(set(route_table_ids).difference(
+                    set([route_table.id for route_table in route_tables])))[0]
                 raise InvalidRouteTableIdError(invalid_id)
         return generic_filter(filters, route_tables)
@@ -2292,7 +2386,8 @@ class RouteTableBackend(object):
     def associate_route_table(self, route_table_id, subnet_id):
         # Idempotent if association already exists.
-        route_tables_by_subnet = self.get_all_route_tables(filters={'association.subnet-id': [subnet_id]})
+        route_tables_by_subnet = self.get_all_route_tables(
+            filters={'association.subnet-id': [subnet_id]})
         if route_tables_by_subnet:
             for association_id, check_subnet_id in route_tables_by_subnet[0].associations.items():
                 if subnet_id == check_subnet_id:
@@ -2318,7 +2413,8 @@ class RouteTableBackend(object):
             return association_id
         # Find route table which currently has the association, error if none.
-        route_tables_by_association_id = self.get_all_route_tables(filters={'association.route-table-association-id': [association_id]})
+        route_tables_by_association_id = self.get_all_route_tables(
+            filters={'association.route-table-association-id': [association_id]})
         if not route_tables_by_association_id:
             raise InvalidAssociationIdError(association_id)
@@ -2329,6 +2425,7 @@ class RouteTableBackend(object):
 class Route(object):
+
     def __init__(self, route_table, destination_cidr_block, local=False,
                  gateway=None, instance=None, interface=None, vpc_pcx=None):
         self.id = generate_route_id(route_table.id, destination_cidr_block)
@@ -2363,6 +2460,7 @@ class Route(object):
 class RouteBackend(object):
+
     def __init__(self):
         super(RouteBackend, self).__init__()
@@ -2372,7 +2470,8 @@ class RouteBackend(object):
         route_table = self.get_route_table(route_table_id)
         if interface_id:
-            self.raise_not_implemented_error("CreateRoute to NetworkInterfaceId")
+            self.raise_not_implemented_error(
+                "CreateRoute to NetworkInterfaceId")
         gateway = None
         if gateway_id:
@@ -2383,21 +2482,23 @@ class RouteBackend(object):
         route = Route(route_table, destination_cidr_block, local=local,
                       gateway=gateway,
-                      instance=self.get_instance(instance_id) if instance_id else None,
+                      instance=self.get_instance(
+                          instance_id) if instance_id else None,
                       interface=None,
                       vpc_pcx=self.get_vpc_peering_connection(vpc_peering_connection_id) if vpc_peering_connection_id else None)
         route_table.routes[route.id] = route
         return route
     def replace_route(self, route_table_id, destination_cidr_block,
-                  gateway_id=None, instance_id=None, interface_id=None,
-                  vpc_peering_connection_id=None):
+                      gateway_id=None, instance_id=None, interface_id=None,
+                      vpc_peering_connection_id=None):
         route_table = self.get_route_table(route_table_id)
         route_id = generate_route_id(route_table.id, destination_cidr_block)
         route = route_table.routes[route_id]
         if interface_id:
-            self.raise_not_implemented_error("ReplaceRoute to NetworkInterfaceId")
+            self.raise_not_implemented_error(
+                "ReplaceRoute to NetworkInterfaceId")
         route.gateway = None
         if gateway_id:
@@ -2406,9 +2507,11 @@ class RouteBackend(object):
             elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id:
                 route.gateway = self.get_internet_gateway(gateway_id)
-        route.instance = self.get_instance(instance_id) if instance_id else None
+        route.instance = self.get_instance(
+            instance_id) if instance_id else None
         route.interface = None
-        route.vpc_pcx = self.get_vpc_peering_connection(vpc_peering_connection_id) if vpc_peering_connection_id else None
+        route.vpc_pcx = self.get_vpc_peering_connection(
+            vpc_peering_connection_id) if vpc_peering_connection_id else None
         route_table.routes[route.id] = route
         return route
@@ -2428,6 +2531,7 @@ class RouteBackend(object):
 class InternetGateway(TaggedEC2Resource):
+
     def __init__(self, ec2_backend):
         self.ec2_backend = ec2_backend
         self.id = random_internet_gateway_id()
@@ -2451,6 +2555,7 @@ class InternetGateway(TaggedEC2Resource):
 class InternetGatewayBackend(object):
+
     def __init__(self):
         self.internet_gateways = {}
         super(InternetGatewayBackend, self).__init__()
@@ -2505,6 +2610,7 @@ class InternetGatewayBackend(object):
 class VPCGatewayAttachment(object):
+
     def __init__(self, gateway_id, vpc_id):
         self.gateway_id = gateway_id
         self.vpc_id = vpc_id
@@ -2518,7 +2624,8 @@ class VPCGatewayAttachment(object):
             gateway_id=properties['InternetGatewayId'],
             vpc_id=properties['VpcId'],
         )
-        ec2_backend.attach_internet_gateway(properties['InternetGatewayId'], properties['VpcId'])
+        ec2_backend.attach_internet_gateway(
+            properties['InternetGatewayId'], properties['VpcId'])
         return attachment
     @property
@@ -2527,6 +2634,7 @@ class VPCGatewayAttachment(object):
 class VPCGatewayAttachmentBackend(object):
+
     def __init__(self):
         self.gateway_attachments = {}
         super(VPCGatewayAttachmentBackend, self).__init__()
@@ -2538,6 +2646,7 @@ class VPCGatewayAttachmentBackend(object):
 class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
+
     def __init__(self, ec2_backend, spot_request_id, price, image_id, type,
                  valid_from, valid_until, launch_group, availability_zone_group,
                  key_name, security_groups, user_data, instance_type, placement,
@@ -2567,12 +2676,14 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
         if security_groups:
             for group_name in security_groups:
-                group = self.ec2_backend.get_security_group_from_name(group_name)
+                group = self.ec2_backend.get_security_group_from_name(
+                    group_name)
                 if group:
                     ls.groups.append(group)
         else:
            # If no security groups were given, add the default
-            default_group = self.ec2_backend.get_security_group_from_name("default")
+            default_group = self.ec2_backend.get_security_group_from_name(
+                "default")
             ls.groups.append(default_group)
         self.instance = self.launch_instance()
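The create_route/replace_route hunks above decide what kind of gateway an ID names by checking it against the prefixes in EC2_RESOURCE_TO_PREFIX (moto's actual test is a substring check, `prefix in gateway_id`). A stricter sketch of the same dispatch, with the two real AWS ID prefixes assumed:

    EC2_RESOURCE_TO_PREFIX = {
        'internet-gateway': 'igw',
        'vpn-gateway': 'vgw',
    }

    def classify_gateway(gateway_id):
        # Return which kind of gateway an ID like 'igw-1a2b3c4d' names.
        for resource, prefix in EC2_RESOURCE_TO_PREFIX.items():
            if gateway_id.startswith(prefix + '-'):
                return resource
        raise ValueError('unrecognized gateway id: %s' % gateway_id)

    print(classify_gateway('igw-1a2b3c4d'))  # internet-gateway
    print(classify_gateway('vgw-9f8e7d6c'))  # vpn-gateway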
@@ -2582,10 +2693,12 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
             return self.state
         if filter_name == 'spot-instance-request-id':
             return self.id
-        filter_value = super(SpotInstanceRequest, self).get_filter_value(filter_name)
+        filter_value = super(SpotInstanceRequest,
+                             self).get_filter_value(filter_name)
         if filter_value is None:
-            self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeSpotInstanceRequests".format(filter_name))
+            self.ec2_backend.raise_not_implemented_error(
+                "The filter '{0}' for DescribeSpotInstanceRequests".format(filter_name))
         return filter_value
@@ -2604,6 +2717,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
 @six.add_metaclass(Model)
 class SpotRequestBackend(object):
+
     def __init__(self):
         self.spot_instance_requests = {}
         super(SpotRequestBackend, self).__init__()
@@ -2617,10 +2731,10 @@ class SpotRequestBackend(object):
         for _ in range(count):
             spot_request_id = random_spot_request_id()
             request = SpotInstanceRequest(self,
-                spot_request_id, price, image_id, type, valid_from, valid_until,
-                launch_group, availability_zone_group, key_name, security_groups,
-                user_data, instance_type, placement, kernel_id, ramdisk_id,
-                monitoring_enabled, subnet_id)
+                                          spot_request_id, price, image_id, type, valid_from, valid_until,
+                                          launch_group, availability_zone_group, key_name, security_groups,
+                                          user_data, instance_type, placement, kernel_id, ramdisk_id,
+                                          monitoring_enabled, subnet_id)
             self.spot_instance_requests[spot_request_id] = request
             requests.append(request)
         return requests
@@ -2639,9 +2753,10 @@ class SpotRequestBackend(object):
 class SpotFleetLaunchSpec(object):
+
     def __init__(self, ebs_optimized, group_set, iam_instance_profile, image_id,
-            instance_type, key_name, monitoring, spot_price, subnet_id, user_data,
-            weighted_capacity):
+                 instance_type, key_name, monitoring, spot_price, subnet_id, user_data,
+                 weighted_capacity):
         self.ebs_optimized = ebs_optimized
         self.group_set = group_set
         self.iam_instance_profile = iam_instance_profile
@@ -2658,7 +2773,7 @@ class SpotFleetLaunchSpec(object):
 class SpotFleetRequest(TaggedEC2Resource):
     def __init__(self, ec2_backend, spot_fleet_request_id, spot_price,
-            target_capacity, iam_fleet_role, allocation_strategy, launch_specs):
+                 target_capacity, iam_fleet_role, allocation_strategy, launch_specs):
         self.ec2_backend = ec2_backend
         self.id = spot_fleet_request_id
@@ -2672,18 +2787,19 @@ class SpotFleetRequest(TaggedEC2Resource):
         self.launch_specs = []
         for spec in launch_specs:
             self.launch_specs.append(SpotFleetLaunchSpec(
-                    ebs_optimized=spec['ebs_optimized'],
-                    group_set=[val for key, val in spec.items() if key.startswith("group_set")],
-                    iam_instance_profile=spec.get('iam_instance_profile._arn'),
-                    image_id=spec['image_id'],
-                    instance_type=spec['instance_type'],
-                    key_name=spec.get('key_name'),
-                    monitoring=spec.get('monitoring._enabled'),
-                    spot_price=spec.get('spot_price', self.spot_price),
-                    subnet_id=spec['subnet_id'],
-                    user_data=spec.get('user_data'),
-                    weighted_capacity=spec['weighted_capacity'],
-                )
+                ebs_optimized=spec['ebs_optimized'],
+                group_set=[val for key, val in spec.items(
+                ) if key.startswith("group_set")],
+                iam_instance_profile=spec.get('iam_instance_profile._arn'),
+                image_id=spec['image_id'],
+                instance_type=spec['instance_type'],
+                key_name=spec.get('key_name'),
+                monitoring=spec.get('monitoring._enabled'),
+                spot_price=spec.get('spot_price', self.spot_price),
+                subnet_id=spec['subnet_id'],
+                user_data=spec.get('user_data'),
+                weighted_capacity=spec['weighted_capacity'],
+            )
             )
         self.spot_requests = []
@@ -2695,7 +2811,8 @@ class SpotFleetRequest(TaggedEC2Resource):
     @classmethod
     def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
-        properties = cloudformation_json['Properties']['SpotFleetRequestConfigData']
+        properties = cloudformation_json[
+            'Properties']['SpotFleetRequestConfigData']
         ec2_backend = ec2_backends[region_name]
         spot_price = properties['SpotPrice']
@@ -2704,17 +2821,17 @@ class SpotFleetRequest(TaggedEC2Resource):
         allocation_strategy = properties['AllocationStrategy']
         launch_specs = properties["LaunchSpecifications"]
         launch_specs = [
-            dict([(camelcase_to_underscores(key), val) for key, val in launch_spec.items()])
+            dict([(camelcase_to_underscores(key), val)
+                  for key, val in launch_spec.items()])
             for launch_spec
             in launch_specs
         ]
         spot_fleet_request = ec2_backend.request_spot_fleet(spot_price,
-            target_capacity, iam_fleet_role, allocation_strategy, launch_specs)
+                                                            target_capacity, iam_fleet_role, allocation_strategy, launch_specs)
         return spot_fleet_request
-
     def get_launch_spec_counts(self):
         weight_map = defaultdict(int)
@@ -2722,39 +2839,42 @@ class SpotFleetRequest(TaggedEC2Resource):
             weight_so_far = 0
             launch_spec_index = 0
             while True:
-                launch_spec = self.launch_specs[launch_spec_index % len(self.launch_specs)]
+                launch_spec = self.launch_specs[
+                    launch_spec_index % len(self.launch_specs)]
                 weight_map[launch_spec] += 1
                 weight_so_far += launch_spec.weighted_capacity
                 if weight_so_far >= self.target_capacity:
                     break
                 launch_spec_index += 1
         else:  # lowestPrice
-            cheapest_spec = sorted(self.launch_specs, key=lambda spec: float(spec.spot_price))[0]
+            cheapest_spec = sorted(
+                self.launch_specs, key=lambda spec: float(spec.spot_price))[0]
             extra = 1 if self.target_capacity % cheapest_spec.weighted_capacity else 0
-            weight_map[cheapest_spec] = int(self.target_capacity // cheapest_spec.weighted_capacity) + extra
+            weight_map[cheapest_spec] = int(
+                self.target_capacity // cheapest_spec.weighted_capacity) + extra
         return weight_map.items()
     def create_spot_requests(self):
         for launch_spec, count in self.get_launch_spec_counts():
             requests = self.ec2_backend.request_spot_instances(
-                price=launch_spec.spot_price,
-                image_id=launch_spec.image_id,
-                count=count,
-                type="persistent",
-                valid_from=None,
-                valid_until=None,
-                launch_group=None,
-                availability_zone_group=None,
-                key_name=launch_spec.key_name,
-                security_groups=launch_spec.group_set,
-                user_data=launch_spec.user_data,
-                instance_type=launch_spec.instance_type,
-                placement=None,
-                kernel_id=None,
-                ramdisk_id=None,
-                monitoring_enabled=launch_spec.monitoring,
-                subnet_id=launch_spec.subnet_id,
+                price=launch_spec.spot_price,
+                image_id=launch_spec.image_id,
+                count=count,
+                type="persistent",
+                valid_from=None,
+                valid_until=None,
+                launch_group=None,
+                availability_zone_group=None,
+                key_name=launch_spec.key_name,
+                security_groups=launch_spec.group_set,
+                user_data=launch_spec.user_data,
+                instance_type=launch_spec.instance_type,
+                placement=None,
+                kernel_id=None,
+                ramdisk_id=None,
+                monitoring_enabled=launch_spec.monitoring,
+                subnet_id=launch_spec.subnet_id,
             )
             self.spot_requests.extend(requests)
         return self.spot_requests
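get_launch_spec_counts above turns a target capacity into per-spec instance counts: 'diversified' round-robins one instance at a time until the summed weighted capacity covers the target, while 'lowestPrice' gives everything to the cheapest spec and rounds up when the target is not a multiple of its weight. The same arithmetic in isolation, with plain numbers standing in for launch specs (a simplified sketch; the real loop adds before checking, which only differs for a zero target):

    from collections import defaultdict

    def diversified_counts(weights, target_capacity):
        # Round-robin across specs until weighted capacity covers the target.
        weight_map = defaultdict(int)
        weight_so_far, i = 0, 0
        while weight_so_far < target_capacity:
            spec = i % len(weights)
            weight_map[spec] += 1
            weight_so_far += weights[spec]
            i += 1
        return dict(weight_map)

    def lowest_price_count(weight, target_capacity):
        # Cheapest spec only; one extra instance covers any remainder.
        extra = 1 if target_capacity % weight else 0
        return int(target_capacity // weight) + extra

    print(diversified_counts([2, 2], 10))  # {0: 3, 1: 2}
    print(lowest_price_count(4, 10))       # 3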
@@ -2764,16 +2884,17 @@ class SpotFleetRequest(TaggedEC2Resource):
 class SpotFleetBackend(object):
+
     def __init__(self):
         self.spot_fleet_requests = {}
         super(SpotFleetBackend, self).__init__()
     def request_spot_fleet(self, spot_price, target_capacity, iam_fleet_role,
-            allocation_strategy, launch_specs):
+                           allocation_strategy, launch_specs):
         spot_fleet_request_id = random_spot_fleet_request_id()
         request = SpotFleetRequest(self, spot_fleet_request_id, spot_price,
-            target_capacity, iam_fleet_role, allocation_strategy, launch_specs)
+                                   target_capacity, iam_fleet_role, allocation_strategy, launch_specs)
         self.spot_fleet_requests[spot_fleet_request_id] = request
         return request
@@ -2788,7 +2909,8 @@ class SpotFleetBackend(object):
         requests = self.spot_fleet_requests.values()
         if spot_fleet_request_ids:
-            requests = [request for request in requests if request.id in spot_fleet_request_ids]
+            requests = [
+                request for request in requests if request.id in spot_fleet_request_ids]
         return requests
@@ -2803,6 +2925,7 @@ class SpotFleetBackend(object):
 class ElasticAddress(object):
+
     def __init__(self, domain):
         self.public_ip = random_ip()
         self.allocation_id = random_eip_allocation_id() if domain == "vpc" else None
@@ -2894,8 +3017,10 @@ class ElasticAddressBackend(object):
             eips = self.address_by_allocation([allocation_id])
         eip = eips[0]
-        new_instance_association = bool(instance and (not eip.instance or eip.instance.id == instance.id))
-        new_eni_association = bool(eni and (not eip.eni or eni.id == eip.eni.id))
+        new_instance_association = bool(instance and (
+            not eip.instance or eip.instance.id == instance.id))
+        new_eni_association = bool(
+            eni and (not eip.eni or eni.id == eip.eni.id))
         if new_instance_association or new_eni_association or reassociate:
             eip.instance = instance
@@ -2948,6 +3073,7 @@ class ElasticAddressBackend(object):
 class DHCPOptionsSet(TaggedEC2Resource):
+
     def __init__(self, ec2_backend,
                  domain_name_servers=None, domain_name=None,
                  ntp_servers=None, netbios_name_servers=None,
                  netbios_node_type=None):
@@ -2983,10 +3109,12 @@ class DHCPOptionsSet(TaggedEC2Resource):
             values = [item for item in list(self._options.values()) if item]
             return itertools.chain(*values)
-        filter_value = super(DHCPOptionsSet, self).get_filter_value(filter_name)
+        filter_value = super(
+            DHCPOptionsSet, self).get_filter_value(filter_name)
         if filter_value is None:
-            self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeDhcpOptions".format(filter_name))
+            self.ec2_backend.raise_not_implemented_error(
+                "The filter '{0}' for DescribeDhcpOptions".format(filter_name))
         return filter_value
@@ -2996,6 +3124,7 @@ class DHCPOptionsSet(TaggedEC2Resource):
 class DHCPOptionsSetBackend(object):
+
     def __init__(self):
         self.dhcp_options_sets = {}
         super(DHCPOptionsSetBackend, self).__init__()
@@ -3040,7 +3169,8 @@ class DHCPOptionsSetBackend(object):
         if options_id in self.dhcp_options_sets:
             if self.dhcp_options_sets[options_id].vpc:
-                raise DependencyViolationError("Cannot delete assigned DHCP options.")
+                raise DependencyViolationError(
+                    "Cannot delete assigned DHCP options.")
             self.dhcp_options_sets.pop(options_id)
         else:
             raise InvalidDHCPOptionsIdError(options_id)
@@ -3050,15 +3180,18 @@ class DHCPOptionsSetBackend(object):
         dhcp_options_sets = self.dhcp_options_sets.values()
         if dhcp_options_ids:
-            dhcp_options_sets = [dhcp_options_set for dhcp_options_set in dhcp_options_sets if dhcp_options_set.id in dhcp_options_ids]
+            dhcp_options_sets = [
+                dhcp_options_set for dhcp_options_set in dhcp_options_sets if dhcp_options_set.id in dhcp_options_ids]
             if len(dhcp_options_sets) != len(dhcp_options_ids):
-                invalid_id = list(set(dhcp_options_ids).difference(set([dhcp_options_set.id for dhcp_options_set in dhcp_options_sets])))[0]
+                invalid_id = list(set(dhcp_options_ids).difference(
+                    set([dhcp_options_set.id for dhcp_options_set in dhcp_options_sets])))[0]
                 raise InvalidDHCPOptionsIdError(invalid_id)
         return generic_filter(filters, dhcp_options_sets)
 class VPNConnection(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, id, type,
                  customer_gateway_id, vpn_gateway_id):
         self.ec2_backend = ec2_backend
@@ -3074,6 +3207,7 @@ class VPNConnection(TaggedEC2Resource):
 class VPNConnectionBackend(object):
+
     def __init__(self):
         self.vpn_connections = {}
         super(VPNConnectionBackend, self).__init__()
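The pair of booleans in associate_address above encodes when an Elastic IP may be (re)bound: the address is free, it is being re-pointed at the same target, or the caller explicitly allowed reassociation. Reduced to a predicate over plain values:

    def may_associate(current_target, requested_target, reassociate):
        # Allowed if the EIP is unbound, already bound to the same target,
        # or the caller explicitly asked to reassociate.
        fresh = current_target is None or current_target == requested_target
        return fresh or reassociate

    print(may_associate(None, 'i-111', False))     # True  (unbound)
    print(may_associate('i-111', 'i-111', False))  # True  (same instance)
    print(may_associate('i-111', 'i-222', False))  # False (would steal it)
    print(may_associate('i-111', 'i-222', True))   # True  (reassociate)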
@@ -3116,13 +3250,15 @@ class VPNConnectionBackend(object):
             vpn_connections = [vpn_connection for vpn_connection in vpn_connections if vpn_connection.id in vpn_connection_ids]
             if len(vpn_connections) != len(vpn_connection_ids):
-                invalid_id = list(set(vpn_connection_ids).difference(set([vpn_connection.id for vpn_connection in vpn_connections])))[0]
+                invalid_id = list(set(vpn_connection_ids).difference(
+                    set([vpn_connection.id for vpn_connection in vpn_connections])))[0]
                 raise InvalidVpnConnectionIdError(invalid_id)
         return generic_filter(filters, vpn_connections)
 class NetworkAclBackend(object):
+
     def __init__(self):
         self.network_acls = {}
         super(NetworkAclBackend, self).__init__()
@@ -3147,7 +3283,8 @@ class NetworkAclBackend(object):
             network_acls = [network_acl for network_acl in network_acls
                             if network_acl.id in network_acl_ids]
             if len(network_acls) != len(network_acl_ids):
-                invalid_id = list(set(network_acl_ids).difference(set([network_acl.id for network_acl in network_acls])))[0]
+                invalid_id = list(set(network_acl_ids).difference(
+                    set([network_acl.id for network_acl in network_acls])))[0]
                 raise InvalidRouteTableIdError(invalid_id)
         return generic_filter(filters, network_acls)
@@ -3177,7 +3314,7 @@ class NetworkAclBackend(object):
         # lookup existing association for subnet and delete it
         default_acl = next(value for key, value in self.network_acls.items()
-                if association_id in value.associations.keys())
+                           if association_id in value.associations.keys())
         subnet_id = None
         for key, value in default_acl.associations.items():
@@ -3203,6 +3340,7 @@ class NetworkAclBackend(object):
 class NetworkAclAssociation(object):
+
     def __init__(self, ec2_backend, new_association_id,
                  subnet_id, network_acl_id):
         self.ec2_backend = ec2_backend
@@ -3214,6 +3352,7 @@ class NetworkAclAssociation(object):
 class NetworkAcl(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, network_acl_id, vpc_id, default=False):
         self.ec2_backend = ec2_backend
         self.id = network_acl_id
@@ -3235,12 +3374,14 @@ class NetworkAcl(TaggedEC2Resource):
         filter_value = super(NetworkAcl, self).get_filter_value(filter_name)
         if filter_value is None:
-            self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeNetworkAcls".format(filter_name))
+            self.ec2_backend.raise_not_implemented_error(
+                "The filter '{0}' for DescribeNetworkAcls".format(filter_name))
         return filter_value
 class NetworkAclEntry(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, network_acl_id,
                  rule_number, protocol, rule_action, egress,
                  cidr_block, icmp_code, icmp_type,
                  port_range_from,
@@ -3259,6 +3400,7 @@ class NetworkAclEntry(TaggedEC2Resource):
 class VpnGateway(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, id, type):
         self.ec2_backend = ec2_backend
         self.id = id
@@ -3268,6 +3410,7 @@ class VpnGateway(TaggedEC2Resource):
 class VpnGatewayAttachment(object):
+
     def __init__(self, vpc_id, state):
         self.vpc_id = vpc_id
         self.state = state
@@ -3275,6 +3418,7 @@ class VpnGatewayAttachment(object):
 class VpnGatewayBackend(object):
+
     def __init__(self):
         self.vpn_gateways = {}
         super(VpnGatewayBackend, self).__init__()
@@ -3318,6 +3462,7 @@ class VpnGatewayBackend(object):
 class CustomerGateway(TaggedEC2Resource):
+
     def __init__(self, ec2_backend, id, type, ip_address, bgp_asn):
         self.ec2_backend = ec2_backend
         self.id = id
@@ -3329,13 +3474,15 @@ class CustomerGateway(TaggedEC2Resource):
 class CustomerGatewayBackend(object):
+
     def __init__(self):
         self.customer_gateways = {}
         super(CustomerGatewayBackend, self).__init__()
     def create_customer_gateway(self, type='ipsec.1', ip_address=None, bgp_asn=None):
         customer_gateway_id = random_customer_gateway_id()
-        customer_gateway = CustomerGateway(self, customer_gateway_id, type, ip_address, bgp_asn)
+        customer_gateway = CustomerGateway(
+            self, customer_gateway_id, type, ip_address, bgp_asn)
         self.customer_gateways[customer_gateway_id] = customer_gateway
         return customer_gateway
@@ -3344,7 +3491,8 @@ class CustomerGatewayBackend(object):
         return generic_filter(filters, customer_gateways)
     def get_customer_gateway(self, customer_gateway_id):
-        customer_gateway = self.customer_gateways.get(customer_gateway_id, None)
+        customer_gateway = self.customer_gateways.get(
+            customer_gateway_id, None)
         if not customer_gateway:
             raise InvalidCustomerGatewayIdError(customer_gateway_id)
         return customer_gateway
@@ -3370,10 +3518,12 @@ class NatGateway(object):
         self._created_at = datetime.utcnow()
         self._backend = backend
         # NOTE: this is the core of NAT Gateways creation
-        self._eni = self._backend.create_network_interface(backend.get_subnet(self.subnet_id), self.private_ip)
+        self._eni = self._backend.create_network_interface(
+            backend.get_subnet(self.subnet_id), self.private_ip)
         # associate allocation with ENI
-        self._backend.associate_address(eni=self._eni, allocation_id=self.allocation_id)
+        self._backend.associate_address(
+            eni=self._eni, allocation_id=self.allocation_id)
     @property
     def vpc_id(self):
@@ -3427,7 +3577,7 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
                  VPCPeeringConnectionBackend,
                  RouteTableBackend, RouteBackend, InternetGatewayBackend,
                  VPCGatewayAttachmentBackend, SpotFleetBackend,
-                 SpotRequestBackend,ElasticAddressBackend, KeyPairBackend,
+                 SpotRequestBackend, ElasticAddressBackend, KeyPairBackend,
                  DHCPOptionsSetBackend, NetworkAclBackend, VpnGatewayBackend,
                  CustomerGatewayBackend, NatGatewayBackend):
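EC2Backend is assembled from the many small backend mixins listed in its base classes; each mixin initializes its own state dict and chains to super().__init__(), so Python's MRO wires the whole stack together. The pattern in miniature:

    class VolumeBackend(object):
        def __init__(self):
            self.volumes = {}
            super(VolumeBackend, self).__init__()

    class SnapshotBackend(object):
        def __init__(self):
            self.snapshots = {}
            super(SnapshotBackend, self).__init__()

    class TinyEC2Backend(VolumeBackend, SnapshotBackend):
        def __init__(self, region_name):
            self.region_name = region_name
            # Cooperative super() call runs every mixin's __init__ in MRO order.
            super(TinyEC2Backend, self).__init__()

    backend = TinyEC2Backend('us-east-1')
    print(backend.volumes, backend.snapshots)  # {} {}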
@@ -3463,7 +3613,8 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
         self.__dict__ = {}
         self.__init__(region_name)
-    # Use this to generate a proper error template response when in a response handler.
+    # Use this to generate a proper error template response when in a response
+    # handler.
     def raise_error(self, code, message):
         raise EC2ClientError(code, message)
@@ -3485,11 +3636,13 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['instance']:
                 self.get_instance_by_id(instance_id=resource_id)
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['internet-gateway']:
-                self.describe_internet_gateways(internet_gateway_ids=[resource_id])
+                self.describe_internet_gateways(
+                    internet_gateway_ids=[resource_id])
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['network-acl']:
                 self.get_all_network_acls()
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['network-interface']:
-                self.describe_network_interfaces(filters={'network-interface-id': resource_id})
+                self.describe_network_interfaces(
+                    filters={'network-interface-id': resource_id})
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['reserved-instance']:
                 self.raise_not_implemented_error('DescribeReservedInstances')
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['route-table']:
@@ -3499,7 +3652,8 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['snapshot']:
                 self.get_snapshot(snapshot_id=resource_id)
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['spot-instance-request']:
-                self.describe_spot_instance_requests(filters={'spot-instance-request-id': resource_id})
+                self.describe_spot_instance_requests(
+                    filters={'spot-instance-request-id': resource_id})
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['subnet']:
                 self.get_subnet(subnet_id=resource_id)
             elif resource_prefix == EC2_RESOURCE_TO_PREFIX['volume']:
@@ -3514,6 +3668,7 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
                 self.get_vpn_gateway(vpn_gateway_id=resource_id)
         return True
+
 ec2_backends = {}
 for region in RegionsAndZonesBackend.regions:
     ec2_backends[region.name] = EC2Backend(region.name)
diff --git a/moto/ec2/responses/__init__.py b/moto/ec2/responses/__init__.py
index 2049998ad..449d25a45 100644
--- a/moto/ec2/responses/__init__.py
+++ b/moto/ec2/responses/__init__.py
@@ -66,6 +66,7 @@ class EC2Response(
     Windows,
     NatGateways,
 ):
+
     @property
     def ec2_backend(self):
         from moto.ec2.models import ec2_backends
diff --git a/moto/ec2/responses/amazon_dev_pay.py b/moto/ec2/responses/amazon_dev_pay.py
index af10a8d68..14df3f004 100644
--- a/moto/ec2/responses/amazon_dev_pay.py
+++ b/moto/ec2/responses/amazon_dev_pay.py
@@ -3,5 +3,7 @@ from moto.core.responses import BaseResponse
 class AmazonDevPay(BaseResponse):
+
     def confirm_product_instance(self):
-        raise NotImplementedError('AmazonDevPay.confirm_product_instance is not yet implemented')
+        raise NotImplementedError(
+            'AmazonDevPay.confirm_product_instance is not yet implemented')
diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py
index b60452a3f..42bfba209 100755
--- a/moto/ec2/responses/amis.py
+++ b/moto/ec2/responses/amis.py
@@ -5,6 +5,7 @@ from moto.ec2.utils import instance_ids_from_querystring, image_ids_from_queryst
 class AmisResponse(BaseResponse):
+
     def create_image(self):
         name = self.querystring.get('Name')[0]
         if "Description" in self.querystring:
@@ -14,17 +15,21 @@ class AmisResponse(BaseResponse):
         instance_ids = instance_ids_from_querystring(self.querystring)
         instance_id = instance_ids[0]
         if self.is_not_dryrun('CreateImage'):
-            image = self.ec2_backend.create_image(instance_id, name, description)
+            image = self.ec2_backend.create_image(
+                instance_id, name, description)
             template = self.response_template(CREATE_IMAGE_RESPONSE)
             return template.render(image=image)
     def copy_image(self):
         source_image_id = self.querystring.get('SourceImageId')[0]
         source_region = self.querystring.get('SourceRegion')[0]
-        name = self.querystring.get('Name')[0] if self.querystring.get('Name') else None
-        description = self.querystring.get('Description')[0] if self.querystring.get('Description') else None
+        name = self.querystring.get(
+            'Name')[0] if self.querystring.get('Name') else None
+        description = self.querystring.get(
+            'Description')[0] if self.querystring.get('Description') else None
         if self.is_not_dryrun('CopyImage'):
-            image = self.ec2_backend.copy_image(source_image_id, source_region, name, description)
+            image = self.ec2_backend.copy_image(
+                source_image_id, source_region, name, description)
             template = self.response_template(COPY_IMAGE_RESPONSE)
             return template.render(image=image)
@@ -38,7 +43,8 @@ class AmisResponse(BaseResponse):
     def describe_images(self):
         ami_ids = image_ids_from_querystring(self.querystring)
         filters = filters_from_querystring(self.querystring)
-        images = self.ec2_backend.describe_images(ami_ids=ami_ids, filters=filters)
+        images = self.ec2_backend.describe_images(
+            ami_ids=ami_ids, filters=filters)
         template = self.response_template(DESCRIBE_IMAGES_RESPONSE)
         return template.render(images=images)
@@ -56,18 +62,22 @@ class AmisResponse(BaseResponse):
         user_ids = sequence_from_querystring('UserId', self.querystring)
         if self.is_not_dryrun('ModifyImageAttribute'):
             if (operation_type == 'add'):
-                self.ec2_backend.add_launch_permission(ami_id, user_ids=user_ids, group=group)
+                self.ec2_backend.add_launch_permission(
+                    ami_id, user_ids=user_ids, group=group)
             elif (operation_type == 'remove'):
-                self.ec2_backend.remove_launch_permission(ami_id, user_ids=user_ids, group=group)
+                self.ec2_backend.remove_launch_permission(
+                    ami_id, user_ids=user_ids, group=group)
             return MODIFY_IMAGE_ATTRIBUTE_RESPONSE
     def register_image(self):
         if self.is_not_dryrun('RegisterImage'):
-            raise NotImplementedError('AMIs.register_image is not yet implemented')
+            raise NotImplementedError(
+                'AMIs.register_image is not yet implemented')
     def reset_image_attribute(self):
         if self.is_not_dryrun('ResetImageAttribute'):
-            raise NotImplementedError('AMIs.reset_image_attribute is not yet implemented')
+            raise NotImplementedError(
+                'AMIs.reset_image_attribute is not yet implemented')
 CREATE_IMAGE_RESPONSE = """
    59dbff89-35bd-4eac-99ed-be587EXAMPLE
@@ -80,7 +90,8 @@ COPY_IMAGE_RESPONSE = """
    59dbff89-35bd-4eac-99ed-be587EXAMPLE
diff --git a/moto/ec2/responses/availability_zones_and_regions.py b/moto/ec2/responses/availability_zones_and_regions.py
index 50869e934..3d0a5ab05 100644
--- a/moto/ec2/responses/availability_zones_and_regions.py
+++ b/moto/ec2/responses/availability_zones_and_regions.py
@@ -3,6 +3,7 @@ from moto.core.responses import BaseResponse
 class AvailabilityZonesAndRegions(BaseResponse):
+
     def describe_availability_zones(self):
         zones = self.ec2_backend.describe_availability_zones()
         template = self.response_template(DESCRIBE_ZONES_RESPONSE)
@@ -13,6 +14,7 @@ class AvailabilityZonesAndRegions(BaseResponse):
         template = self.response_template(DESCRIBE_REGIONS_RESPONSE)
         return template.render(regions=regions)
+
 DESCRIBE_REGIONS_RESPONSE = """
    59dbff89-35bd-4eac-99ed-be587EXAMPLE
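The response classes above read parameters out of a parsed querystring, which maps each key to a list of values; hence the recurring `get(...)[0] if get(...) else None` dance for optional parameters. A stand-alone illustration of the same access pattern:

    try:
        from urllib.parse import parse_qs  # Python 3
    except ImportError:
        from urlparse import parse_qs      # Python 2

    querystring = parse_qs('Action=CopyImage&SourceImageId=ami-1234&Name=copy')
    # Required parameter: index straight into the single-element list.
    source_image_id = querystring.get('SourceImageId')[0]
    # Optional parameters: guard before indexing.
    name = querystring.get('Name')[0] if querystring.get('Name') else None
    description = querystring.get('Description')[0] if querystring.get('Description') else None
    print(source_image_id, name, description)  # ami-1234 copy None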
self.querystring.get('Type', None)[0] ip_address = self.querystring.get('IpAddress', None)[0] bgp_asn = self.querystring.get('BgpAsn', None)[0] - customer_gateway = self.ec2_backend.create_customer_gateway(type, ip_address=ip_address, bgp_asn=bgp_asn) + customer_gateway = self.ec2_backend.create_customer_gateway( + type, ip_address=ip_address, bgp_asn=bgp_asn) template = self.response_template(CREATE_CUSTOMER_GATEWAY_RESPONSE) return template.render(customer_gateway=customer_gateway) def delete_customer_gateway(self): customer_gateway_id = self.querystring.get('CustomerGatewayId')[0] - delete_status = self.ec2_backend.delete_customer_gateway(customer_gateway_id) + delete_status = self.ec2_backend.delete_customer_gateway( + customer_gateway_id) template = self.response_template(DELETE_CUSTOMER_GATEWAY_RESPONSE) return template.render(customer_gateway=delete_status) diff --git a/moto/ec2/responses/dhcp_options.py b/moto/ec2/responses/dhcp_options.py index b9d1469b5..450ef1bf9 100644 --- a/moto/ec2/responses/dhcp_options.py +++ b/moto/ec2/responses/dhcp_options.py @@ -7,6 +7,7 @@ from moto.ec2.utils import ( class DHCPOptions(BaseResponse): + def associate_dhcp_options(self): dhcp_opt_id = self.querystring.get("DhcpOptionsId", [None])[0] vpc_id = self.querystring.get("VpcId", [None])[0] @@ -48,9 +49,11 @@ class DHCPOptions(BaseResponse): return template.render(delete_status=delete_status) def describe_dhcp_options(self): - dhcp_opt_ids = sequence_from_querystring("DhcpOptionsId", self.querystring) + dhcp_opt_ids = sequence_from_querystring( + "DhcpOptionsId", self.querystring) filters = filters_from_querystring(self.querystring) - dhcp_opts = self.ec2_backend.get_all_dhcp_options(dhcp_opt_ids, filters) + dhcp_opts = self.ec2_backend.get_all_dhcp_options( + dhcp_opt_ids, filters) template = self.response_template(DESCRIBE_DHCP_OPTIONS_RESPONSE) return template.render(dhcp_options=dhcp_opts) diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index ddbf30e68..0773ffbe2 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -10,13 +10,15 @@ class ElasticBlockStore(BaseResponse): instance_id = self.querystring.get('InstanceId')[0] device_path = self.querystring.get('Device')[0] if self.is_not_dryrun('AttachVolume'): - attachment = self.ec2_backend.attach_volume(volume_id, instance_id, device_path) + attachment = self.ec2_backend.attach_volume( + volume_id, instance_id, device_path) template = self.response_template(ATTACHED_VOLUME_RESPONSE) return template.render(attachment=attachment) def copy_snapshot(self): if self.is_not_dryrun('CopySnapshot'): - raise NotImplementedError('ElasticBlockStore.copy_snapshot is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.copy_snapshot is not yet implemented') def create_snapshot(self): description = self.querystring.get('Description', [None])[0] @@ -32,7 +34,8 @@ class ElasticBlockStore(BaseResponse): snapshot_id = self.querystring.get('SnapshotId', [None])[0] encrypted = self.querystring.get('Encrypted', ['false'])[0] if self.is_not_dryrun('CreateVolume'): - volume = self.ec2_backend.create_volume(size, zone, snapshot_id, encrypted) + volume = self.ec2_backend.create_volume( + size, zone, snapshot_id, encrypted) template = self.response_template(CREATE_VOLUME_RESPONSE) return template.render(volume=volume) @@ -50,51 +53,64 @@ class ElasticBlockStore(BaseResponse): def describe_snapshots(self): filters = 
filters_from_querystring(self.querystring) - # querystring for multiple snapshotids results in SnapshotId.1, SnapshotId.2 etc - snapshot_ids = ','.join([','.join(s[1]) for s in self.querystring.items() if 'SnapshotId' in s[0]]) + # querystring for multiple snapshotids results in SnapshotId.1, + # SnapshotId.2 etc + snapshot_ids = ','.join( + [','.join(s[1]) for s in self.querystring.items() if 'SnapshotId' in s[0]]) snapshots = self.ec2_backend.describe_snapshots(filters=filters) # Describe snapshots to handle filter on snapshot_ids - snapshots = [s for s in snapshots if s.id in snapshot_ids] if snapshot_ids else snapshots + snapshots = [ + s for s in snapshots if s.id in snapshot_ids] if snapshot_ids else snapshots template = self.response_template(DESCRIBE_SNAPSHOTS_RESPONSE) return template.render(snapshots=snapshots) def describe_volumes(self): filters = filters_from_querystring(self.querystring) - # querystring for multiple volumeids results in VolumeId.1, VolumeId.2 etc - volume_ids = ','.join([','.join(v[1]) for v in self.querystring.items() if 'VolumeId' in v[0]]) + # querystring for multiple volumeids results in VolumeId.1, VolumeId.2 + # etc + volume_ids = ','.join( + [','.join(v[1]) for v in self.querystring.items() if 'VolumeId' in v[0]]) volumes = self.ec2_backend.describe_volumes(filters=filters) # Describe volumes to handle filter on volume_ids - volumes = [v for v in volumes if v.id in volume_ids] if volume_ids else volumes + volumes = [ + v for v in volumes if v.id in volume_ids] if volume_ids else volumes template = self.response_template(DESCRIBE_VOLUMES_RESPONSE) return template.render(volumes=volumes) def describe_volume_attribute(self): - raise NotImplementedError('ElasticBlockStore.describe_volume_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.describe_volume_attribute is not yet implemented') def describe_volume_status(self): - raise NotImplementedError('ElasticBlockStore.describe_volume_status is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.describe_volume_status is not yet implemented') def detach_volume(self): volume_id = self.querystring.get('VolumeId')[0] instance_id = self.querystring.get('InstanceId')[0] device_path = self.querystring.get('Device')[0] if self.is_not_dryrun('DetachVolume'): - attachment = self.ec2_backend.detach_volume(volume_id, instance_id, device_path) + attachment = self.ec2_backend.detach_volume( + volume_id, instance_id, device_path) template = self.response_template(DETATCH_VOLUME_RESPONSE) return template.render(attachment=attachment) def enable_volume_io(self): if self.is_not_dryrun('EnableVolumeIO'): - raise NotImplementedError('ElasticBlockStore.enable_volume_io is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.enable_volume_io is not yet implemented') def import_volume(self): if self.is_not_dryrun('ImportVolume'): - raise NotImplementedError('ElasticBlockStore.import_volume is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.import_volume is not yet implemented') def describe_snapshot_attribute(self): snapshot_id = self.querystring.get('SnapshotId')[0] - groups = self.ec2_backend.get_create_volume_permission_groups(snapshot_id) - template = self.response_template(DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE) + groups = self.ec2_backend.get_create_volume_permission_groups( + snapshot_id) + template = self.response_template( + DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE) return template.render(snapshot_id=snapshot_id, groups=groups) 
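A side note on the describe_snapshots/describe_volumes hunks above: both rely on the wire convention that repeated ids arrive as numbered keys ('SnapshotId.1', 'SnapshotId.2', ...), each mapped to a single-element list. A minimal, self-contained sketch of that join-and-filter step, using a hypothetical parsed querystring (the dict below is illustrative only; the join mirrors the handler lines above):

    # Hypothetical parsed querystring: each repeated id arrives under a
    # numbered key, mapped to a single-element list.
    querystring = {
        'Action': ['DescribeSnapshots'],
        'SnapshotId.1': ['snap-1234abcd'],
        'SnapshotId.2': ['snap-5678efgh'],
    }

    # Mirrors the handler: join every matching value into one
    # comma-separated string before filtering.
    snapshot_ids = ','.join(
        [','.join(v) for k, v in querystring.items() if 'SnapshotId' in k])
    print(snapshot_ids)  # snap-1234abcd,snap-5678efgh

Because the ids collapse into a single joined string, the subsequent `s.id in snapshot_ids` test in the handler is a substring match rather than an exact-id comparison.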
def modify_snapshot_attribute(self): @@ -104,18 +120,22 @@ class ElasticBlockStore(BaseResponse): user_id = self.querystring.get('UserId.1', [None])[0] if self.is_not_dryrun('ModifySnapshotAttribute'): if (operation_type == 'add'): - self.ec2_backend.add_create_volume_permission(snapshot_id, user_id=user_id, group=group) + self.ec2_backend.add_create_volume_permission( + snapshot_id, user_id=user_id, group=group) elif (operation_type == 'remove'): - self.ec2_backend.remove_create_volume_permission(snapshot_id, user_id=user_id, group=group) + self.ec2_backend.remove_create_volume_permission( + snapshot_id, user_id=user_id, group=group) return MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE def modify_volume_attribute(self): if self.is_not_dryrun('ModifyVolumeAttribute'): - raise NotImplementedError('ElasticBlockStore.modify_volume_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.modify_volume_attribute is not yet implemented') def reset_snapshot_attribute(self): if self.is_not_dryrun('ResetSnapshotAttribute'): - raise NotImplementedError('ElasticBlockStore.reset_snapshot_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.reset_snapshot_attribute is not yet implemented') CREATE_VOLUME_RESPONSE = """ @@ -272,4 +292,4 @@ MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE = """ 666d2944-9276-4d6a-be12-1f4ada972fd8 true -""" \ No newline at end of file +""" diff --git a/moto/ec2/responses/elastic_ip_addresses.py b/moto/ec2/responses/elastic_ip_addresses.py index 3ae75671f..a64a33bb5 100644 --- a/moto/ec2/responses/elastic_ip_addresses.py +++ b/moto/ec2/responses/elastic_ip_addresses.py @@ -4,6 +4,7 @@ from moto.ec2.utils import sequence_from_querystring class ElasticIPAddresses(BaseResponse): + def allocate_address(self): if "Domain" in self.querystring: domain = self.querystring.get('Domain')[0] @@ -18,11 +19,14 @@ class ElasticIPAddresses(BaseResponse): instance = eni = None if "InstanceId" in self.querystring: - instance = self.ec2_backend.get_instance(self.querystring['InstanceId'][0]) + instance = self.ec2_backend.get_instance( + self.querystring['InstanceId'][0]) elif "NetworkInterfaceId" in self.querystring: - eni = self.ec2_backend.get_network_interface(self.querystring['NetworkInterfaceId'][0]) + eni = self.ec2_backend.get_network_interface( + self.querystring['NetworkInterfaceId'][0]) else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect InstanceId/NetworkId parameter.") + self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect InstanceId/NetworkId parameter.") reassociate = False if "AllowReassociation" in self.querystring: @@ -31,13 +35,17 @@ class ElasticIPAddresses(BaseResponse): if self.is_not_dryrun('AssociateAddress'): if instance or eni: if "PublicIp" in self.querystring: - eip = self.ec2_backend.associate_address(instance=instance, eni=eni, address=self.querystring['PublicIp'][0], reassociate=reassociate) + eip = self.ec2_backend.associate_address(instance=instance, eni=eni, address=self.querystring[ + 'PublicIp'][0], reassociate=reassociate) elif "AllocationId" in self.querystring: - eip = self.ec2_backend.associate_address(instance=instance, eni=eni, allocation_id=self.querystring['AllocationId'][0], reassociate=reassociate) + eip = self.ec2_backend.associate_address(instance=instance, eni=eni, allocation_id=self.querystring[ + 'AllocationId'][0], reassociate=reassociate) else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") 
+ self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect either instance or ENI.") + self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect either instance or ENI.") template = self.response_template(ASSOCIATE_ADDRESS_RESPONSE) return template.render(address=eip) @@ -46,17 +54,23 @@ class ElasticIPAddresses(BaseResponse): template = self.response_template(DESCRIBE_ADDRESS_RESPONSE) if "Filter.1.Name" in self.querystring: - filter_by = sequence_from_querystring("Filter.1.Name", self.querystring)[0] - filter_value = sequence_from_querystring("Filter.1.Value", self.querystring) + filter_by = sequence_from_querystring( + "Filter.1.Name", self.querystring)[0] + filter_value = sequence_from_querystring( + "Filter.1.Value", self.querystring) if filter_by == 'instance-id': - addresses = filter(lambda x: x.instance.id == filter_value[0], self.ec2_backend.describe_addresses()) + addresses = filter(lambda x: x.instance.id == filter_value[ + 0], self.ec2_backend.describe_addresses()) else: - raise NotImplementedError("Filtering not supported in describe_address.") + raise NotImplementedError( + "Filtering not supported in describe_address.") elif "PublicIp.1" in self.querystring: - public_ips = sequence_from_querystring("PublicIp", self.querystring) + public_ips = sequence_from_querystring( + "PublicIp", self.querystring) addresses = self.ec2_backend.address_by_ip(public_ips) elif "AllocationId.1" in self.querystring: - allocation_ids = sequence_from_querystring("AllocationId", self.querystring) + allocation_ids = sequence_from_querystring( + "AllocationId", self.querystring) addresses = self.ec2_backend.address_by_allocation(allocation_ids) else: addresses = self.ec2_backend.describe_addresses() @@ -65,22 +79,28 @@ class ElasticIPAddresses(BaseResponse): def disassociate_address(self): if self.is_not_dryrun('DisAssociateAddress'): if "PublicIp" in self.querystring: - self.ec2_backend.disassociate_address(address=self.querystring['PublicIp'][0]) + self.ec2_backend.disassociate_address( + address=self.querystring['PublicIp'][0]) elif "AssociationId" in self.querystring: - self.ec2_backend.disassociate_address(association_id=self.querystring['AssociationId'][0]) + self.ec2_backend.disassociate_address( + association_id=self.querystring['AssociationId'][0]) else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect PublicIp/AssociationId parameter.") + self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect PublicIp/AssociationId parameter.") return self.response_template(DISASSOCIATE_ADDRESS_RESPONSE).render() def release_address(self): if self.is_not_dryrun('ReleaseAddress'): if "PublicIp" in self.querystring: - self.ec2_backend.release_address(address=self.querystring['PublicIp'][0]) + self.ec2_backend.release_address( + address=self.querystring['PublicIp'][0]) elif "AllocationId" in self.querystring: - self.ec2_backend.release_address(allocation_id=self.querystring['AllocationId'][0]) + self.ec2_backend.release_address( + allocation_id=self.querystring['AllocationId'][0]) else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") + self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") return self.response_template(RELEASE_ADDRESS_RESPONSE).render() diff --git 
a/moto/ec2/responses/elastic_network_interfaces.py b/moto/ec2/responses/elastic_network_interfaces.py index c1c7383cb..cbe76e306 100644 --- a/moto/ec2/responses/elastic_network_interfaces.py +++ b/moto/ec2/responses/elastic_network_interfaces.py @@ -4,28 +4,35 @@ from moto.ec2.utils import sequence_from_querystring, filters_from_querystring class ElasticNetworkInterfaces(BaseResponse): + def create_network_interface(self): subnet_id = self.querystring.get('SubnetId')[0] - private_ip_address = self.querystring.get('PrivateIpAddress', [None])[0] + private_ip_address = self.querystring.get( + 'PrivateIpAddress', [None])[0] groups = sequence_from_querystring('SecurityGroupId', self.querystring) subnet = self.ec2_backend.get_subnet(subnet_id) if self.is_not_dryrun('CreateNetworkInterface'): - eni = self.ec2_backend.create_network_interface(subnet, private_ip_address, groups) - template = self.response_template(CREATE_NETWORK_INTERFACE_RESPONSE) + eni = self.ec2_backend.create_network_interface( + subnet, private_ip_address, groups) + template = self.response_template( + CREATE_NETWORK_INTERFACE_RESPONSE) return template.render(eni=eni) def delete_network_interface(self): eni_id = self.querystring.get('NetworkInterfaceId')[0] if self.is_not_dryrun('DeleteNetworkInterface'): self.ec2_backend.delete_network_interface(eni_id) - template = self.response_template(DELETE_NETWORK_INTERFACE_RESPONSE) + template = self.response_template( + DELETE_NETWORK_INTERFACE_RESPONSE) return template.render() def describe_network_interface_attribute(self): - raise NotImplementedError('ElasticNetworkInterfaces(AmazonVPC).describe_network_interface_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticNetworkInterfaces(AmazonVPC).describe_network_interface_attribute is not yet implemented') def describe_network_interfaces(self): - eni_ids = sequence_from_querystring('NetworkInterfaceId', self.querystring) + eni_ids = sequence_from_querystring( + 'NetworkInterfaceId', self.querystring) filters = filters_from_querystring(self.querystring) enis = self.ec2_backend.get_all_network_interfaces(eni_ids, filters) template = self.response_template(DESCRIBE_NETWORK_INTERFACES_RESPONSE) @@ -36,15 +43,18 @@ class ElasticNetworkInterfaces(BaseResponse): instance_id = self.querystring.get('InstanceId')[0] device_index = self.querystring.get('DeviceIndex')[0] if self.is_not_dryrun('AttachNetworkInterface'): - attachment_id = self.ec2_backend.attach_network_interface(eni_id, instance_id, device_index) - template = self.response_template(ATTACH_NETWORK_INTERFACE_RESPONSE) + attachment_id = self.ec2_backend.attach_network_interface( + eni_id, instance_id, device_index) + template = self.response_template( + ATTACH_NETWORK_INTERFACE_RESPONSE) return template.render(attachment_id=attachment_id) def detach_network_interface(self): attachment_id = self.querystring.get('AttachmentId')[0] if self.is_not_dryrun('DetachNetworkInterface'): self.ec2_backend.detach_network_interface(attachment_id) - template = self.response_template(DETACH_NETWORK_INTERFACE_RESPONSE) + template = self.response_template( + DETACH_NETWORK_INTERFACE_RESPONSE) return template.render() def modify_network_interface_attribute(self): @@ -52,12 +62,15 @@ class ElasticNetworkInterfaces(BaseResponse): eni_id = self.querystring.get('NetworkInterfaceId')[0] group_id = self.querystring.get('SecurityGroupId.1')[0] if self.is_not_dryrun('ModifyNetworkInterface'): - self.ec2_backend.modify_network_interface_attribute(eni_id, group_id) + 
self.ec2_backend.modify_network_interface_attribute( + eni_id, group_id) return MODIFY_NETWORK_INTERFACE_ATTRIBUTE_RESPONSE def reset_network_interface_attribute(self): if self.is_not_dryrun('ResetNetworkInterface'): - raise NotImplementedError('ElasticNetworkInterfaces(AmazonVPC).reset_network_interface_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticNetworkInterfaces(AmazonVPC).reset_network_interface_attribute is not yet implemented') + CREATE_NETWORK_INTERFACE_RESPONSE = """ diff --git a/moto/ec2/responses/general.py b/moto/ec2/responses/general.py index 9fce05ccf..bd95c1975 100644 --- a/moto/ec2/responses/general.py +++ b/moto/ec2/responses/general.py @@ -4,6 +4,7 @@ from moto.ec2.utils import instance_ids_from_querystring class General(BaseResponse): + def get_console_output(self): self.instance_ids = instance_ids_from_querystring(self.querystring) instance_id = self.instance_ids[0] diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 3c5a087d9..4da7b880f 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -5,14 +5,18 @@ from moto.core.utils import camelcase_to_underscores from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, \ dict_from_querystring, optional_from_querystring + class InstanceResponse(BaseResponse): + def describe_instances(self): filter_dict = filters_from_querystring(self.querystring) instance_ids = instance_ids_from_querystring(self.querystring) if instance_ids: - reservations = self.ec2_backend.get_reservations_by_instance_ids(instance_ids, filters=filter_dict) + reservations = self.ec2_backend.get_reservations_by_instance_ids( + instance_ids, filters=filter_dict) else: - reservations = self.ec2_backend.all_reservations(make_copy=True, filters=filter_dict) + reservations = self.ec2_backend.all_reservations( + make_copy=True, filters=filter_dict) template = self.response_template(EC2_DESCRIBE_INSTANCES) return template.render(reservations=reservations) @@ -25,10 +29,12 @@ class InstanceResponse(BaseResponse): security_group_ids = self._get_multi_param('SecurityGroupId') nics = dict_from_querystring("NetworkInterface", self.querystring) instance_type = self.querystring.get("InstanceType", ["m1.small"])[0] - placement = self.querystring.get("Placement.AvailabilityZone", [None])[0] + placement = self.querystring.get( + "Placement.AvailabilityZone", [None])[0] subnet_id = self.querystring.get("SubnetId", [None])[0] private_ip = self.querystring.get("PrivateIpAddress", [None])[0] - associate_public_ip = self.querystring.get("AssociatePublicIpAddress", [None])[0] + associate_public_ip = self.querystring.get( + "AssociatePublicIpAddress", [None])[0] key_name = self.querystring.get("KeyName", [None])[0] if self.is_not_dryrun('RunInstance'): @@ -72,10 +78,11 @@ class InstanceResponse(BaseResponse): def describe_instance_status(self): instance_ids = instance_ids_from_querystring(self.querystring) include_all_instances = optional_from_querystring('IncludeAllInstances', - self.querystring) == 'true' + self.querystring) == 'true' if instance_ids: - instances = self.ec2_backend.get_multi_instances_by_id(instance_ids) + instances = self.ec2_backend.get_multi_instances_by_id( + instance_ids) elif include_all_instances: instances = self.ec2_backend.all_instances() else: @@ -85,7 +92,8 @@ class InstanceResponse(BaseResponse): return template.render(instances=instances) def describe_instance_types(self): - instance_types = [InstanceType(name='t1.micro', cores=1, 
memory=644874240, disk=0)] + instance_types = [InstanceType( + name='t1.micro', cores=1, memory=644874240, disk=0)] template = self.response_template(EC2_DESCRIBE_INSTANCE_TYPES) return template.render(instance_types=instance_types) @@ -96,10 +104,12 @@ class InstanceResponse(BaseResponse): key = camelcase_to_underscores(attribute) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] - instance, value = self.ec2_backend.describe_instance_attribute(instance_id, key) + instance, value = self.ec2_backend.describe_instance_attribute( + instance_id, key) if key == "group_set": - template = self.response_template(EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE) + template = self.response_template( + EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE) else: template = self.response_template(EC2_DESCRIBE_INSTANCE_ATTRIBUTE) @@ -152,7 +162,8 @@ class InstanceResponse(BaseResponse): instance = self.ec2_backend.get_instance(instance_id) if self.is_not_dryrun('ModifyInstanceAttribute'): - block_device_type = instance.block_device_mapping[device_name_value] + block_device_type = instance.block_device_mapping[ + device_name_value] block_device_type.delete_on_termination = del_on_term_value # +1 for the next device @@ -171,24 +182,27 @@ class InstanceResponse(BaseResponse): if not attribute_key: return - if self.is_not_dryrun('Modify'+attribute_key.split(".")[0]): + if self.is_not_dryrun('Modify' + attribute_key.split(".")[0]): value = self.querystring.get(attribute_key)[0] - normalized_attribute = camelcase_to_underscores(attribute_key.split(".")[0]) + normalized_attribute = camelcase_to_underscores( + attribute_key.split(".")[0]) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] - self.ec2_backend.modify_instance_attribute(instance_id, normalized_attribute, value) + self.ec2_backend.modify_instance_attribute( + instance_id, normalized_attribute, value) return EC2_MODIFY_INSTANCE_ATTRIBUTE def _security_grp_instance_attribute_handler(self): new_security_grp_list = [] for key, value in self.querystring.items(): - if 'GroupId.' in key: + if 'GroupId.' 
in key: new_security_grp_list.append(self.querystring.get(key)[0]) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] if self.is_not_dryrun('ModifyInstanceSecurityGroups'): - self.ec2_backend.modify_instance_security_groups(instance_id, new_security_grp_list) + self.ec2_backend.modify_instance_security_groups( + instance_id, new_security_grp_list) return EC2_MODIFY_INSTANCE_ATTRIBUTE @@ -630,4 +644,4 @@ EC2_DESCRIBE_INSTANCE_TYPES = """ {% endfor %} -""" \ No newline at end of file +""" diff --git a/moto/ec2/responses/internet_gateways.py b/moto/ec2/responses/internet_gateways.py index 5b7a824f0..4a3da0b34 100644 --- a/moto/ec2/responses/internet_gateways.py +++ b/moto/ec2/responses/internet_gateways.py @@ -7,6 +7,7 @@ from moto.ec2.utils import ( class InternetGateways(BaseResponse): + def attach_internet_gateway(self): igw_id = self.querystring.get("InternetGatewayId", [None])[0] vpc_id = self.querystring.get("VpcId", [None])[0] @@ -33,9 +34,11 @@ class InternetGateways(BaseResponse): if "InternetGatewayId.1" in self.querystring: igw_ids = sequence_from_querystring( "InternetGatewayId", self.querystring) - igws = self.ec2_backend.describe_internet_gateways(igw_ids, filters=filter_dict) + igws = self.ec2_backend.describe_internet_gateways( + igw_ids, filters=filter_dict) else: - igws = self.ec2_backend.describe_internet_gateways(filters=filter_dict) + igws = self.ec2_backend.describe_internet_gateways( + filters=filter_dict) template = self.response_template(DESCRIBE_INTERNET_GATEWAYS_RESPONSE) return template.render(internet_gateways=igws) diff --git a/moto/ec2/responses/ip_addresses.py b/moto/ec2/responses/ip_addresses.py index 995719202..fab5cbddc 100644 --- a/moto/ec2/responses/ip_addresses.py +++ b/moto/ec2/responses/ip_addresses.py @@ -4,10 +4,13 @@ from moto.core.responses import BaseResponse class IPAddresses(BaseResponse): + def assign_private_ip_addresses(self): if self.is_not_dryrun('AssignPrivateIPAddress'): - raise NotImplementedError('IPAddresses.assign_private_ip_addresses is not yet implemented') + raise NotImplementedError( + 'IPAddresses.assign_private_ip_addresses is not yet implemented') def unassign_private_ip_addresses(self): if self.is_not_dryrun('UnAssignPrivateIPAddress'): - raise NotImplementedError('IPAddresses.unassign_private_ip_addresses is not yet implemented') + raise NotImplementedError( + 'IPAddresses.unassign_private_ip_addresses is not yet implemented') diff --git a/moto/ec2/responses/key_pairs.py b/moto/ec2/responses/key_pairs.py index 72f8715ec..936df2cd3 100644 --- a/moto/ec2/responses/key_pairs.py +++ b/moto/ec2/responses/key_pairs.py @@ -16,14 +16,16 @@ class KeyPairs(BaseResponse): def delete_key_pair(self): name = self.querystring.get('KeyName')[0] if self.is_not_dryrun('DeleteKeyPair'): - success = six.text_type(self.ec2_backend.delete_key_pair(name)).lower() + success = six.text_type( + self.ec2_backend.delete_key_pair(name)).lower() return self.response_template(DELETE_KEY_PAIR_RESPONSE).render(success=success) def describe_key_pairs(self): names = keypair_names_from_querystring(self.querystring) filters = filters_from_querystring(self.querystring) if len(filters) > 0: - raise NotImplementedError('Using filters in KeyPairs.describe_key_pairs is not yet implemented') + raise NotImplementedError( + 'Using filters in KeyPairs.describe_key_pairs is not yet implemented') keypairs = self.ec2_backend.describe_key_pairs(names) template = self.response_template(DESCRIBE_KEY_PAIRS_RESPONSE) diff --git 
a/moto/ec2/responses/monitoring.py b/moto/ec2/responses/monitoring.py index 3d40a1479..2024abe7e 100644 --- a/moto/ec2/responses/monitoring.py +++ b/moto/ec2/responses/monitoring.py @@ -3,10 +3,13 @@ from moto.core.responses import BaseResponse class Monitoring(BaseResponse): + def monitor_instances(self): if self.is_not_dryrun('MonitorInstances'): - raise NotImplementedError('Monitoring.monitor_instances is not yet implemented') + raise NotImplementedError( + 'Monitoring.monitor_instances is not yet implemented') def unmonitor_instances(self): if self.is_not_dryrun('UnMonitorInstances'): - raise NotImplementedError('Monitoring.unmonitor_instances is not yet implemented') + raise NotImplementedError( + 'Monitoring.unmonitor_instances is not yet implemented') diff --git a/moto/ec2/responses/nat_gateways.py b/moto/ec2/responses/nat_gateways.py index 98d383d47..ce9479e82 100644 --- a/moto/ec2/responses/nat_gateways.py +++ b/moto/ec2/responses/nat_gateways.py @@ -8,7 +8,8 @@ class NatGateways(BaseResponse): def create_nat_gateway(self): subnet_id = self._get_param('SubnetId') allocation_id = self._get_param('AllocationId') - nat_gateway = self.ec2_backend.create_nat_gateway(subnet_id=subnet_id, allocation_id=allocation_id) + nat_gateway = self.ec2_backend.create_nat_gateway( + subnet_id=subnet_id, allocation_id=allocation_id) template = self.response_template(CREATE_NAT_GATEWAY) return template.render(nat_gateway=nat_gateway) diff --git a/moto/ec2/responses/network_acls.py b/moto/ec2/responses/network_acls.py index 8093e18c8..bf9833d13 100644 --- a/moto/ec2/responses/network_acls.py +++ b/moto/ec2/responses/network_acls.py @@ -45,7 +45,8 @@ class NetworkACLs(BaseResponse): def describe_network_acls(self): network_acl_ids = network_acl_ids_from_querystring(self.querystring) filters = filters_from_querystring(self.querystring) - network_acls = self.ec2_backend.get_all_network_acls(network_acl_ids, filters) + network_acls = self.ec2_backend.get_all_network_acls( + network_acl_ids, filters) template = self.response_template(DESCRIBE_NETWORK_ACL_RESPONSE) return template.render(network_acls=network_acls) diff --git a/moto/ec2/responses/placement_groups.py b/moto/ec2/responses/placement_groups.py index 88926490f..06930f700 100644 --- a/moto/ec2/responses/placement_groups.py +++ b/moto/ec2/responses/placement_groups.py @@ -3,13 +3,17 @@ from moto.core.responses import BaseResponse class PlacementGroups(BaseResponse): + def create_placement_group(self): if self.is_not_dryrun('CreatePlacementGroup'): - raise NotImplementedError('PlacementGroups.create_placement_group is not yet implemented') + raise NotImplementedError( + 'PlacementGroups.create_placement_group is not yet implemented') def delete_placement_group(self): if self.is_not_dryrun('DeletePlacementGroup'): - raise NotImplementedError('PlacementGroups.delete_placement_group is not yet implemented') + raise NotImplementedError( + 'PlacementGroups.delete_placement_group is not yet implemented') def describe_placement_groups(self): - raise NotImplementedError('PlacementGroups.describe_placement_groups is not yet implemented') + raise NotImplementedError( + 'PlacementGroups.describe_placement_groups is not yet implemented') diff --git a/moto/ec2/responses/reserved_instances.py b/moto/ec2/responses/reserved_instances.py index be27260c8..07bd6661e 100644 --- a/moto/ec2/responses/reserved_instances.py +++ b/moto/ec2/responses/reserved_instances.py @@ -3,23 +3,30 @@ from moto.core.responses import BaseResponse class ReservedInstances(BaseResponse): + 
def cancel_reserved_instances_listing(self): if self.is_not_dryrun('CancelReservedInstances'): - raise NotImplementedError('ReservedInstances.cancel_reserved_instances_listing is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.cancel_reserved_instances_listing is not yet implemented') def create_reserved_instances_listing(self): if self.is_not_dryrun('CreateReservedInstances'): - raise NotImplementedError('ReservedInstances.create_reserved_instances_listing is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.create_reserved_instances_listing is not yet implemented') def describe_reserved_instances(self): - raise NotImplementedError('ReservedInstances.describe_reserved_instances is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.describe_reserved_instances is not yet implemented') def describe_reserved_instances_listings(self): - raise NotImplementedError('ReservedInstances.describe_reserved_instances_listings is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.describe_reserved_instances_listings is not yet implemented') def describe_reserved_instances_offerings(self): - raise NotImplementedError('ReservedInstances.describe_reserved_instances_offerings is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.describe_reserved_instances_offerings is not yet implemented') def purchase_reserved_instances_offering(self): if self.is_not_dryrun('PurchaseReservedInstances'): - raise NotImplementedError('ReservedInstances.purchase_reserved_instances_offering is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.purchase_reserved_instances_offering is not yet implemented') diff --git a/moto/ec2/responses/route_tables.py b/moto/ec2/responses/route_tables.py index 04fdf1d25..6f68a6553 100644 --- a/moto/ec2/responses/route_tables.py +++ b/moto/ec2/responses/route_tables.py @@ -8,24 +8,28 @@ class RouteTables(BaseResponse): def associate_route_table(self): route_table_id = self.querystring.get('RouteTableId')[0] subnet_id = self.querystring.get('SubnetId')[0] - association_id = self.ec2_backend.associate_route_table(route_table_id, subnet_id) + association_id = self.ec2_backend.associate_route_table( + route_table_id, subnet_id) template = self.response_template(ASSOCIATE_ROUTE_TABLE_RESPONSE) return template.render(association_id=association_id) def create_route(self): route_table_id = self.querystring.get('RouteTableId')[0] - destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0] + destination_cidr_block = self.querystring.get( + 'DestinationCidrBlock')[0] gateway_id = optional_from_querystring('GatewayId', self.querystring) instance_id = optional_from_querystring('InstanceId', self.querystring) - interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring) - pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring) + interface_id = optional_from_querystring( + 'NetworkInterfaceId', self.querystring) + pcx_id = optional_from_querystring( + 'VpcPeeringConnectionId', self.querystring) self.ec2_backend.create_route(route_table_id, destination_cidr_block, - gateway_id=gateway_id, - instance_id=instance_id, - interface_id=interface_id, - vpc_peering_connection_id=pcx_id) + gateway_id=gateway_id, + instance_id=instance_id, + interface_id=interface_id, + vpc_peering_connection_id=pcx_id) template = self.response_template(CREATE_ROUTE_RESPONSE) return template.render() @@ -38,7 +42,8 @@ class RouteTables(BaseResponse): 
def delete_route(self): route_table_id = self.querystring.get('RouteTableId')[0] - destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0] + destination_cidr_block = self.querystring.get( + 'DestinationCidrBlock')[0] self.ec2_backend.delete_route(route_table_id, destination_cidr_block) template = self.response_template(DELETE_ROUTE_RESPONSE) return template.render() @@ -52,7 +57,8 @@ class RouteTables(BaseResponse): def describe_route_tables(self): route_table_ids = route_table_ids_from_querystring(self.querystring) filters = filters_from_querystring(self.querystring) - route_tables = self.ec2_backend.get_all_route_tables(route_table_ids, filters) + route_tables = self.ec2_backend.get_all_route_tables( + route_table_ids, filters) template = self.response_template(DESCRIBE_ROUTE_TABLES_RESPONSE) return template.render(route_tables=route_tables) @@ -64,18 +70,21 @@ class RouteTables(BaseResponse): def replace_route(self): route_table_id = self.querystring.get('RouteTableId')[0] - destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0] + destination_cidr_block = self.querystring.get( + 'DestinationCidrBlock')[0] gateway_id = optional_from_querystring('GatewayId', self.querystring) instance_id = optional_from_querystring('InstanceId', self.querystring) - interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring) - pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring) + interface_id = optional_from_querystring( + 'NetworkInterfaceId', self.querystring) + pcx_id = optional_from_querystring( + 'VpcPeeringConnectionId', self.querystring) self.ec2_backend.replace_route(route_table_id, destination_cidr_block, - gateway_id=gateway_id, - instance_id=instance_id, - interface_id=interface_id, - vpc_peering_connection_id=pcx_id) + gateway_id=gateway_id, + instance_id=instance_id, + interface_id=interface_id, + vpc_peering_connection_id=pcx_id) template = self.response_template(REPLACE_ROUTE_RESPONSE) return template.render() @@ -83,8 +92,10 @@ class RouteTables(BaseResponse): def replace_route_table_association(self): route_table_id = self.querystring.get('RouteTableId')[0] association_id = self.querystring.get('AssociationId')[0] - new_association_id = self.ec2_backend.replace_route_table_association(association_id, route_table_id) - template = self.response_template(REPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE) + new_association_id = self.ec2_backend.replace_route_table_association( + association_id, route_table_id) + template = self.response_template( + REPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE) return template.render(association_id=new_association_id) diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index 3451dc1ef..6f485fa31 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -1,7 +1,5 @@ from __future__ import unicode_literals -import collections - from moto.core.responses import BaseResponse from moto.ec2.utils import filters_from_querystring @@ -55,10 +53,11 @@ def process_rules_from_querystring(querystring): source_groups.append(group_dict['GroupName'][0]) yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges, - source_groups, source_group_ids) + source_groups, source_group_ids) class SecurityGroups(BaseResponse): + def authorize_security_group_egress(self): if self.is_not_dryrun('GrantSecurityGroupEgress'): for args in process_rules_from_querystring(self.querystring): @@ -77,12 +76,15 @@ class 
SecurityGroups(BaseResponse): vpc_id = self.querystring.get("VpcId", [None])[0] if self.is_not_dryrun('CreateSecurityGroup'): - group = self.ec2_backend.create_security_group(name, description, vpc_id=vpc_id) + group = self.ec2_backend.create_security_group( + name, description, vpc_id=vpc_id) template = self.response_template(CREATE_SECURITY_GROUP_RESPONSE) return template.render(group=group) def delete_security_group(self): - # TODO this should raise an error if there are instances in the group. See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html + # TODO this should raise an error if there are instances in the group. + # See + # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html name = self.querystring.get('GroupName') sg_id = self.querystring.get('GroupId') diff --git a/moto/ec2/responses/spot_fleets.py b/moto/ec2/responses/spot_fleets.py index 3004cc0bb..e39d9b178 100644 --- a/moto/ec2/responses/spot_fleets.py +++ b/moto/ec2/responses/spot_fleets.py @@ -7,21 +7,25 @@ class SpotFleets(BaseResponse): def cancel_spot_fleet_requests(self): spot_fleet_request_ids = self._get_multi_param("SpotFleetRequestId.") terminate_instances = self._get_param("TerminateInstances") - spot_fleets = self.ec2_backend.cancel_spot_fleet_requests(spot_fleet_request_ids, terminate_instances) + spot_fleets = self.ec2_backend.cancel_spot_fleet_requests( + spot_fleet_request_ids, terminate_instances) template = self.response_template(CANCEL_SPOT_FLEETS_TEMPLATE) return template.render(spot_fleets=spot_fleets) def describe_spot_fleet_instances(self): spot_fleet_request_id = self._get_param("SpotFleetRequestId") - spot_requests = self.ec2_backend.describe_spot_fleet_instances(spot_fleet_request_id) - template = self.response_template(DESCRIBE_SPOT_FLEET_INSTANCES_TEMPLATE) + spot_requests = self.ec2_backend.describe_spot_fleet_instances( + spot_fleet_request_id) + template = self.response_template( + DESCRIBE_SPOT_FLEET_INSTANCES_TEMPLATE) return template.render(spot_request_id=spot_fleet_request_id, spot_requests=spot_requests) def describe_spot_fleet_requests(self): spot_fleet_request_ids = self._get_multi_param("SpotFleetRequestId.") - requests = self.ec2_backend.describe_spot_fleet_requests(spot_fleet_request_ids) + requests = self.ec2_backend.describe_spot_fleet_requests( + spot_fleet_request_ids) template = self.response_template(DESCRIBE_SPOT_FLEET_TEMPLATE) return template.render(requests=requests) @@ -32,7 +36,8 @@ class SpotFleets(BaseResponse): iam_fleet_role = spot_config['iam_fleet_role'] allocation_strategy = spot_config['allocation_strategy'] - launch_specs = self._get_list_prefix("SpotFleetRequestConfig.LaunchSpecifications") + launch_specs = self._get_list_prefix( + "SpotFleetRequestConfig.LaunchSpecifications") request = self.ec2_backend.request_spot_fleet( spot_price=spot_price, @@ -45,6 +50,7 @@ class SpotFleets(BaseResponse): template = self.response_template(REQUEST_SPOT_FLEET_TEMPLATE) return template.render(request=request) + REQUEST_SPOT_FLEET_TEMPLATE = """ 60262cc5-2bd4-4c8d-98ed-example {{ request.id }} diff --git a/moto/ec2/responses/spot_instances.py b/moto/ec2/responses/spot_instances.py index 96e5a1ba4..b0e80a320 100644 --- a/moto/ec2/responses/spot_instances.py +++ b/moto/ec2/responses/spot_instances.py @@ -8,29 +8,35 @@ class SpotInstances(BaseResponse): def cancel_spot_instance_requests(self): request_ids = self._get_multi_param('SpotInstanceRequestId') if 
self.is_not_dryrun('CancelSpotInstance'): - requests = self.ec2_backend.cancel_spot_instance_requests(request_ids) + requests = self.ec2_backend.cancel_spot_instance_requests( + request_ids) template = self.response_template(CANCEL_SPOT_INSTANCES_TEMPLATE) return template.render(requests=requests) def create_spot_datafeed_subscription(self): if self.is_not_dryrun('CreateSpotDatafeedSubscription'): - raise NotImplementedError('SpotInstances.create_spot_datafeed_subscription is not yet implemented') + raise NotImplementedError( + 'SpotInstances.create_spot_datafeed_subscription is not yet implemented') def delete_spot_datafeed_subscription(self): if self.is_not_dryrun('DeleteSpotDatafeedSubscription'): - raise NotImplementedError('SpotInstances.delete_spot_datafeed_subscription is not yet implemented') + raise NotImplementedError( + 'SpotInstances.delete_spot_datafeed_subscription is not yet implemented') def describe_spot_datafeed_subscription(self): - raise NotImplementedError('SpotInstances.describe_spot_datafeed_subscription is not yet implemented') + raise NotImplementedError( + 'SpotInstances.describe_spot_datafeed_subscription is not yet implemented') def describe_spot_instance_requests(self): filters = filters_from_querystring(self.querystring) - requests = self.ec2_backend.describe_spot_instance_requests(filters=filters) + requests = self.ec2_backend.describe_spot_instance_requests( + filters=filters) template = self.response_template(DESCRIBE_SPOT_INSTANCES_TEMPLATE) return template.render(requests=requests) def describe_spot_price_history(self): - raise NotImplementedError('SpotInstances.describe_spot_price_history is not yet implemented') + raise NotImplementedError( + 'SpotInstances.describe_spot_price_history is not yet implemented') def request_spot_instances(self): price = self._get_param('SpotPrice') @@ -42,13 +48,17 @@ class SpotInstances(BaseResponse): launch_group = self._get_param('LaunchGroup') availability_zone_group = self._get_param('AvailabilityZoneGroup') key_name = self._get_param('LaunchSpecification.KeyName') - security_groups = self._get_multi_param('LaunchSpecification.SecurityGroup') + security_groups = self._get_multi_param( + 'LaunchSpecification.SecurityGroup') user_data = self._get_param('LaunchSpecification.UserData') - instance_type = self._get_param('LaunchSpecification.InstanceType', 'm1.small') - placement = self._get_param('LaunchSpecification.Placement.AvailabilityZone') + instance_type = self._get_param( + 'LaunchSpecification.InstanceType', 'm1.small') + placement = self._get_param( + 'LaunchSpecification.Placement.AvailabilityZone') kernel_id = self._get_param('LaunchSpecification.KernelId') ramdisk_id = self._get_param('LaunchSpecification.RamdiskId') - monitoring_enabled = self._get_param('LaunchSpecification.Monitoring.Enabled') + monitoring_enabled = self._get_param( + 'LaunchSpecification.Monitoring.Enabled') subnet_id = self._get_param('LaunchSpecification.SubnetId') if self.is_not_dryrun('RequestSpotInstance'): diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py index 9486a3ca1..67fd09a14 100644 --- a/moto/ec2/responses/subnets.py +++ b/moto/ec2/responses/subnets.py @@ -5,13 +5,15 @@ from moto.ec2.utils import filters_from_querystring class Subnets(BaseResponse): + def create_subnet(self): vpc_id = self.querystring.get('VpcId')[0] cidr_block = self.querystring.get('CidrBlock')[0] if 'AvailabilityZone' in self.querystring: availability_zone = self.querystring['AvailabilityZone'][0] else: - zone = 
random.choice(self.ec2_backend.describe_availability_zones()) + zone = random.choice( + self.ec2_backend.describe_availability_zones()) availability_zone = zone.name subnet = self.ec2_backend.create_subnet( vpc_id, diff --git a/moto/ec2/responses/tags.py b/moto/ec2/responses/tags.py index 8c2c43ba7..a747067fb 100644 --- a/moto/ec2/responses/tags.py +++ b/moto/ec2/responses/tags.py @@ -8,7 +8,8 @@ from moto.ec2.utils import sequence_from_querystring, tags_from_query_string, fi class TagResponse(BaseResponse): def create_tags(self): - resource_ids = sequence_from_querystring('ResourceId', self.querystring) + resource_ids = sequence_from_querystring( + 'ResourceId', self.querystring) validate_resource_ids(resource_ids) self.ec2_backend.do_resources_exist(resource_ids) tags = tags_from_query_string(self.querystring) @@ -17,7 +18,8 @@ class TagResponse(BaseResponse): return CREATE_RESPONSE def delete_tags(self): - resource_ids = sequence_from_querystring('ResourceId', self.querystring) + resource_ids = sequence_from_querystring( + 'ResourceId', self.querystring) validate_resource_ids(resource_ids) tags = tags_from_query_string(self.querystring) if self.is_not_dryrun('DeleteTags'): diff --git a/moto/ec2/responses/virtual_private_gateways.py b/moto/ec2/responses/virtual_private_gateways.py index e167437d5..2a677d36c 100644 --- a/moto/ec2/responses/virtual_private_gateways.py +++ b/moto/ec2/responses/virtual_private_gateways.py @@ -4,6 +4,7 @@ from moto.ec2.utils import filters_from_querystring class VirtualPrivateGateways(BaseResponse): + def attach_vpn_gateway(self): vpn_gateway_id = self.querystring.get('VpnGatewayId')[0] vpc_id = self.querystring.get('VpcId')[0] @@ -42,6 +43,7 @@ class VirtualPrivateGateways(BaseResponse): template = self.response_template(DETACH_VPN_GATEWAY_RESPONSE) return template.render(attachment=attachment) + CREATE_VPN_GATEWAY_RESPONSE = """ 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE diff --git a/moto/ec2/responses/vm_export.py b/moto/ec2/responses/vm_export.py index 98c3dd3ea..6fdf59ba3 100644 --- a/moto/ec2/responses/vm_export.py +++ b/moto/ec2/responses/vm_export.py @@ -3,11 +3,15 @@ from moto.core.responses import BaseResponse class VMExport(BaseResponse): + def cancel_export_task(self): - raise NotImplementedError('VMExport.cancel_export_task is not yet implemented') + raise NotImplementedError( + 'VMExport.cancel_export_task is not yet implemented') def create_instance_export_task(self): - raise NotImplementedError('VMExport.create_instance_export_task is not yet implemented') + raise NotImplementedError( + 'VMExport.create_instance_export_task is not yet implemented') def describe_export_tasks(self): - raise NotImplementedError('VMExport.describe_export_tasks is not yet implemented') + raise NotImplementedError( + 'VMExport.describe_export_tasks is not yet implemented') diff --git a/moto/ec2/responses/vm_import.py b/moto/ec2/responses/vm_import.py index ea88bdc98..8c2ba138c 100644 --- a/moto/ec2/responses/vm_import.py +++ b/moto/ec2/responses/vm_import.py @@ -3,14 +3,19 @@ from moto.core.responses import BaseResponse class VMImport(BaseResponse): + def cancel_conversion_task(self): - raise NotImplementedError('VMImport.cancel_conversion_task is not yet implemented') + raise NotImplementedError( + 'VMImport.cancel_conversion_task is not yet implemented') def describe_conversion_tasks(self): - raise NotImplementedError('VMImport.describe_conversion_tasks is not yet implemented') + raise NotImplementedError( + 'VMImport.describe_conversion_tasks is not yet implemented') 
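Before the remaining VM import/export stubs, one note on the tags.py hunk above: create_tags/delete_tags lean on tags_from_query_string, which walks numbered Tag.N.Key / Tag.N.Value pairs (its internals appear in the moto/ec2/utils.py hunk further down). A rough, self-contained sketch of that walk, with a hypothetical querystring; a key without a matching value maps to None:

    # Hypothetical parsed querystring for a CreateTags call.
    querystring = {
        'ResourceId.1': ['i-1234abcd'],
        'Tag.1.Key': ['Name'],
        'Tag.1.Value': ['web-server'],
        'Tag.2.Key': ['env'],  # no Tag.2.Value supplied
    }

    tags = {}
    index = 1
    while 'Tag.{0}.Key'.format(index) in querystring:
        key = querystring['Tag.{0}.Key'.format(index)][0]
        # The value is optional; a missing value becomes None.
        tags[key] = querystring.get('Tag.{0}.Value'.format(index), [None])[0]
        index += 1
    print(tags)  # {'Name': 'web-server', 'env': None}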
def import_instance(self): - raise NotImplementedError('VMImport.import_instance is not yet implemented') + raise NotImplementedError( + 'VMImport.import_instance is not yet implemented') def import_volume(self): - raise NotImplementedError('VMImport.import_volume is not yet implemented') + raise NotImplementedError( + 'VMImport.import_volume is not yet implemented') diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index 704dd7a3e..f6bff4310 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -3,34 +3,41 @@ from moto.core.responses import BaseResponse class VPCPeeringConnections(BaseResponse): + def create_vpc_peering_connection(self): vpc = self.ec2_backend.get_vpc(self.querystring.get('VpcId')[0]) - peer_vpc = self.ec2_backend.get_vpc(self.querystring.get('PeerVpcId')[0]) + peer_vpc = self.ec2_backend.get_vpc( + self.querystring.get('PeerVpcId')[0]) vpc_pcx = self.ec2_backend.create_vpc_peering_connection(vpc, peer_vpc) - template = self.response_template(CREATE_VPC_PEERING_CONNECTION_RESPONSE) + template = self.response_template( + CREATE_VPC_PEERING_CONNECTION_RESPONSE) return template.render(vpc_pcx=vpc_pcx) def delete_vpc_peering_connection(self): vpc_pcx_id = self.querystring.get('VpcPeeringConnectionId')[0] vpc_pcx = self.ec2_backend.delete_vpc_peering_connection(vpc_pcx_id) - template = self.response_template(DELETE_VPC_PEERING_CONNECTION_RESPONSE) + template = self.response_template( + DELETE_VPC_PEERING_CONNECTION_RESPONSE) return template.render(vpc_pcx=vpc_pcx) def describe_vpc_peering_connections(self): vpc_pcxs = self.ec2_backend.get_all_vpc_peering_connections() - template = self.response_template(DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE) + template = self.response_template( + DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE) return template.render(vpc_pcxs=vpc_pcxs) def accept_vpc_peering_connection(self): vpc_pcx_id = self.querystring.get('VpcPeeringConnectionId')[0] vpc_pcx = self.ec2_backend.accept_vpc_peering_connection(vpc_pcx_id) - template = self.response_template(ACCEPT_VPC_PEERING_CONNECTION_RESPONSE) + template = self.response_template( + ACCEPT_VPC_PEERING_CONNECTION_RESPONSE) return template.render(vpc_pcx=vpc_pcx) def reject_vpc_peering_connection(self): vpc_pcx_id = self.querystring.get('VpcPeeringConnectionId')[0] self.ec2_backend.reject_vpc_peering_connection(vpc_pcx_id) - template = self.response_template(REJECT_VPC_PEERING_CONNECTION_RESPONSE) + template = self.response_template( + REJECT_VPC_PEERING_CONNECTION_RESPONSE) return template.render() diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 3d2a99894..129f91a3b 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -5,9 +5,11 @@ from moto.ec2.utils import filters_from_querystring, vpc_ids_from_querystring class VPCs(BaseResponse): + def create_vpc(self): cidr_block = self.querystring.get('CidrBlock')[0] - instance_tenancy = self.querystring.get('InstanceTenancy', ['default'])[0] + instance_tenancy = self.querystring.get( + 'InstanceTenancy', ['default'])[0] vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy) template = self.response_template(CREATE_VPC_RESPONSE) return template.render(vpc=vpc) @@ -40,7 +42,8 @@ class VPCs(BaseResponse): if self.querystring.get('%s.Value' % attribute): attr_name = camelcase_to_underscores(attribute) attr_value = self.querystring.get('%s.Value' % attribute)[0] - 
self.ec2_backend.modify_vpc_attribute(vpc_id, attr_name, attr_value) + self.ec2_backend.modify_vpc_attribute( + vpc_id, attr_name, attr_value) return MODIFY_VPC_ATTRIBUTE_RESPONSE diff --git a/moto/ec2/responses/vpn_connections.py b/moto/ec2/responses/vpn_connections.py index 7825e7ebb..2a4a7ef99 100644 --- a/moto/ec2/responses/vpn_connections.py +++ b/moto/ec2/responses/vpn_connections.py @@ -4,23 +4,27 @@ from moto.ec2.utils import filters_from_querystring, sequence_from_querystring class VPNConnections(BaseResponse): + def create_vpn_connection(self): type = self.querystring.get("Type", [None])[0] cgw_id = self.querystring.get("CustomerGatewayId", [None])[0] vgw_id = self.querystring.get("VPNGatewayId", [None])[0] static_routes = self.querystring.get("StaticRoutesOnly", [None])[0] - vpn_connection = self.ec2_backend.create_vpn_connection(type, cgw_id, vgw_id, static_routes_only=static_routes) + vpn_connection = self.ec2_backend.create_vpn_connection( + type, cgw_id, vgw_id, static_routes_only=static_routes) template = self.response_template(CREATE_VPN_CONNECTION_RESPONSE) return template.render(vpn_connection=vpn_connection) def delete_vpn_connection(self): vpn_connection_id = self.querystring.get('VpnConnectionId')[0] - vpn_connection = self.ec2_backend.delete_vpn_connection(vpn_connection_id) + vpn_connection = self.ec2_backend.delete_vpn_connection( + vpn_connection_id) template = self.response_template(DELETE_VPN_CONNECTION_RESPONSE) return template.render(vpn_connection=vpn_connection) def describe_vpn_connections(self): - vpn_connection_ids = sequence_from_querystring('VpnConnectionId', self.querystring) + vpn_connection_ids = sequence_from_querystring( + 'VpnConnectionId', self.querystring) filters = filters_from_querystring(self.querystring) vpn_connections = self.ec2_backend.get_all_vpn_connections( vpn_connection_ids=vpn_connection_ids, filters=filters) diff --git a/moto/ec2/responses/windows.py b/moto/ec2/responses/windows.py index 0a5e31a0e..13dfa9b67 100644 --- a/moto/ec2/responses/windows.py +++ b/moto/ec2/responses/windows.py @@ -3,14 +3,19 @@ from moto.core.responses import BaseResponse class Windows(BaseResponse): + def bundle_instance(self): - raise NotImplementedError('Windows.bundle_instance is not yet implemented') + raise NotImplementedError( + 'Windows.bundle_instance is not yet implemented') def cancel_bundle_task(self): - raise NotImplementedError('Windows.cancel_bundle_task is not yet implemented') + raise NotImplementedError( + 'Windows.cancel_bundle_task is not yet implemented') def describe_bundle_tasks(self): - raise NotImplementedError('Windows.describe_bundle_tasks is not yet implemented') + raise NotImplementedError( + 'Windows.describe_bundle_tasks is not yet implemented') def get_password_data(self): - raise NotImplementedError('Windows.get_password_data is not yet implemented') + raise NotImplementedError( + 'Windows.get_password_data is not yet implemented') diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 4d0f75254..8cba650a6 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -32,13 +32,15 @@ EC2_RESOURCE_TO_PREFIX = { 'vpn-gateway': 'vgw'} -EC2_PREFIX_TO_RESOURCE = dict((v, k) for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) +EC2_PREFIX_TO_RESOURCE = dict((v, k) + for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) def random_id(prefix='', size=8): chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f'] - resource_id = ''.join(six.text_type(random.choice(chars)) for x in range(size)) + resource_id = 
''.join(six.text_type(random.choice(chars)) + for x in range(size)) return '{0}-{1}'.format(prefix, resource_id) @@ -228,7 +230,8 @@ def tags_from_query_string(querystring_dict): tag_key = querystring_dict.get("Tag.{0}.Key".format(tag_index))[0] tag_value_key = "Tag.{0}.Value".format(tag_index) if tag_value_key in querystring_dict: - response_values[tag_key] = querystring_dict.get(tag_value_key)[0] + response_values[tag_key] = querystring_dict.get(tag_value_key)[ + 0] else: response_values[tag_key] = None return response_values @@ -262,7 +265,8 @@ def dhcp_configuration_from_querystring(querystring, option=u'DhcpConfiguration' key_index = key.split(".")[1] value_index = 1 while True: - value_key = u'{0}.{1}.Value.{2}'.format(option, key_index, value_index) + value_key = u'{0}.{1}.Value.{2}'.format( + option, key_index, value_index) if value_key in querystring: values.extend(querystring[value_key]) else: @@ -337,16 +341,20 @@ def get_obj_tag(obj, filter_name): tags = dict((tag['key'], tag['value']) for tag in obj.get_tags()) return tags.get(tag_name) + def get_obj_tag_names(obj): tags = set((tag['key'] for tag in obj.get_tags())) return tags + def get_obj_tag_values(obj): tags = set((tag['value'] for tag in obj.get_tags())) return tags + def tag_filter_matches(obj, filter_name, filter_values): - regex_filters = [re.compile(simple_aws_filter_to_re(f)) for f in filter_values] + regex_filters = [re.compile(simple_aws_filter_to_re(f)) + for f in filter_values] if filter_name == 'tag-key': tag_values = get_obj_tag_names(obj) elif filter_name == 'tag-value': @@ -400,7 +408,7 @@ def instance_value_in_filter_values(instance_value, filter_values): if not set(filter_values).intersection(set(instance_value)): return False elif instance_value not in filter_values: - return False + return False return True @@ -464,7 +472,8 @@ def is_filter_matching(obj, filter, filter_value): def generic_filter(filters, objects): if filters: for (_filter, _filter_value) in filters.items(): - objects = [obj for obj in objects if is_filter_matching(obj, _filter, _filter_value)] + objects = [obj for obj in objects if is_filter_matching( + obj, _filter, _filter_value)] return objects @@ -480,8 +489,10 @@ def simple_aws_filter_to_re(filter_string): def random_key_pair(): def random_hex(): return chr(random.choice(list(range(48, 58)) + list(range(97, 102)))) + def random_fingerprint(): - return ':'.join([random_hex()+random_hex() for i in range(20)]) + return ':'.join([random_hex() + random_hex() for i in range(20)]) + def random_material(): return ''.join([ chr(random.choice(list(range(65, 91)) + list(range(48, 58)) + @@ -489,7 +500,7 @@ def random_key_pair(): for i in range(1000) ]) material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \ - "-----END RSA PRIVATE KEY-----" + "-----END RSA PRIVATE KEY-----" return { 'fingerprint': random_fingerprint(), 'material': material @@ -500,9 +511,11 @@ def get_prefix(resource_id): resource_id_prefix, separator, after = resource_id.partition('-') if resource_id_prefix == EC2_RESOURCE_TO_PREFIX['network-interface']: if after.startswith('attach'): - resource_id_prefix = EC2_RESOURCE_TO_PREFIX['network-interface-attachment'] + resource_id_prefix = EC2_RESOURCE_TO_PREFIX[ + 'network-interface-attachment'] if resource_id_prefix not in EC2_RESOURCE_TO_PREFIX.values(): - uuid4hex = re.compile('[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}\Z', re.I) + uuid4hex = re.compile( + '[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}\Z', re.I) if uuid4hex.match(resource_id) is not None: 
             resource_id_prefix = EC2_RESOURCE_TO_PREFIX['reserved-instance']
         else:
@@ -539,20 +552,20 @@ def generate_instance_identity_document(instance):
     """
 
     document = {
-            'devPayProductCodes': None,
-            'availabilityZone': instance.placement['AvailabilityZone'],
-            'privateIp': instance.private_ip_address,
-            'version': '2010-8-31',
-            'region': instance.placement['AvailabilityZone'][:-1],
-            'instanceId': instance.id,
-            'billingProducts': None,
-            'instanceType': instance.instance_type,
-            'accountId': '012345678910',
-            'pendingTime': '2015-11-19T16:32:11Z',
-            'imageId': instance.image_id,
-            'kernelId': instance.kernel_id,
-            'ramdiskId': instance.ramdisk_id,
-            'architecture': instance.architecture,
-            }
+        'devPayProductCodes': None,
+        'availabilityZone': instance.placement['AvailabilityZone'],
+        'privateIp': instance.private_ip_address,
+        'version': '2010-8-31',
+        'region': instance.placement['AvailabilityZone'][:-1],
+        'instanceId': instance.id,
+        'billingProducts': None,
+        'instanceType': instance.instance_type,
+        'accountId': '012345678910',
+        'pendingTime': '2015-11-19T16:32:11Z',
+        'imageId': instance.image_id,
+        'kernelId': instance.kernel_id,
+        'ramdiskId': instance.ramdisk_id,
+        'architecture': instance.architecture,
+    }
 
     return document
diff --git a/moto/ecs/__init__.py b/moto/ecs/__init__.py
index 6864355ad..8fb3dd41e 100644
--- a/moto/ecs/__init__.py
+++ b/moto/ecs/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import ecs_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 ecs_backend = ecs_backends['us-east-1']
 mock_ecs = base_decorator(ecs_backends)
diff --git a/moto/ecs/models.py b/moto/ecs/models.py
index 3ce7be8b5..5a046c376 100644
--- a/moto/ecs/models.py
+++ b/moto/ecs/models.py
@@ -8,6 +8,7 @@ from copy import copy
 
 
 class BaseObject(object):
+
     def camelCase(self, key):
         words = []
         for i, word in enumerate(key.split('_')):
@@ -31,9 +32,11 @@ class BaseObject(object):
 
 
 class Cluster(BaseObject):
+
     def __init__(self, cluster_name):
         self.active_services_count = 0
-        self.arn = 'arn:aws:ecs:us-east-1:012345678910:cluster/{0}'.format(cluster_name)
+        self.arn = 'arn:aws:ecs:us-east-1:012345678910:cluster/{0}'.format(
+            cluster_name)
         self.name = cluster_name
         self.pending_tasks_count = 0
         self.registered_container_instances_count = 0
@@ -58,9 +61,12 @@ class Cluster(BaseObject):
 
         ecs_backend = ecs_backends[region_name]
         return ecs_backend.create_cluster(
-            # ClusterName is optional in CloudFormation, thus create a random name if necessary
-            cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
+            # ClusterName is optional in CloudFormation, thus create a random
+            # name if necessary
+            cluster_name=properties.get(
+                'ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
         )
+
     @classmethod
     def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
         properties = cloudformation_json['Properties']
@@ -69,8 +75,10 @@ class Cluster(BaseObject):
             ecs_backend = ecs_backends[region_name]
             ecs_backend.delete_cluster(original_resource.arn)
             return ecs_backend.create_cluster(
-                # ClusterName is optional in CloudFormation, thus create a random name if necessary
-                cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
+                # ClusterName is optional in CloudFormation, thus create a
+                # random name if necessary
+                cluster_name=properties.get(
+                    'ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
             )
         else:
             # no-op when nothing changed between old and new resources
@@ -78,9 +86,11 @@ class Cluster(BaseObject):
 
 
 class TaskDefinition(BaseObject):
+
     def __init__(self, family, revision, container_definitions, volumes=None):
         self.family = family
-        self.arn = 'arn:aws:ecs:us-east-1:012345678910:task-definition/{0}:{1}'.format(family, revision)
+        self.arn = 'arn:aws:ecs:us-east-1:012345678910:task-definition/{0}:{1}'.format(
+            family, revision)
         self.container_definitions = container_definitions
         if volumes is None:
             self.volumes = []
@@ -98,7 +108,8 @@ class TaskDefinition(BaseObject):
     def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
         properties = cloudformation_json['Properties']
 
-        family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
+        family = properties.get(
+            'Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
         container_definitions = properties['ContainerDefinitions']
         volumes = properties['Volumes']
 
@@ -110,14 +121,16 @@ class TaskDefinition(BaseObject):
     def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
         properties = cloudformation_json['Properties']
 
-        family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
+        family = properties.get(
+            'Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
         container_definitions = properties['ContainerDefinitions']
         volumes = properties['Volumes']
         if (original_resource.family != family or
-                original_resource.container_definitions != container_definitions or
-                original_resource.volumes != volumes
-                # currently TaskRoleArn isn't stored at TaskDefinition instances
-                ):
+                original_resource.container_definitions != container_definitions or
+                original_resource.volumes != volumes):
+            # currently TaskRoleArn isn't stored at TaskDefinition
+            # instances
+
             ecs_backend = ecs_backends[region_name]
             ecs_backend.deregister_task_definition(original_resource.arn)
             return ecs_backend.register_task_definition(
@@ -126,10 +139,13 @@ class TaskDefinition(BaseObject):
             # no-op when nothing changed between old and new resources
             return original_resource
 
+
 class Task(BaseObject):
+
     def __init__(self, cluster, task_definition, container_instance_arn, overrides={}, started_by=''):
         self.cluster_arn = cluster.arn
-        self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format(str(uuid.uuid1()))
+        self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format(
+            str(uuid.uuid1()))
         self.container_instance_arn = container_instance_arn
         self.last_status = 'RUNNING'
         self.desired_status = 'RUNNING'
@@ -146,9 +162,11 @@ class Task(BaseObject):
 
 
 class Service(BaseObject):
+
     def __init__(self, cluster, service_name, task_definition, desired_count):
         self.cluster_arn = cluster.arn
-        self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format(service_name)
+        self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format(
+            service_name)
         self.name = service_name
         self.status = 'ACTIVE'
         self.running_count = 0
@@ -209,7 +227,8 @@ class Service(BaseObject):
             # TODO: LoadBalancers
             # TODO: Role
             ecs_backend.delete_service(cluster_name, service_name)
-            new_service_name = '{0}Service{1}'.format(cluster_name, int(random() * 10 ** 6))
+            new_service_name = '{0}Service{1}'.format(
+                cluster_name, int(random() * 10 ** 6))
             return ecs_backend.create_service(
                 cluster_name, new_service_name, task_definition,
                 desired_count)
         else:
@@ -217,20 +236,22 @@ class Service(BaseObject):
 
 
 class ContainerInstance(BaseObject):
+
     def __init__(self, ec2_instance_id):
         self.ec2_instance_id = ec2_instance_id
         self.status = 'ACTIVE'
         self.registeredResources = []
         self.agentConnected = True
-        self.containerInstanceArn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(str(uuid.uuid1()))
+        self.containerInstanceArn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(
+            str(uuid.uuid1()))
         self.pendingTaskCount = 0
         self.remainingResources = []
         self.runningTaskCount = 0
         self.versionInfo = {
-                'agentVersion': "1.0.0",
-                'agentHash': '4023248',
-                'dockerVersion': 'DockerVersion: 1.5.0'
-                }
+            'agentVersion': "1.0.0",
+            'agentHash': '4023248',
+            'dockerVersion': 'DockerVersion: 1.5.0'
+        }
 
     @property
     def response_object(self):
@@ -240,9 +261,11 @@ class ContainerInstance(BaseObject):
 
 
 class ContainerInstanceFailure(BaseObject):
+
     def __init__(self, reason, container_instance_id):
         self.reason = reason
-        self.arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(container_instance_id)
+        self.arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(
+            container_instance_id)
 
     @property
     def response_object(self):
@@ -253,6 +276,7 @@ class ContainerInstanceFailure(BaseObject):
 
 
 class EC2ContainerServiceBackend(BaseBackend):
+
     def __init__(self):
         self.clusters = {}
         self.task_definitions = {}
@@ -261,19 +285,21 @@ class EC2ContainerServiceBackend(BaseBackend):
         self.container_instances = {}
 
     def describe_task_definition(self, task_definition_str):
-        task_definition_components = task_definition_str.split(':')
-        if len(task_definition_components) == 2:
-            family, revision = task_definition_components
+        task_definition_name = task_definition_str.split('/')[-1]
+        if ':' in task_definition_name:
+            family, revision = task_definition_name.split(':')
             revision = int(revision)
         else:
-            family = task_definition_components[0]
-            revision = -1
+            family = task_definition_name
+            revision = len(self.task_definitions.get(family, []))
+
         if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]):
             return self.task_definitions[family][revision - 1]
         elif family in self.task_definitions and revision == -1:
             return self.task_definitions[family][revision]
         else:
-            raise Exception("{0} is not a task_definition".format(task_definition_str))
+            raise Exception(
+                "{0} is not a task_definition".format(task_definition_name))
 
     def create_cluster(self, cluster_name):
         cluster = Cluster(cluster_name)
@@ -295,9 +321,11 @@ class EC2ContainerServiceBackend(BaseBackend):
         for cluster in list_clusters_name:
             cluster_name = cluster.split('/')[-1]
             if cluster_name in self.clusters:
-                list_clusters.append(self.clusters[cluster_name].response_object)
+                list_clusters.append(
+                    self.clusters[cluster_name].response_object)
             else:
-                raise Exception("{0} is not a cluster".format(cluster_name))
+                raise Exception(
+                    "{0} is not a cluster".format(cluster_name))
         return list_clusters
 
     def delete_cluster(self, cluster_str):
@@ -313,7 +341,8 @@ class EC2ContainerServiceBackend(BaseBackend):
         else:
             self.task_definitions[family] = []
             revision = 1
-        task_definition = TaskDefinition(family, revision, container_definitions, volumes)
+        task_definition = TaskDefinition(
+            family, revision, container_definitions, volumes)
         self.task_definitions[family].append(task_definition)
         return task_definition
 
@@ -324,23 +353,10 @@ class EC2ContainerServiceBackend(BaseBackend):
         """
         task_arns = []
         for task_definition_list in self.task_definitions.values():
-            task_arns.extend([task_definition.arn for task_definition in task_definition_list])
+            task_arns.extend(
+                [task_definition.arn for task_definition in task_definition_list])
         return task_arns
 
-    def describe_task_definition(self, task_definition_str):
-        task_definition_name = task_definition_str.split('/')[-1]
-        if ':' in task_definition_name:
-            family, revision = task_definition_name.split(':')
-            revision = int(revision)
-        else:
-            family = task_definition_name
-            revision = len(self.task_definitions.get(family, []))
-
-        if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]):
-            return self.task_definitions[family][revision-1]
-        else:
-            raise Exception("{0} is not a task_definition".format(task_definition_name))
-
     def deregister_task_definition(self, task_definition_str):
         task_definition_name = task_definition_str.split('/')[-1]
         family, revision = task_definition_name.split(':')
@@ -348,7 +364,8 @@ class EC2ContainerServiceBackend(BaseBackend):
         if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]):
             return self.task_definitions[family].pop(revision - 1)
         else:
-            raise Exception("{0} is not a task_definition".format(task_definition_name))
+            raise Exception(
+                "{0} is not a task_definition".format(task_definition_name))
 
     def run_task(self, cluster_str, task_definition_str, count, overrides, started_by):
         cluster_name = cluster_str.split('/')[-1]
@@ -360,14 +377,17 @@ class EC2ContainerServiceBackend(BaseBackend):
         if cluster_name not in self.tasks:
             self.tasks[cluster_name] = {}
         tasks = []
-        container_instances = list(self.container_instances.get(cluster_name, {}).keys())
+        container_instances = list(
+            self.container_instances.get(cluster_name, {}).keys())
         if not container_instances:
-            raise Exception("No instances found in cluster {}".format(cluster_name))
+            raise Exception(
+                "No instances found in cluster {}".format(cluster_name))
         for _ in range(count or 1):
             container_instance_arn = self.container_instances[cluster_name][
                 container_instances[randint(0, len(container_instances) - 1)]
             ].containerInstanceArn
-            task = Task(cluster, task_definition, container_instance_arn, overrides or {}, started_by or '')
+            task = Task(cluster, task_definition, container_instance_arn,
+                        overrides or {}, started_by or '')
             tasks.append(task)
             self.tasks[cluster_name][task.task_arn] = task
         return tasks
@@ -385,13 +405,15 @@ class EC2ContainerServiceBackend(BaseBackend):
         if not container_instances:
             raise Exception("No container instance list provided")
 
-        container_instance_ids = [x.split('/')[-1] for x in container_instances]
+        container_instance_ids = [x.split('/')[-1]
+                                  for x in container_instances]
        
         for container_instance_id in container_instance_ids:
             container_instance_arn = self.container_instances[cluster_name][
                 container_instance_id
             ].containerInstanceArn
-            task = Task(cluster, task_definition, container_instance_arn, overrides or {}, started_by or '')
+            task = Task(cluster, task_definition, container_instance_arn,
+                        overrides or {}, started_by or '')
             tasks.append(task)
             self.tasks[cluster_name][task.task_arn] = task
         return tasks
@@ -418,17 +440,18 @@ class EC2ContainerServiceBackend(BaseBackend):
                     filtered_tasks.append(task)
         if cluster_str:
             cluster_name = cluster_str.split('/')[-1]
-            if cluster_name in self.clusters:
-                cluster = self.clusters[cluster_name]
-            else:
+            if cluster_name not in self.clusters:
                 raise Exception("{0} is not a cluster".format(cluster_name))
-            filtered_tasks = list(filter(lambda t: cluster_name in t.cluster_arn, filtered_tasks))
+            filtered_tasks = list(
+                filter(lambda t: cluster_name in t.cluster_arn, filtered_tasks))
 
         if container_instance:
-            filtered_tasks = list(filter(lambda t: container_instance in t.container_instance_arn, filtered_tasks))
+            filtered_tasks = list(filter(
+                lambda t: container_instance in t.container_instance_arn, filtered_tasks))
 
         if started_by:
-            filtered_tasks = list(filter(lambda t: started_by == t.started_by, filtered_tasks))
+            filtered_tasks = list(
+                filter(lambda t: started_by == t.started_by, filtered_tasks))
         return [t.task_arn for t in filtered_tasks]
 
     def stop_task(self, cluster_str, task_str, reason):
@@ -441,14 +464,16 @@ class EC2ContainerServiceBackend(BaseBackend):
         task_id = task_str.split('/')[-1]
         tasks = self.tasks.get(cluster_name, None)
         if not tasks:
-            raise Exception("Cluster {} has no registered tasks".format(cluster_name))
+            raise Exception(
+                "Cluster {} has no registered tasks".format(cluster_name))
         for task in tasks.keys():
             if task.endswith(task_id):
                 tasks[task].last_status = 'STOPPED'
                 tasks[task].desired_status = 'STOPPED'
                 tasks[task].stopped_reason = reason
                 return tasks[task]
-        raise Exception("Could not find task {} on cluster {}".format(task_str, cluster_name))
+        raise Exception("Could not find task {} on cluster {}".format(
+            task_str, cluster_name))
 
     def create_service(self, cluster_str, service_name, task_definition_str, desired_count):
         cluster_name = cluster_str.split('/')[-1]
@@ -458,7 +483,8 @@ class EC2ContainerServiceBackend(BaseBackend):
             raise Exception("{0} is not a cluster".format(cluster_name))
         task_definition = self.describe_task_definition(task_definition_str)
         desired_count = desired_count if desired_count is not None else 0
-        service = Service(cluster, service_name, task_definition, desired_count)
+        service = Service(cluster, service_name,
+                          task_definition, desired_count)
         cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
         self.services[cluster_service_pair] = service
         return service
@@ -476,7 +502,8 @@ class EC2ContainerServiceBackend(BaseBackend):
         result = []
         for existing_service_name, existing_service_obj in sorted(self.services.items()):
             for requested_name_or_arn in service_names_or_arns:
-                cluster_service_pair = '{0}:{1}'.format(cluster_name, requested_name_or_arn)
+                cluster_service_pair = '{0}:{1}'.format(
+                    cluster_name, requested_name_or_arn)
                 if cluster_service_pair == existing_service_name or existing_service_obj.arn == requested_name_or_arn:
                     result.append(existing_service_obj)
         return result
@@ -486,13 +513,16 @@ class EC2ContainerServiceBackend(BaseBackend):
         cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
         if cluster_service_pair in self.services:
             if task_definition_str is not None:
-                task_definition = self.describe_task_definition(task_definition_str)
-                self.services[cluster_service_pair].task_definition = task_definition_str
+                self.describe_task_definition(task_definition_str)
+                self.services[
+                    cluster_service_pair].task_definition = task_definition_str
             if desired_count is not None:
-                self.services[cluster_service_pair].desired_count = desired_count
+                self.services[
+                    cluster_service_pair].desired_count = desired_count
             return self.services[cluster_service_pair]
         else:
-            raise Exception("cluster {0} or service {1} does not exist".format(cluster_name, service_name))
+            raise Exception("cluster {0} or service {1} does not exist".format(
+                cluster_name, service_name))
 
     def delete_service(self, cluster_name, service_name):
         cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
@@ -503,7 +533,8 @@ class EC2ContainerServiceBackend(BaseBackend):
             else:
                 return self.services.pop(cluster_service_pair)
         else:
-            raise Exception("cluster {0} or service {1} does not exist".format(cluster_name, service_name))
+            raise Exception("cluster {0} or service {1} does not exist".format(
+                cluster_name, service_name))
 
     def register_container_instance(self, cluster_str, ec2_instance_id):
         cluster_name = cluster_str.split('/')[-1]
@@ -512,14 +543,18 @@ class EC2ContainerServiceBackend(BaseBackend):
         container_instance = ContainerInstance(ec2_instance_id)
         if not self.container_instances.get(cluster_name):
             self.container_instances[cluster_name] = {}
-        container_instance_id = container_instance.containerInstanceArn.split('/')[-1]
-        self.container_instances[cluster_name][container_instance_id] = container_instance
+        container_instance_id = container_instance.containerInstanceArn.split(
+            '/')[-1]
+        self.container_instances[cluster_name][
+            container_instance_id] = container_instance
         return container_instance
 
     def list_container_instances(self, cluster_str):
         cluster_name = cluster_str.split('/')[-1]
-        container_instances_values = self.container_instances.get(cluster_name, {}).values()
-        container_instances = [ci.containerInstanceArn for ci in container_instances_values]
+        container_instances_values = self.container_instances.get(
+            cluster_name, {}).values()
+        container_instances = [
+            ci.containerInstanceArn for ci in container_instances_values]
         return sorted(container_instances)
 
     def describe_container_instances(self, cluster_str, list_container_instance_ids):
@@ -529,11 +564,13 @@ class EC2ContainerServiceBackend(BaseBackend):
         failures = []
         container_instance_objects = []
         for container_instance_id in list_container_instance_ids:
-            container_instance = self.container_instances[cluster_name].get(container_instance_id, None)
+            container_instance = self.container_instances[
+                cluster_name].get(container_instance_id, None)
             if container_instance is not None:
                 container_instance_objects.append(container_instance)
             else:
-                failures.append(ContainerInstanceFailure('MISSING', container_instance_id))
+                failures.append(ContainerInstanceFailure(
+                    'MISSING', container_instance_id))
 
         return container_instance_objects, failures
diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py
index a8c0dddac..b28ec6a4e 100644
--- a/moto/ecs/responses.py
+++ b/moto/ecs/responses.py
@@ -1,12 +1,12 @@
 from __future__ import unicode_literals
 import json
-import uuid
 
 from moto.core.responses import BaseResponse
 from .models import ecs_backends
 
 
 class EC2ContainerServiceResponse(BaseResponse):
+
     @property
     def ecs_backend(self):
         return ecs_backends[self.region]
@@ -34,8 +34,7 @@ class EC2ContainerServiceResponse(BaseResponse):
         cluster_arns = self.ecs_backend.list_clusters()
         return json.dumps({
             'clusterArns': cluster_arns
-            #,
-            #'nextToken': str(uuid.uuid1())
+            # 'nextToken': str(uuid.uuid1())
         })
 
     def describe_clusters(self):
@@ -57,7 +56,8 @@ class EC2ContainerServiceResponse(BaseResponse):
         family = self._get_param('family')
         container_definitions = self._get_param('containerDefinitions')
         volumes = self._get_param('volumes')
-        task_definition = self.ecs_backend.register_task_definition(family, container_definitions, volumes)
+        task_definition = self.ecs_backend.register_task_definition(
+            family, container_definitions, volumes)
         return json.dumps({
             'taskDefinition': task_definition.response_object
         })
@@ -66,43 +66,7 @@
         task_definition_arns = self.ecs_backend.list_task_definitions()
         return json.dumps({
             'taskDefinitionArns': task_definition_arns
-            #,
-            #'nextToken': str(uuid.uuid1())
-        })
-
-    def describe_task_definition(self):
-        task_definition_str = self._get_param('taskDefinition')
-        task_definition = self.ecs_backend.describe_task_definition(task_definition_str)
-        return json.dumps({
-            'taskDefinition': task_definition.response_object
-        })
-
-    def deregister_task_definition(self):
-        task_definition_str = self._get_param('taskDefinition')
-        task_definition = self.ecs_backend.deregister_task_definition(task_definition_str)
-        return json.dumps({
-            'taskDefinition': task_definition.response_object
-        })
-
-    def run_task(self):
-        cluster_str = self._get_param('cluster')
-        overrides = self._get_param('overrides')
-        task_definition_str = self._get_param('taskDefinition')
-        count = self._get_int_param('count')
-        started_by = self._get_param('startedBy')
-        tasks = self.ecs_backend.run_task(cluster_str, task_definition_str, count, overrides, started_by)
-        return json.dumps({
-            'tasks': [task.response_object for task in tasks],
-            'failures': []
-        })
-
-    def describe_tasks(self):
-        cluster = self._get_param('cluster')
-        tasks = self._get_param('tasks')
-        data = self.ecs_backend.describe_tasks(cluster, tasks)
-        return json.dumps({
-            'tasks': [task.response_object for task in data],
-            'failures': []
+            # 'nextToken': str(uuid.uuid1())
         })
 
     def describe_task_definition(self):
@@ -113,17 +77,48 @@ class EC2ContainerServiceResponse(BaseResponse):
             'failures': []
         })
 
+    def deregister_task_definition(self):
+        task_definition_str = self._get_param('taskDefinition')
+        task_definition = self.ecs_backend.deregister_task_definition(
+            task_definition_str)
+        return json.dumps({
+            'taskDefinition': task_definition.response_object
+        })
+
+    def run_task(self):
+        cluster_str = self._get_param('cluster')
+        overrides = self._get_param('overrides')
+        task_definition_str = self._get_param('taskDefinition')
+        count = self._get_int_param('count')
+        started_by = self._get_param('startedBy')
+        tasks = self.ecs_backend.run_task(
+            cluster_str, task_definition_str, count, overrides, started_by)
+        return json.dumps({
+            'tasks': [task.response_object for task in tasks],
+            'failures': []
+        })
+
+    def describe_tasks(self):
+        cluster = self._get_param('cluster')
+        tasks = self._get_param('tasks')
+        data = self.ecs_backend.describe_tasks(cluster, tasks)
+        return json.dumps({
+            'tasks': [task.response_object for task in data],
+            'failures': []
+        })
+
     def start_task(self):
         cluster_str = self._get_param('cluster')
         overrides = self._get_param('overrides')
         task_definition_str = self._get_param('taskDefinition')
         container_instances = self._get_param('containerInstances')
         started_by = self._get_param('startedBy')
-        tasks = self.ecs_backend.start_task(cluster_str, task_definition_str, container_instances, overrides, started_by)
+        tasks = self.ecs_backend.start_task(
+            cluster_str, task_definition_str, container_instances, overrides, started_by)
         return json.dumps({
             'tasks': [task.response_object for task in tasks],
             'failures': []
-            })
+        })
 
     def list_tasks(self):
         cluster_str = self._get_param('cluster')
@@ -132,11 +127,11 @@ class EC2ContainerServiceResponse(BaseResponse):
         started_by = self._get_param('startedBy')
         service_name = self._get_param('serviceName')
         desiredStatus = self._get_param('desiredStatus')
-        task_arns = self.ecs_backend.list_tasks(cluster_str, container_instance, family, started_by, service_name, desiredStatus)
+        task_arns = self.ecs_backend.list_tasks(
+            cluster_str, container_instance, family, started_by, service_name, desiredStatus)
         return json.dumps({
             'taskArns': task_arns
-            })
-
+        })
 
     def stop_task(self):
         cluster_str = self._get_param('cluster')
@@ -145,15 +140,15 @@ class EC2ContainerServiceResponse(BaseResponse):
         task = self.ecs_backend.stop_task(cluster_str, task, reason)
         return json.dumps({
             'task': task.response_object
-            })
-
+        })
 
     def create_service(self):
         cluster_str = self._get_param('cluster')
         service_name = self._get_param('serviceName')
         task_definition_str = self._get_param('taskDefinition')
         desired_count = self._get_int_param('desiredCount')
-        service = self.ecs_backend.create_service(cluster_str, service_name, task_definition_str, desired_count)
+        service = self.ecs_backend.create_service(
+            cluster_str, service_name, task_definition_str, desired_count)
         return json.dumps({
             'service': service.response_object
         })
@@ -170,7 +165,8 @@ class EC2ContainerServiceResponse(BaseResponse):
     def describe_services(self):
         cluster_str = self._get_param('cluster')
         service_names = self._get_param('services')
-        services = self.ecs_backend.describe_services(cluster_str, service_names)
+        services = self.ecs_backend.describe_services(
+            cluster_str, service_names)
         return json.dumps({
             'services': [service.response_object for service in services],
             'failures': []
@@ -181,7 +177,8 @@ class EC2ContainerServiceResponse(BaseResponse):
         service_name = self._get_param('service')
         task_definition = self._get_param('taskDefinition')
         desired_count = self._get_int_param('desiredCount')
-        service = self.ecs_backend.update_service(cluster_str, service_name, task_definition, desired_count)
+        service = self.ecs_backend.update_service(
+            cluster_str, service_name, task_definition, desired_count)
         return json.dumps({
             'service': service.response_object
         })
@@ -196,17 +193,20 @@ class EC2ContainerServiceResponse(BaseResponse):
 
     def register_container_instance(self):
         cluster_str = self._get_param('cluster')
-        instance_identity_document_str = self._get_param('instanceIdentityDocument')
+        instance_identity_document_str = self._get_param(
+            'instanceIdentityDocument')
         instance_identity_document = json.loads(instance_identity_document_str)
         ec2_instance_id = instance_identity_document["instanceId"]
-        container_instance = self.ecs_backend.register_container_instance(cluster_str, ec2_instance_id)
+        container_instance = self.ecs_backend.register_container_instance(
+            cluster_str, ec2_instance_id)
         return json.dumps({
-            'containerInstance' : container_instance.response_object
+            'containerInstance': container_instance.response_object
         })
 
     def list_container_instances(self):
         cluster_str = self._get_param('cluster')
-        container_instance_arns = self.ecs_backend.list_container_instances(cluster_str)
+        container_instance_arns = self.ecs_backend.list_container_instances(
+            cluster_str)
        return json.dumps({
             'containerInstanceArns': container_instance_arns
         })
@@ -214,8 +214,9 @@ class EC2ContainerServiceResponse(BaseResponse):
     def describe_container_instances(self):
         cluster_str = self._get_param('cluster')
         list_container_instance_arns = self._get_param('containerInstances')
-        container_instances, failures = self.ecs_backend.describe_container_instances(cluster_str, list_container_instance_arns)
+        container_instances, failures = self.ecs_backend.describe_container_instances(
+            cluster_str, list_container_instance_arns)
         return json.dumps({
-                'failures': [ci.response_object for ci in failures],
-                'containerInstances': [ci.response_object for ci in container_instances]
+            'failures': [ci.response_object for ci in failures],
+            'containerInstances': [ci.response_object for ci in container_instances]
         })
diff --git a/moto/elb/__init__.py b/moto/elb/__init__.py
index a8e8dab8d..e25f2d486 100644
--- a/moto/elb/__init__.py
+++ b/moto/elb/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import elb_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 elb_backend = elb_backends['us-east-1']
 mock_elb = base_decorator(elb_backends)
diff --git a/moto/elb/exceptions.py b/moto/elb/exceptions.py
index 338f3c95b..897bd6dd1 100644
--- a/moto/elb/exceptions.py
+++ b/moto/elb/exceptions.py
@@ -7,6 +7,7 @@ class ELBClientError(RESTError):
 
 
 class DuplicateTagKeysError(ELBClientError):
+
     def __init__(self, cidr):
         super(DuplicateTagKeysError, self).__init__(
             "DuplicateTagKeys",
@@ -15,6 +16,7 @@ class DuplicateTagKeysError(ELBClientError):
 
 
 class LoadBalancerNotFoundError(ELBClientError):
+
     def __init__(self, cidr):
         super(LoadBalancerNotFoundError, self).__init__(
             "LoadBalancerNotFound",
@@ -23,6 +25,7 @@ class LoadBalancerNotFoundError(ELBClientError):
 
 
 class TooManyTagsError(ELBClientError):
+
     def __init__(self):
         super(TooManyTagsError, self).__init__(
             "LoadBalancerNotFound",
@@ -30,6 +33,7 @@ class TooManyTagsError(ELBClientError):
 
 
 class BadHealthCheckDefinition(ELBClientError):
+
     def __init__(self):
         super(BadHealthCheckDefinition, self).__init__(
             "ValidationError",
@@ -37,9 +41,9 @@ class BadHealthCheckDefinition(ELBClientError):
 
 
 class DuplicateLoadBalancerName(ELBClientError):
+
     def __init__(self, name):
         super(DuplicateLoadBalancerName, self).__init__(
             "DuplicateLoadBalancerName",
             "The specified load balancer name already exists for this account: {0}"
             .format(name))
-
diff --git a/moto/elb/models.py b/moto/elb/models.py
index 055b08e4d..11559c2e7 100644
--- a/moto/elb/models.py
+++ b/moto/elb/models.py
@@ -1,6 +1,5 @@
 from __future__ import unicode_literals
 
-import boto.ec2.elb
 from boto.ec2.elb.attributes import (
     LbAttributes,
     ConnectionSettingAttribute,
@@ -22,8 +21,8 @@ from .exceptions import (
 )
 
 
-
 class FakeHealthCheck(object):
+
     def __init__(self, timeout, healthy_threshold, unhealthy_threshold,
                  interval, target):
         self.timeout = timeout
@@ -36,6 +35,7 @@ class FakeHealthCheck(object):
 
 
 class FakeListener(object):
+
     def __init__(self, load_balancer_port, instance_port, protocol, ssl_certificate_id):
         self.load_balancer_port = load_balancer_port
         self.instance_port = instance_port
@@ -48,6 +48,7 @@ class FakeListener(object):
 
 
 class FakeBackend(object):
+
     def __init__(self, instance_port):
         self.instance_port = instance_port
         self.policy_names = []
@@ -57,6 +58,7 @@ class FakeBackend(object):
 
 
 class FakeLoadBalancer(object):
+
     def __init__(self, name, zones, ports, scheme='internet-facing', vpc_id=None, subnets=None):
         self.name = name
         self.health_check = None
@@ -78,16 +80,20 @@ class FakeLoadBalancer(object):
         for port in ports:
             listener = FakeListener(
                 protocol=(port.get('protocol') or port['Protocol']),
-                load_balancer_port=(port.get('load_balancer_port') or port['LoadBalancerPort']),
-                instance_port=(port.get('instance_port') or port['InstancePort']),
-                ssl_certificate_id=port.get('sslcertificate_id', port.get('SSLCertificateId')),
+                load_balancer_port=(
+                    port.get('load_balancer_port') or port['LoadBalancerPort']),
+                instance_port=(
+                    port.get('instance_port') or port['InstancePort']),
+                ssl_certificate_id=port.get(
+                    'sslcertificate_id', port.get('SSLCertificateId')),
             )
             self.listeners.append(listener)
 
             # it is unclear per the AWS documentation as to when or how backend
             # information gets set, so let's guess and set it here *shrug*
             backend = FakeBackend(
-                instance_port=(port.get('instance_port') or port['InstancePort']),
+                instance_port=(
+                    port.get('instance_port') or port['InstancePort']),
             )
             self.backends.append(backend)
 
@@ -120,7 +126,8 @@ class FakeLoadBalancer(object):
                 port_policies[port] = policies_for_port
 
             for port, policies in port_policies.items():
-                elb_backend.set_load_balancer_policies_of_backend_server(new_elb.name, port, list(policies))
+                elb_backend.set_load_balancer_policies_of_backend_server(
+                    new_elb.name, port, list(policies))
 
         health_check = properties.get('HealthCheck')
         if health_check:
@@ -137,7 +144,8 @@ class FakeLoadBalancer(object):
 
     @classmethod
     def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
-        cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name)
+        cls.delete_from_cloudformation_json(
+            original_resource.name, cloudformation_json, region_name)
         return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
 
     @classmethod
@@ -155,15 +163,19 @@ class FakeLoadBalancer(object):
     def get_cfn_attribute(self, attribute_name):
         from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
         if attribute_name == 'CanonicalHostedZoneName':
-            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneName" ]"')
+            raise NotImplementedError(
+                '"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneName" ]"')
         elif attribute_name == 'CanonicalHostedZoneNameID':
-            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneNameID" ]"')
+            raise NotImplementedError(
+                '"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneNameID" ]"')
         elif attribute_name == 'DNSName':
             return self.dns_name
         elif attribute_name == 'SourceSecurityGroup.GroupName':
-            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.GroupName" ]"')
+            raise NotImplementedError(
+                '"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.GroupName" ]"')
         elif attribute_name == 'SourceSecurityGroup.OwnerAlias':
-            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.OwnerAlias" ]"')
+            raise NotImplementedError(
+                '"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.OwnerAlias" ]"')
         raise UnformattedGetAttTemplateException()
 
     @classmethod
@@ -224,7 +236,8 @@ class ELBBackend(BaseBackend):
             vpc_id = subnet.vpc_id
         if name in self.load_balancers:
             raise DuplicateLoadBalancerName(name)
-        new_load_balancer = FakeLoadBalancer(name=name, zones=zones, ports=ports, scheme=scheme, subnets=subnets, vpc_id=vpc_id)
+        new_load_balancer = FakeLoadBalancer(
+            name=name, zones=zones, ports=ports, scheme=scheme, subnets=subnets, vpc_id=vpc_id)
         self.load_balancers[name] = new_load_balancer
         return new_load_balancer
 
@@ -240,14 +253,16 @@ class ELBBackend(BaseBackend):
                 if lb_port == listener.load_balancer_port:
                     break
             else:
-                balancer.listeners.append(FakeListener(lb_port, instance_port, protocol, ssl_certificate_id))
+                balancer.listeners.append(FakeListener(
+                    lb_port, instance_port, protocol, ssl_certificate_id))
 
         return balancer
 
     def describe_load_balancers(self, names):
         balancers = self.load_balancers.values()
         if names:
-            matched_balancers = [balancer for balancer in balancers if balancer.name in names]
+            matched_balancers = [
+                balancer for balancer in balancers if balancer.name in names]
             if len(names) != len(matched_balancers):
                 missing_elb = list(set(names) - set(matched_balancers))[0]
                 raise LoadBalancerNotFoundError(missing_elb)
@@ -288,7 +303,8 @@ class ELBBackend(BaseBackend):
         if balancer:
             for idx, listener in enumerate(balancer.listeners):
                 if lb_port == listener.load_balancer_port:
-                    balancer.listeners[idx].ssl_certificate_id = ssl_certificate_id
+                    balancer.listeners[
+                        idx].ssl_certificate_id = ssl_certificate_id
 
         return balancer
 
@@ -299,7 +315,8 @@ class ELBBackend(BaseBackend):
 
     def deregister_instances(self, load_balancer_name, instance_ids):
         load_balancer = self.get_load_balancer(load_balancer_name)
-        new_instance_ids = [instance_id for instance_id in load_balancer.instance_ids if instance_id not in instance_ids]
+        new_instance_ids = [
+            instance_id for instance_id in load_balancer.instance_ids if instance_id not in instance_ids]
         load_balancer.instance_ids = new_instance_ids
         return load_balancer
 
@@ -342,7 +359,8 @@ class ELBBackend(BaseBackend):
 
     def set_load_balancer_policies_of_backend_server(self, load_balancer_name, instance_port, policies):
         load_balancer = self.get_load_balancer(load_balancer_name)
-        backend = [b for b in load_balancer.backends if int(b.instance_port) == instance_port][0]
+        backend = [b for b in load_balancer.backends if int(
+            b.instance_port) == instance_port][0]
         backend_idx = load_balancer.backends.index(backend)
         backend.policy_names = policies
         load_balancer.backends[backend_idx] = backend
@@ -350,7 +368,8 @@ class ELBBackend(BaseBackend):
 
     def set_load_balancer_policies_of_listener(self, load_balancer_name, load_balancer_port, policies):
         load_balancer = self.get_load_balancer(load_balancer_name)
-        listener = [l for l in load_balancer.listeners if int(l.load_balancer_port) == load_balancer_port][0]
+        listener = [l for l in load_balancer.listeners if int(
+            l.load_balancer_port) == load_balancer_port][0]
         listener_idx = load_balancer.listeners.index(listener)
         listener.policy_names = policies
         load_balancer.listeners[listener_idx] = listener
diff --git a/moto/elb/responses.py b/moto/elb/responses.py
index cba98e4e0..e90de260e 100644
--- a/moto/elb/responses.py
+++ b/moto/elb/responses.py
@@ -43,9 +43,11 @@ class ELBResponse(BaseResponse):
         load_balancer_name = self._get_param('LoadBalancerName')
         ports = self._get_list_prefix("Listeners.member")
 
-        self.elb_backend.create_load_balancer_listeners(name=load_balancer_name, ports=ports)
+        self.elb_backend.create_load_balancer_listeners(
+            name=load_balancer_name, ports=ports)
 
-        template = self.response_template(CREATE_LOAD_BALANCER_LISTENERS_TEMPLATE)
+        template = self.response_template(
+            CREATE_LOAD_BALANCER_LISTENERS_TEMPLATE)
         return template.render()
 
     def describe_load_balancers(self):
@@ -59,7 +61,8 @@ class ELBResponse(BaseResponse):
         ports = self._get_multi_param("LoadBalancerPorts.member")
         ports = [int(port) for port in ports]
 
-        self.elb_backend.delete_load_balancer_listeners(load_balancer_name, ports)
+        self.elb_backend.delete_load_balancer_listeners(
+            load_balancer_name, ports)
 
         template = self.response_template(DELETE_LOAD_BALANCER_LISTENERS)
         return template.render()
@@ -74,7 +77,8 @@ class ELBResponse(BaseResponse):
             load_balancer_name=self._get_param('LoadBalancerName'),
             timeout=self._get_param('HealthCheck.Timeout'),
             healthy_threshold=self._get_param('HealthCheck.HealthyThreshold'),
-            unhealthy_threshold=self._get_param('HealthCheck.UnhealthyThreshold'),
+            unhealthy_threshold=self._get_param(
+                'HealthCheck.UnhealthyThreshold'),
             interval=self._get_param('HealthCheck.Interval'),
             target=self._get_param('HealthCheck.Target'),
         )
@@ -83,9 +87,11 @@ class ELBResponse(BaseResponse):
 
     def register_instances_with_load_balancer(self):
         load_balancer_name = self._get_param('LoadBalancerName')
-        instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key]
+        instance_ids = [value[0] for key, value in self.querystring.items(
+        ) if "Instances.member" in key]
         template = self.response_template(REGISTER_INSTANCES_TEMPLATE)
-        load_balancer = self.elb_backend.register_instances(load_balancer_name, instance_ids)
+        load_balancer = self.elb_backend.register_instances(
+            load_balancer_name, instance_ids)
         return template.render(load_balancer=load_balancer)
 
     def set_load_balancer_listener_sslcertificate(self):
@@ -93,16 +99,19 @@ class ELBResponse(BaseResponse):
         ssl_certificate_id = self.querystring['SSLCertificateId'][0]
         lb_port = self.querystring['LoadBalancerPort'][0]
 
-        self.elb_backend.set_load_balancer_listener_sslcertificate(load_balancer_name, lb_port, ssl_certificate_id)
+        self.elb_backend.set_load_balancer_listener_sslcertificate(
+            load_balancer_name, lb_port, ssl_certificate_id)
 
         template = self.response_template(SET_LOAD_BALANCER_SSL_CERTIFICATE)
         return template.render()
 
     def deregister_instances_from_load_balancer(self):
         load_balancer_name = self._get_param('LoadBalancerName')
-        instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key]
+        instance_ids = [value[0] for key, value in self.querystring.items(
+        ) if "Instances.member" in key]
         template = self.response_template(DEREGISTER_INSTANCES_TEMPLATE)
-        load_balancer = self.elb_backend.deregister_instances(load_balancer_name, instance_ids)
+        load_balancer = self.elb_backend.deregister_instances(
+            load_balancer_name, instance_ids)
         return template.render(load_balancer=load_balancer)
 
     def describe_load_balancer_attributes(self):
@@ -115,11 +124,13 @@ class ELBResponse(BaseResponse):
         load_balancer_name = self._get_param('LoadBalancerName')
         load_balancer = self.elb_backend.get_load_balancer(load_balancer_name)
 
-        cross_zone = self._get_dict_param("LoadBalancerAttributes.CrossZoneLoadBalancing.")
+        cross_zone = self._get_dict_param(
+            "LoadBalancerAttributes.CrossZoneLoadBalancing.")
         if cross_zone:
             attribute = CrossZoneLoadBalancingAttribute()
             attribute.enabled = cross_zone["enabled"] == "true"
-            self.elb_backend.set_cross_zone_load_balancing_attribute(load_balancer_name, attribute)
+            self.elb_backend.set_cross_zone_load_balancing_attribute(
+                load_balancer_name, attribute)
 
         access_log = self._get_dict_param("LoadBalancerAttributes.AccessLog.")
         if access_log:
@@ -128,20 +139,25 @@ class ELBResponse(BaseResponse):
             attribute.s3_bucket_name = access_log['s3_bucket_name']
             attribute.s3_bucket_prefix = access_log['s3_bucket_prefix']
             attribute.emit_interval = access_log["emit_interval"]
-            self.elb_backend.set_access_log_attribute(load_balancer_name, attribute)
+            self.elb_backend.set_access_log_attribute(
+                load_balancer_name, attribute)
 
-        connection_draining = self._get_dict_param("LoadBalancerAttributes.ConnectionDraining.")
+        connection_draining = self._get_dict_param(
+            "LoadBalancerAttributes.ConnectionDraining.")
         if connection_draining:
             attribute = ConnectionDrainingAttribute()
             attribute.enabled = connection_draining["enabled"] == "true"
             attribute.timeout = connection_draining["timeout"]
-            self.elb_backend.set_connection_draining_attribute(load_balancer_name, attribute)
+            self.elb_backend.set_connection_draining_attribute(
+                load_balancer_name, attribute)
 
-        connection_settings = self._get_dict_param("LoadBalancerAttributes.ConnectionSettings.")
+        connection_settings = self._get_dict_param(
+            "LoadBalancerAttributes.ConnectionSettings.")
         if connection_settings:
             attribute = ConnectionSettingAttribute()
             attribute.idle_timeout = connection_settings["idle_timeout"]
-            self.elb_backend.set_connection_settings_attribute(load_balancer_name, attribute)
+            self.elb_backend.set_connection_settings_attribute(
+                load_balancer_name, attribute)
 
         template = self.response_template(MODIFY_ATTRIBUTES_TEMPLATE)
         return template.render(attributes=load_balancer.attributes)
@@ -153,7 +169,8 @@ class ELBResponse(BaseResponse):
         policy_name = self._get_param("PolicyName")
         other_policy.policy_name = policy_name
 
-        self.elb_backend.create_lb_other_policy(load_balancer_name, other_policy)
+        self.elb_backend.create_lb_other_policy(
+            load_balancer_name, other_policy)
 
         template = self.response_template(CREATE_LOAD_BALANCER_POLICY_TEMPLATE)
         return template.render()
@@ -165,7 +182,8 @@ class ELBResponse(BaseResponse):
         policy.policy_name = self._get_param("PolicyName")
         policy.cookie_name = self._get_param("CookieName")
 
-        self.elb_backend.create_app_cookie_stickiness_policy(load_balancer_name, policy)
+        self.elb_backend.create_app_cookie_stickiness_policy(
+            load_balancer_name, policy)
 
         template = self.response_template(CREATE_LOAD_BALANCER_POLICY_TEMPLATE)
         return template.render()
@@ -181,7 +199,8 @@ class ELBResponse(BaseResponse):
         else:
             policy.cookie_expiration_period = None
 
-        self.elb_backend.create_lb_cookie_stickiness_policy(load_balancer_name, policy)
+        self.elb_backend.create_lb_cookie_stickiness_policy(
+            load_balancer_name, policy)
 
         template = self.response_template(CREATE_LOAD_BALANCER_POLICY_TEMPLATE)
         return template.render()
@@ -191,13 +210,16 @@ class ELBResponse(BaseResponse):
         load_balancer = self.elb_backend.get_load_balancer(load_balancer_name)
         load_balancer_port = int(self._get_param('LoadBalancerPort'))
 
-        mb_listener = [l for l in load_balancer.listeners if int(l.load_balancer_port) == load_balancer_port]
+        mb_listener = [l for l in load_balancer.listeners if int(
+            l.load_balancer_port) == load_balancer_port]
         if mb_listener:
             policies = self._get_multi_param("PolicyNames.member")
-            self.elb_backend.set_load_balancer_policies_of_listener(load_balancer_name, load_balancer_port, policies)
+            self.elb_backend.set_load_balancer_policies_of_listener(
+                load_balancer_name, load_balancer_port, policies)
         # else: explode?
 
-        template = self.response_template(SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE)
+        template = self.response_template(
+            SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE)
         return template.render()
 
     def set_load_balancer_policies_for_backend_server(self):
@@ -205,20 +227,25 @@ class ELBResponse(BaseResponse):
         load_balancer = self.elb_backend.get_load_balancer(load_balancer_name)
         instance_port = int(self.querystring.get('InstancePort')[0])
 
-        mb_backend = [b for b in load_balancer.backends if int(b.instance_port) == instance_port]
+        mb_backend = [b for b in load_balancer.backends if int(
+            b.instance_port) == instance_port]
         if mb_backend:
             policies = self._get_multi_param('PolicyNames.member')
-            self.elb_backend.set_load_balancer_policies_of_backend_server(load_balancer_name, instance_port, policies)
+            self.elb_backend.set_load_balancer_policies_of_backend_server(
+                load_balancer_name, instance_port, policies)
         # else: explode?
 
-        template = self.response_template(SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE)
+        template = self.response_template(
+            SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE)
         return template.render()
 
     def describe_instance_health(self):
         load_balancer_name = self._get_param('LoadBalancerName')
-        instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key]
+        instance_ids = [value[0] for key, value in self.querystring.items(
+        ) if "Instances.member" in key]
         if len(instance_ids) == 0:
-            instance_ids = self.elb_backend.get_load_balancer(load_balancer_name).instance_ids
+            instance_ids = self.elb_backend.get_load_balancer(
+                load_balancer_name).instance_ids
         template = self.response_template(DESCRIBE_INSTANCE_HEALTH_TEMPLATE)
         return template.render(instance_ids=instance_ids)
 
@@ -226,7 +253,6 @@ class ELBResponse(BaseResponse):
 
         for key, value in self.querystring.items():
             if "LoadBalancerNames.member" in key:
-                number = key.split('.')[2]
                 load_balancer_name = value[0]
                 elb = self.elb_backend.get_load_balancer(load_balancer_name)
                 if not elb:
@@ -241,7 +267,8 @@ class ELBResponse(BaseResponse):
         for key, value in self.querystring.items():
             if "LoadBalancerNames.member" in key:
                 number = key.split('.')[2]
-                load_balancer_name = self._get_param('LoadBalancerNames.member.{0}'.format(number))
+                load_balancer_name = self._get_param(
+                    'LoadBalancerNames.member.{0}'.format(number))
                 elb = self.elb_backend.get_load_balancer(load_balancer_name)
                 if not elb:
                     raise LoadBalancerNotFoundError(load_balancer_name)
@@ -260,7 +287,8 @@ class ELBResponse(BaseResponse):
         for key, value in self.querystring.items():
             if "LoadBalancerNames.member" in key:
                 number = key.split('.')[2]
-                load_balancer_name = self._get_param('LoadBalancerNames.member.{0}'.format(number))
+                load_balancer_name = self._get_param(
+                    'LoadBalancerNames.member.{0}'.format(number))
                 elb = self.elb_backend.get_load_balancer(load_balancer_name)
                 if not elb:
                     raise LoadBalancerNotFoundError(load_balancer_name)
@@ -284,7 +312,7 @@ class ELBResponse(BaseResponse):
         for i in tag_keys:
             counts[i] = tag_keys.count(i)
 
-        counts = sorted(counts.items(), key=lambda i:i[1], reverse=True)
+        counts = sorted(counts.items(), key=lambda i: i[1], reverse=True)
 
         if counts and counts[0][1] > 1:
             # We have dupes...
diff --git a/moto/emr/__init__.py b/moto/emr/__init__.py index fc6b4d4ab..b4223f2cb 100644 --- a/moto/emr/__init__.py +++ b/moto/emr/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import emr_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator emr_backend = emr_backends['us-east-1'] mock_emr = base_decorator(emr_backends) diff --git a/moto/emr/models.py b/moto/emr/models.py index 155e4a898..94bc45ecc 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -11,6 +11,7 @@ from .utils import random_instance_group_id, random_cluster_id, random_step_id class FakeApplication(object): + def __init__(self, name, version, args=None, additional_info=None): self.additional_info = additional_info or {} self.args = args or [] @@ -19,6 +20,7 @@ class FakeApplication(object): class FakeBootstrapAction(object): + def __init__(self, args, name, script_path): self.args = args or [] self.name = name @@ -26,6 +28,7 @@ class FakeBootstrapAction(object): class FakeInstanceGroup(object): + def __init__(self, instance_count, instance_role, instance_type, market='ON_DEMAND', name=None, id=None, bid_price=None): self.id = id or random_instance_group_id() @@ -55,6 +58,7 @@ class FakeInstanceGroup(object): class FakeStep(object): + def __init__(self, state, name='', @@ -78,6 +82,7 @@ class FakeStep(object): class FakeCluster(object): + def __init__(self, emr_backend, name, @@ -135,17 +140,24 @@ class FakeCluster(object): 'instance_type': instance_attrs['slave_instance_type'], 'market': 'ON_DEMAND', 'name': 'slave'}]) - self.additional_master_security_groups = instance_attrs.get('additional_master_security_groups') - self.additional_slave_security_groups = instance_attrs.get('additional_slave_security_groups') + self.additional_master_security_groups = instance_attrs.get( + 'additional_master_security_groups') + self.additional_slave_security_groups = instance_attrs.get( + 'additional_slave_security_groups') self.availability_zone = instance_attrs.get('availability_zone') self.ec2_key_name = instance_attrs.get('ec2_key_name') self.ec2_subnet_id = instance_attrs.get('ec2_subnet_id') self.hadoop_version = instance_attrs.get('hadoop_version') - self.keep_job_flow_alive_when_no_steps = instance_attrs.get('keep_job_flow_alive_when_no_steps') - self.master_security_group = instance_attrs.get('emr_managed_master_security_group') - self.service_access_security_group = instance_attrs.get('service_access_security_group') - self.slave_security_group = instance_attrs.get('emr_managed_slave_security_group') - self.termination_protected = instance_attrs.get('termination_protected') + self.keep_job_flow_alive_when_no_steps = instance_attrs.get( + 'keep_job_flow_alive_when_no_steps') + self.master_security_group = instance_attrs.get( + 'emr_managed_master_security_group') + self.service_access_security_group = instance_attrs.get( + 'service_access_security_group') + self.slave_security_group = instance_attrs.get( + 'emr_managed_slave_security_group') + self.termination_protected = instance_attrs.get( + 'termination_protected') self.release_label = release_label self.requested_ami_version = requested_ami_version @@ -286,7 +298,8 @@ class ElasticMapReduceBackend(BaseBackend): clusters = self.clusters.values() within_two_month = datetime.now(pytz.utc) - timedelta(days=60) - clusters = [c for c in clusters if c.creation_datetime >= within_two_month] + clusters = [ + c for c in 
clusters if c.creation_datetime >= within_two_month] if job_flow_ids: clusters = [c for c in clusters if c.id in job_flow_ids] @@ -294,10 +307,12 @@ class ElasticMapReduceBackend(BaseBackend): clusters = [c for c in clusters if c.state in job_flow_states] if created_after: created_after = dtparse(created_after) - clusters = [c for c in clusters if c.creation_datetime > created_after] + clusters = [ + c for c in clusters if c.creation_datetime > created_after] if created_before: created_before = dtparse(created_before) - clusters = [c for c in clusters if c.creation_datetime < created_before] + clusters = [ + c for c in clusters if c.creation_datetime < created_before] # Amazon EMR can return a maximum of 512 job flow descriptions return sorted(clusters, key=lambda x: x.id)[:512] @@ -322,7 +337,8 @@ class ElasticMapReduceBackend(BaseBackend): max_items = 50 actions = self.clusters[cluster_id].bootstrap_actions start_idx = 0 if marker is None else int(marker) - marker = None if len(actions) <= start_idx + max_items else str(start_idx + max_items) + marker = None if len(actions) <= start_idx + \ + max_items else str(start_idx + max_items) return actions[start_idx:start_idx + max_items], marker def list_clusters(self, cluster_states=None, created_after=None, @@ -333,13 +349,16 @@ class ElasticMapReduceBackend(BaseBackend): clusters = [c for c in clusters if c.state in cluster_states] if created_after: created_after = dtparse(created_after) - clusters = [c for c in clusters if c.creation_datetime > created_after] + clusters = [ + c for c in clusters if c.creation_datetime > created_after] if created_before: created_before = dtparse(created_before) - clusters = [c for c in clusters if c.creation_datetime < created_before] + clusters = [ + c for c in clusters if c.creation_datetime < created_before] clusters = sorted(clusters, key=lambda x: x.id) start_idx = 0 if marker is None else int(marker) - marker = None if len(clusters) <= start_idx + max_items else str(start_idx + max_items) + marker = None if len(clusters) <= start_idx + \ + max_items else str(start_idx + max_items) return clusters[start_idx:start_idx + max_items], marker def list_instance_groups(self, cluster_id, marker=None): @@ -347,7 +366,8 @@ class ElasticMapReduceBackend(BaseBackend): groups = sorted(self.clusters[cluster_id].instance_groups, key=lambda x: x.id) start_idx = 0 if marker is None else int(marker) - marker = None if len(groups) <= start_idx + max_items else str(start_idx + max_items) + marker = None if len(groups) <= start_idx + \ + max_items else str(start_idx + max_items) return groups[start_idx:start_idx + max_items], marker def list_steps(self, cluster_id, marker=None, step_ids=None, step_states=None): @@ -358,7 +378,8 @@ class ElasticMapReduceBackend(BaseBackend): if step_states: steps = [s for s in steps if s.state in step_states] start_idx = 0 if marker is None else int(marker) - marker = None if len(steps) <= start_idx + max_items else str(start_idx + max_items) + marker = None if len(steps) <= start_idx + \ + max_items else str(start_idx + max_items) return steps[start_idx:start_idx + max_items], marker def modify_instance_groups(self, instance_groups): diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 3869c33ff..91dc8cc11 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -29,7 +29,8 @@ def generate_boto3_response(operation): {'x-amzn-requestid': '2690d7eb-ed86-11dd-9877-6fad448a8419', 'date': datetime.now(pytz.utc).strftime('%a, %d %b %Y %H:%M:%S %Z'), 'content-type': 
'application/x-amz-json-1.1'}) - resp = xml_to_json_response(self.aws_service_spec, operation, rendered) + resp = xml_to_json_response( + self.aws_service_spec, operation, rendered) return '' if resp is None else json.dumps(resp) return rendered return f @@ -63,14 +64,16 @@ class ElasticMapReduceResponse(BaseResponse): instance_groups = self._get_list_prefix('InstanceGroups.member') for item in instance_groups: item['instance_count'] = int(item['instance_count']) - instance_groups = self.backend.add_instance_groups(jobflow_id, instance_groups) + instance_groups = self.backend.add_instance_groups( + jobflow_id, instance_groups) template = self.response_template(ADD_INSTANCE_GROUPS_TEMPLATE) return template.render(instance_groups=instance_groups) @generate_boto3_response('AddJobFlowSteps') def add_job_flow_steps(self): job_flow_id = self._get_param('JobFlowId') - steps = self.backend.add_job_flow_steps(job_flow_id, steps_from_query_string(self._get_list_prefix('Steps.member'))) + steps = self.backend.add_job_flow_steps( + job_flow_id, steps_from_query_string(self._get_list_prefix('Steps.member'))) template = self.response_template(ADD_JOB_FLOW_STEPS_TEMPLATE) return template.render(steps=steps) @@ -104,7 +107,8 @@ class ElasticMapReduceResponse(BaseResponse): created_before = self._get_param('CreatedBefore') job_flow_ids = self._get_multi_param("JobFlowIds.member") job_flow_states = self._get_multi_param('JobFlowStates.member') - clusters = self.backend.describe_job_flows(job_flow_ids, job_flow_states, created_after, created_before) + clusters = self.backend.describe_job_flows( + job_flow_ids, job_flow_states, created_after, created_before) template = self.response_template(DESCRIBE_JOB_FLOWS_TEMPLATE) return template.render(clusters=clusters) @@ -123,7 +127,8 @@ class ElasticMapReduceResponse(BaseResponse): def list_bootstrap_actions(self): cluster_id = self._get_param('ClusterId') marker = self._get_param('Marker') - bootstrap_actions, marker = self.backend.list_bootstrap_actions(cluster_id, marker) + bootstrap_actions, marker = self.backend.list_bootstrap_actions( + cluster_id, marker) template = self.response_template(LIST_BOOTSTRAP_ACTIONS_TEMPLATE) return template.render(bootstrap_actions=bootstrap_actions, marker=marker) @@ -133,7 +138,8 @@ class ElasticMapReduceResponse(BaseResponse): created_after = self._get_param('CreatedAfter') created_before = self._get_param('CreatedBefore') marker = self._get_param('Marker') - clusters, marker = self.backend.list_clusters(cluster_states, created_after, created_before, marker) + clusters, marker = self.backend.list_clusters( + cluster_states, created_after, created_before, marker) template = self.response_template(LIST_CLUSTERS_TEMPLATE) return template.render(clusters=clusters, marker=marker) @@ -141,7 +147,8 @@ class ElasticMapReduceResponse(BaseResponse): def list_instance_groups(self): cluster_id = self._get_param('ClusterId') marker = self._get_param('Marker') - instance_groups, marker = self.backend.list_instance_groups(cluster_id, marker=marker) + instance_groups, marker = self.backend.list_instance_groups( + cluster_id, marker=marker) template = self.response_template(LIST_INSTANCE_GROUPS_TEMPLATE) return template.render(instance_groups=instance_groups, marker=marker) @@ -154,7 +161,8 @@ class ElasticMapReduceResponse(BaseResponse): marker = self._get_param('Marker') step_ids = self._get_multi_param('StepIds.member') step_states = self._get_multi_param('StepStates.member') - steps, marker = self.backend.list_steps(cluster_id, 
marker=marker, step_ids=step_ids, step_states=step_states) + steps, marker = self.backend.list_steps( + cluster_id, marker=marker, step_ids=step_ids, step_states=step_states) template = self.response_template(LIST_STEPS_TEMPLATE) return template.render(steps=steps, marker=marker) @@ -178,19 +186,27 @@ class ElasticMapReduceResponse(BaseResponse): @generate_boto3_response('RunJobFlow') def run_job_flow(self): instance_attrs = dict( - master_instance_type=self._get_param('Instances.MasterInstanceType'), + master_instance_type=self._get_param( + 'Instances.MasterInstanceType'), slave_instance_type=self._get_param('Instances.SlaveInstanceType'), instance_count=self._get_int_param('Instances.InstanceCount', 1), ec2_key_name=self._get_param('Instances.Ec2KeyName'), ec2_subnet_id=self._get_param('Instances.Ec2SubnetId'), hadoop_version=self._get_param('Instances.HadoopVersion'), - availability_zone=self._get_param('Instances.Placement.AvailabilityZone', self.backend.region_name + 'a'), - keep_job_flow_alive_when_no_steps=self._get_bool_param('Instances.KeepJobFlowAliveWhenNoSteps', False), - termination_protected=self._get_bool_param('Instances.TerminationProtected', False), - emr_managed_master_security_group=self._get_param('Instances.EmrManagedMasterSecurityGroup'), - emr_managed_slave_security_group=self._get_param('Instances.EmrManagedSlaveSecurityGroup'), - service_access_security_group=self._get_param('Instances.ServiceAccessSecurityGroup'), - additional_master_security_groups=self._get_multi_param('Instances.AdditionalMasterSecurityGroups.member.'), + availability_zone=self._get_param( + 'Instances.Placement.AvailabilityZone', self.backend.region_name + 'a'), + keep_job_flow_alive_when_no_steps=self._get_bool_param( + 'Instances.KeepJobFlowAliveWhenNoSteps', False), + termination_protected=self._get_bool_param( + 'Instances.TerminationProtected', False), + emr_managed_master_security_group=self._get_param( + 'Instances.EmrManagedMasterSecurityGroup'), + emr_managed_slave_security_group=self._get_param( + 'Instances.EmrManagedSlaveSecurityGroup'), + service_access_security_group=self._get_param( + 'Instances.ServiceAccessSecurityGroup'), + additional_master_security_groups=self._get_multi_param( + 'Instances.AdditionalMasterSecurityGroups.member.'), additional_slave_security_groups=self._get_multi_param('Instances.AdditionalSlaveSecurityGroups.member.')) kwargs = dict( @@ -198,8 +214,10 @@ class ElasticMapReduceResponse(BaseResponse): log_uri=self._get_param('LogUri'), job_flow_role=self._get_param('JobFlowRole'), service_role=self._get_param('ServiceRole'), - steps=steps_from_query_string(self._get_list_prefix('Steps.member')), - visible_to_all_users=self._get_bool_param('VisibleToAllUsers', False), + steps=steps_from_query_string( + self._get_list_prefix('Steps.member')), + visible_to_all_users=self._get_bool_param( + 'VisibleToAllUsers', False), instance_attrs=instance_attrs, ) @@ -225,7 +243,8 @@ class ElasticMapReduceResponse(BaseResponse): if key.startswith('properties.'): config.pop(key) config['properties'] = {} - map_items = self._get_map_prefix('Configurations.member.{0}.Properties.entry'.format(idx)) + map_items = self._get_map_prefix( + 'Configurations.member.{0}.Properties.entry'.format(idx)) config['properties'] = map_items kwargs['configurations'] = configurations @@ -239,7 +258,8 @@ class ElasticMapReduceResponse(BaseResponse): 'Only one AMI version and release label may be specified. 
' 'Provided AMI: {0}, release label: {1}.').format(
                 ami_version, release_label)
-            raise EmrError(error_type="ValidationException", message=message, template='single_error')
+            raise EmrError(error_type="ValidationException",
+                           message=message, template='single_error')
         else:
             if ami_version:
                 kwargs['requested_ami_version'] = ami_version
@@ -256,7 +276,8 @@ class ElasticMapReduceResponse(BaseResponse):
         self.backend.add_applications(
             cluster.id, [{'Name': 'Hadoop', 'Version': '0.18'}])
 
-        instance_groups = self._get_list_prefix('Instances.InstanceGroups.member')
+        instance_groups = self._get_list_prefix(
+            'Instances.InstanceGroups.member')
         if instance_groups:
             for ig in instance_groups:
                 ig['instance_count'] = int(ig['instance_count'])
@@ -274,7 +295,8 @@ class ElasticMapReduceResponse(BaseResponse):
     def set_termination_protection(self):
         termination_protection = self._get_param('TerminationProtected')
         job_ids = self._get_multi_param('JobFlowIds.member')
-        self.backend.set_termination_protection(job_ids, termination_protection)
+        self.backend.set_termination_protection(
+            job_ids, termination_protection)
         template = self.response_template(SET_TERMINATION_PROTECTION_TEMPLATE)
         return template.render()
 
diff --git a/moto/emr/utils.py b/moto/emr/utils.py
index 328fdd783..4f12522cf 100644
--- a/moto/emr/utils.py
+++ b/moto/emr/utils.py
@@ -32,7 +32,8 @@ def tags_from_query_string(querystring_dict):
         tag_key = querystring_dict.get("Tags.{0}.Key".format(tag_index))[0]
         tag_value_key = "Tags.{0}.Value".format(tag_index)
         if tag_value_key in querystring_dict:
-            response_values[tag_key] = querystring_dict.get(tag_value_key)[0]
+            response_values[tag_key] = querystring_dict.get(tag_value_key)[
+                0]
         else:
             response_values[tag_key] = None
     return response_values
@@ -42,7 +43,8 @@ def steps_from_query_string(querystring_dict):
     steps = []
     for step in querystring_dict:
         step['jar'] = step.pop('hadoop_jar_step._jar')
-        step['properties'] = dict((o['Key'], o['Value']) for o in step.get('properties', []))
+        step['properties'] = dict((o['Key'], o['Value'])
+                                  for o in step.get('properties', []))
         step['args'] = []
         idx = 1
         keyfmt = 'hadoop_jar_step._args.member.{0}'
diff --git a/moto/events/models.py b/moto/events/models.py
index 94cca5ee7..3cf2c3d7a 100644
--- a/moto/events/models.py
+++ b/moto/events/models.py
@@ -53,7 +53,8 @@ class EventsBackend(BaseBackend):
 
     def __init__(self):
         self.rules = {}
-        # This array tracks the order in which the rules have been added, since 2.6 doesn't have OrderedDicts.
+        # This array tracks the order in which the rules have been added, since
+        # 2.6 doesn't have OrderedDicts.
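
Aside: the comment above refers to the common workaround of pairing a plain dict with a list of keys so that insertion order survives on Python 2.6, which lacks collections.OrderedDict. A minimal sketch of that pattern, with illustrative names that are not part of the patch:

    # A dict gives O(1) lookup by name; the companion list remembers the
    # order in which names were added, standing in for OrderedDict on 2.6.
    class OrderedRuleStore(object):

        def __init__(self):
            self.rules = {}
            self.rules_order = []

        def put_rule(self, name, rule):
            if name not in self.rules:
                self.rules_order.append(name)
            self.rules[name] = rule

        def delete_rule(self, name):
            if name in self.rules:
                self.rules_order.remove(name)
                del self.rules[name]

        def get_rule_by_index(self, i):
            # EventsBackend._get_rule_by_index performs the equivalent lookup.
            return self.rules[self.rules_order[i]]

Deletion has to touch both structures, which is the main cost of the workaround; on Python 2.7+ an OrderedDict would replace the pair.
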
         self.rules_order = []
         self.next_tokens = {}
@@ -106,7 +107,8 @@ class EventsBackend(BaseBackend):
         matching_rules = []
         return_obj = {}
 
-        start_index, end_index, new_next_token = self._process_token_and_limits(len(self.rules), next_token, limit)
+        start_index, end_index, new_next_token = self._process_token_and_limits(
+            len(self.rules), next_token, limit)
 
         for i in range(start_index, end_index):
             rule = self._get_rule_by_index(i)
@@ -130,7 +132,8 @@ class EventsBackend(BaseBackend):
         matching_rules = []
         return_obj = {}
 
-        start_index, end_index, new_next_token = self._process_token_and_limits(len(self.rules), next_token, limit)
+        start_index, end_index, new_next_token = self._process_token_and_limits(
+            len(self.rules), next_token, limit)
 
         for i in range(start_index, end_index):
             rule = self._get_rule_by_index(i)
@@ -144,10 +147,12 @@ class EventsBackend(BaseBackend):
         return return_obj
 
     def list_targets_by_rule(self, rule, next_token=None, limit=None):
-        # We'll let a KeyError exception be thrown for response to handle if rule doesn't exist.
+        # We'll let a KeyError exception be thrown for response to handle if
+        # rule doesn't exist.
         rule = self.rules[rule]
 
-        start_index, end_index, new_next_token = self._process_token_and_limits(len(rule.targets), next_token, limit)
+        start_index, end_index, new_next_token = self._process_token_and_limits(
+            len(rule.targets), next_token, limit)
 
         returned_targets = []
         return_obj = {}
@@ -188,4 +193,5 @@ class EventsBackend(BaseBackend):
     def test_event_pattern(self):
         raise NotImplementedError()
 
+
 events_backend = EventsBackend()
diff --git a/moto/events/responses.py b/moto/events/responses.py
index 75e703706..d03befe12 100644
--- a/moto/events/responses.py
+++ b/moto/events/responses.py
@@ -87,7 +87,8 @@ class EventsHandler(BaseResponse):
         if not target_arn:
             return self.error('ValidationException', 'Parameter TargetArn is required.')
 
-        rule_names = events_backend.list_rule_names_by_target(target_arn, next_token, limit)
+        rule_names = events_backend.list_rule_names_by_target(
+            target_arn, next_token, limit)
 
         return json.dumps(rule_names), self.response_headers
 
@@ -118,7 +119,8 @@ class EventsHandler(BaseResponse):
             return self.error('ValidationException', 'Parameter Rule is required.')
 
         try:
-            targets = events_backend.list_targets_by_rule(rule_name, next_token, limit)
+            targets = events_backend.list_targets_by_rule(
+                rule_name, next_token, limit)
         except KeyError:
             return self.error('ResourceNotFoundException', 'Rule ' + rule_name + ' does not exist.')
 
@@ -140,7 +142,8 @@ class EventsHandler(BaseResponse):
             try:
                 json.loads(event_pattern)
             except ValueError:
-                # Not quite as informative as the real error, but it'll work for now.
+                # Not quite as informative as the real error, but it'll work
+                # for now.
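
Aside: the three listing methods in the events hunks above all delegate their paging to _process_token_and_limits, which this patch does not show. A sketch of the token arithmetic its call sites imply; the stringified-index token and the default page size of 50 are assumptions here, not taken from the patch:

    def process_token_and_limits(array_len, next_token=None, limit=None):
        # The token is treated as the index to resume from; limit caps the
        # page size. Returns (start_index, end_index, new_next_token).
        start_index = int(next_token) if next_token is not None else 0
        page_size = int(limit) if limit is not None else 50  # assumed default
        end_index = min(start_index + page_size, array_len)
        # Only hand back a token while items remain, the same
        # stop-when-exhausted idiom as the EMR marker logic earlier
        # in this patch.
        new_next_token = str(end_index) if end_index < array_len else None
        return start_index, end_index, new_next_token
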
                return self.error('InvalidEventPatternException', 'Event pattern is not valid.')
 
         if sched_exp:
diff --git a/moto/glacier/__init__.py b/moto/glacier/__init__.py
index 49b3375e1..1570fa7d4 100644
--- a/moto/glacier/__init__.py
+++ b/moto/glacier/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import glacier_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 glacier_backend = glacier_backends['us-east-1']
 mock_glacier = base_decorator(glacier_backends)
diff --git a/moto/glacier/models.py b/moto/glacier/models.py
index 836e84d37..8e3286887 100644
--- a/moto/glacier/models.py
+++ b/moto/glacier/models.py
@@ -36,6 +36,7 @@ class ArchiveJob(object):
 
 
 class Vault(object):
+
     def __init__(self, vault_name, region):
         self.vault_name = vault_name
         self.region = region
diff --git a/moto/glacier/responses.py b/moto/glacier/responses.py
index eac9b94c6..cda859b29 100644
--- a/moto/glacier/responses.py
+++ b/moto/glacier/responses.py
@@ -128,7 +128,8 @@ class GlacierResponse(_TemplateEnvironmentMixin):
         archive_id = json_body['ArchiveId']
         job_id = self.backend.initiate_job(vault_name, archive_id)
         headers['x-amz-job-id'] = job_id
-        headers['Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id)
+        headers[
+            'Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id)
         return 202, headers, ""
 
     @classmethod
diff --git a/moto/iam/__init__.py b/moto/iam/__init__.py
index c5110b35d..1dda654ce 100644
--- a/moto/iam/__init__.py
+++ b/moto/iam/__init__.py
@@ -3,4 +3,4 @@ from .models import iam_backend
 iam_backends = {"global": iam_backend}
 
 mock_iam = iam_backend.decorator
-mock_iam_deprecated = iam_backend.deprecated_decorator
\ No newline at end of file
+mock_iam_deprecated = iam_backend.deprecated_decorator
diff --git a/moto/iam/models.py b/moto/iam/models.py
index d27722f33..91c4a14d7 100644
--- a/moto/iam/models.py
+++ b/moto/iam/models.py
@@ -97,6 +97,7 @@ class Role(object):
 
 
 class InstanceProfile(object):
+
     def __init__(self, instance_profile_id, name, path, roles):
         self.id = instance_profile_id
         self.name = name
@@ -126,6 +127,7 @@ class InstanceProfile(object):
 
 
 class Certificate(object):
+
     def __init__(self, cert_name, cert_body, private_key, cert_chain=None, path=None):
         self.cert_name = cert_name
         self.cert_body = cert_body
@@ -139,6 +141,7 @@ class Certificate(object):
 
 
 class AccessKey(object):
+
     def __init__(self, user_name):
         self.user_name = user_name
         self.access_key_id = random_access_key()
@@ -157,6 +160,7 @@ class AccessKey(object):
 
 
 class Group(object):
+
     def __init__(self, name, path='/'):
         self.name = name
         self.id = random_resource_id()
@@ -176,6 +180,7 @@ class Group(object):
 
 
 class User(object):
+
     def __init__(self, name, path=None):
         self.name = name
         self.id = random_resource_id()
@@ -184,7 +189,8 @@ class User(object):
             datetime.utcnow(),
             "%Y-%m-%d-%H-%M-%S"
         )
-        self.arn = 'arn:aws:iam::123456789012:user{0}{1}'.format(self.path, name)
+        self.arn = 'arn:aws:iam::123456789012:user{0}{1}'.format(
+            self.path, name)
         self.policies = {}
         self.access_keys = []
         self.password = None
@@ -194,7 +200,8 @@ class User(object):
         try:
             policy_json = self.policies[policy_name]
         except KeyError:
-            raise IAMNotFoundException("Policy {0} not found".format(policy_name))
+            raise IAMNotFoundException(
+                "Policy {0} not found".format(policy_name))
 
         return {
             'policy_name': policy_name,
@@ -207,7 +214,8 @@ class User(object):
     def
delete_policy(self, policy_name): if policy_name not in self.policies: - raise IAMNotFoundException("Policy {0} not found".format(policy_name)) + raise IAMNotFoundException( + "Policy {0} not found".format(policy_name)) del self.policies[policy_name] @@ -225,7 +233,8 @@ class User(object): self.access_keys.remove(key) break else: - raise IAMNotFoundException("Key {0} not found".format(access_key_id)) + raise IAMNotFoundException( + "Key {0} not found".format(access_key_id)) def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException @@ -261,16 +270,18 @@ class User(object): access_key_2_last_rotated = date_created.strftime(date_format) return '{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A'.format(self.name, - self.arn, - date_created.strftime(date_format), - password_enabled, - password_last_used, - date_created.strftime(date_format), - access_key_1_active, - access_key_1_last_rotated, - access_key_2_active, - access_key_2_last_rotated - ) + self.arn, + date_created.strftime( + date_format), + password_enabled, + password_last_used, + date_created.strftime( + date_format), + access_key_1_active, + access_key_1_last_rotated, + access_key_2_active, + access_key_2_last_rotated + ) # predefine AWS managed policies @@ -439,7 +450,8 @@ class IAMBackend(BaseBackend): if scope == 'AWS': policies = [p for p in policies if isinstance(p, AWSManagedPolicy)] elif scope == 'Local': - policies = [p for p in policies if not isinstance(p, AWSManagedPolicy)] + policies = [p for p in policies if not isinstance( + p, AWSManagedPolicy)] if path_prefix: policies = [p for p in policies if p.path.startswith(path_prefix)] @@ -492,7 +504,8 @@ class IAMBackend(BaseBackend): instance_profile_id = random_resource_id() roles = [iam_backend.get_role_by_id(role_id) for role_id in role_ids] - instance_profile = InstanceProfile(instance_profile_id, name, path, roles) + instance_profile = InstanceProfile( + instance_profile_id, name, path, roles) self.instance_profiles[instance_profile_id] = instance_profile return instance_profile @@ -501,7 +514,8 @@ class IAMBackend(BaseBackend): if profile.name == profile_name: return profile - raise IAMNotFoundException("Instance profile {0} not found".format(profile_name)) + raise IAMNotFoundException( + "Instance profile {0} not found".format(profile_name)) def get_instance_profiles(self): return self.instance_profiles.values() @@ -546,7 +560,8 @@ class IAMBackend(BaseBackend): def create_group(self, group_name, path='/'): if group_name in self.groups: - raise IAMConflictException("Group {0} already exists".format(group_name)) + raise IAMConflictException( + "Group {0} already exists".format(group_name)) group = Group(group_name, path) self.groups[group_name] = group @@ -557,7 +572,8 @@ class IAMBackend(BaseBackend): try: group = self.groups[group_name] except KeyError: - raise IAMNotFoundException("Group {0} not found".format(group_name)) + raise IAMNotFoundException( + "Group {0} not found".format(group_name)) return group @@ -575,7 +591,8 @@ class IAMBackend(BaseBackend): def create_user(self, user_name, path='/'): if user_name in self.users: - raise IAMConflictException("EntityAlreadyExists", "User {0} already exists".format(user_name)) + raise IAMConflictException( + "EntityAlreadyExists", "User {0} already exists".format(user_name)) user = User(user_name, path) self.users[user_name] = user @@ -595,7 +612,8 @@ class IAMBackend(BaseBackend): try: users = self.users.values() except 
KeyError: - raise IAMNotFoundException("Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items)) + raise IAMNotFoundException( + "Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items)) return users @@ -603,13 +621,15 @@ class IAMBackend(BaseBackend): # This does not currently deal with PasswordPolicyViolation. user = self.get_user(user_name) if user.password: - raise IAMConflictException("User {0} already has password".format(user_name)) + raise IAMConflictException( + "User {0} already has password".format(user_name)) user.password = password def delete_login_profile(self, user_name): user = self.get_user(user_name) if not user.password: - raise IAMNotFoundException("Login profile for {0} not found".format(user_name)) + raise IAMNotFoundException( + "Login profile for {0} not found".format(user_name)) user.password = None def add_user_to_group(self, group_name, user_name): @@ -623,7 +643,8 @@ class IAMBackend(BaseBackend): try: group.users.remove(user) except ValueError: - raise IAMNotFoundException("User {0} not in group {1}".format(user_name, group_name)) + raise IAMNotFoundException( + "User {0} not in group {1}".format(user_name, group_name)) def get_user_policy(self, user_name, policy_name): user = self.get_user(user_name) @@ -672,4 +693,5 @@ class IAMBackend(BaseBackend): report += self.users[user].to_csv() return base64.b64encode(report.encode('ascii')).decode('ascii') + iam_backend = IAMBackend() diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 223691e1e..9bddd21df 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -18,7 +18,8 @@ class IamResponse(BaseResponse): path = self._get_param('Path') policy_document = self._get_param('PolicyDocument') policy_name = self._get_param('PolicyName') - policy = iam_backend.create_policy(description, path, policy_document, policy_name) + policy = iam_backend.create_policy( + description, path, policy_document, policy_name) template = self.response_template(CREATE_POLICY_TEMPLATE) return template.render(policy=policy) @@ -27,7 +28,8 @@ class IamResponse(BaseResponse): max_items = self._get_int_param('MaxItems', 100) path_prefix = self._get_param('PathPrefix', '/') role_name = self._get_param('RoleName') - policies, marker = iam_backend.list_attached_role_policies(role_name, marker=marker, max_items=max_items, path_prefix=path_prefix) + policies, marker = iam_backend.list_attached_role_policies( + role_name, marker=marker, max_items=max_items, path_prefix=path_prefix) template = self.response_template(LIST_ATTACHED_ROLE_POLICIES_TEMPLATE) return template.render(policies=policies, marker=marker) @@ -37,16 +39,19 @@ class IamResponse(BaseResponse): only_attached = self._get_bool_param('OnlyAttached', False) path_prefix = self._get_param('PathPrefix', '/') scope = self._get_param('Scope', 'All') - policies, marker = iam_backend.list_policies(marker, max_items, only_attached, path_prefix, scope) + policies, marker = iam_backend.list_policies( + marker, max_items, only_attached, path_prefix, scope) template = self.response_template(LIST_POLICIES_TEMPLATE) return template.render(policies=policies, marker=marker) def create_role(self): role_name = self._get_param('RoleName') path = self._get_param('Path') - assume_role_policy_document = self._get_param('AssumeRolePolicyDocument') + assume_role_policy_document = self._get_param( + 'AssumeRolePolicyDocument') - role = iam_backend.create_role(role_name, assume_role_policy_document, path) + role = iam_backend.create_role( + role_name, 
assume_role_policy_document, path) template = self.response_template(CREATE_ROLE_TEMPLATE) return template.render(role=role) @@ -74,7 +79,8 @@ class IamResponse(BaseResponse): def get_role_policy(self): role_name = self._get_param('RoleName') policy_name = self._get_param('PolicyName') - policy_name, policy_document = iam_backend.get_role_policy(role_name, policy_name) + policy_name, policy_document = iam_backend.get_role_policy( + role_name, policy_name) template = self.response_template(GET_ROLE_POLICY_TEMPLATE) return template.render(role_name=role_name, policy_name=policy_name, @@ -91,7 +97,8 @@ class IamResponse(BaseResponse): profile_name = self._get_param('InstanceProfileName') path = self._get_param('Path') - profile = iam_backend.create_instance_profile(profile_name, path, role_ids=[]) + profile = iam_backend.create_instance_profile( + profile_name, path, role_ids=[]) template = self.response_template(CREATE_INSTANCE_PROFILE_TEMPLATE) return template.render(profile=profile) @@ -107,7 +114,8 @@ class IamResponse(BaseResponse): role_name = self._get_param('RoleName') iam_backend.add_role_to_instance_profile(profile_name, role_name) - template = self.response_template(ADD_ROLE_TO_INSTANCE_PROFILE_TEMPLATE) + template = self.response_template( + ADD_ROLE_TO_INSTANCE_PROFILE_TEMPLATE) return template.render() def remove_role_from_instance_profile(self): @@ -115,7 +123,8 @@ class IamResponse(BaseResponse): role_name = self._get_param('RoleName') iam_backend.remove_role_from_instance_profile(profile_name, role_name) - template = self.response_template(REMOVE_ROLE_FROM_INSTANCE_PROFILE_TEMPLATE) + template = self.response_template( + REMOVE_ROLE_FROM_INSTANCE_PROFILE_TEMPLATE) return template.render() def list_roles(self): @@ -132,9 +141,11 @@ class IamResponse(BaseResponse): def list_instance_profiles_for_role(self): role_name = self._get_param('RoleName') - profiles = iam_backend.get_instance_profiles_for_role(role_name=role_name) + profiles = iam_backend.get_instance_profiles_for_role( + role_name=role_name) - template = self.response_template(LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE) + template = self.response_template( + LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE) return template.render(instance_profiles=profiles) def upload_server_certificate(self): @@ -144,7 +155,8 @@ class IamResponse(BaseResponse): private_key = self._get_param('PrivateKey') cert_chain = self._get_param('CertificateName') - cert = iam_backend.upload_server_cert(cert_name, cert_body, private_key, cert_chain=cert_chain, path=path) + cert = iam_backend.upload_server_cert( + cert_name, cert_body, private_key, cert_chain=cert_chain, path=path) template = self.response_template(UPLOAD_CERT_TEMPLATE) return template.render(certificate=cert) diff --git a/moto/instance_metadata/__init__.py b/moto/instance_metadata/__init__.py index 9197bcf7c..d1a674982 100644 --- a/moto/instance_metadata/__init__.py +++ b/moto/instance_metadata/__init__.py @@ -1,4 +1,4 @@ from __future__ import unicode_literals from .models import instance_metadata_backend -instance_metadata_backends = {"global": instance_metadata_backend} \ No newline at end of file +instance_metadata_backends = {"global": instance_metadata_backend} diff --git a/moto/instance_metadata/models.py b/moto/instance_metadata/models.py index b86f86376..8f8d84154 100644 --- a/moto/instance_metadata/models.py +++ b/moto/instance_metadata/models.py @@ -4,4 +4,5 @@ from moto.core.models import BaseBackend class InstanceMetadataBackend(BaseBackend): pass + instance_metadata_backend = 
InstanceMetadataBackend() diff --git a/moto/instance_metadata/responses.py b/moto/instance_metadata/responses.py index b2de66e7b..2ea9aa9a8 100644 --- a/moto/instance_metadata/responses.py +++ b/moto/instance_metadata/responses.py @@ -7,6 +7,7 @@ from moto.core.responses import BaseResponse class InstanceMetadataResponse(BaseResponse): + def metadata_response(self, request, full_url, headers): """ Mock response for localhost metadata @@ -43,5 +44,6 @@ class InstanceMetadataResponse(BaseResponse): elif path == 'iam/security-credentials/default-role': result = json.dumps(credentials) else: - raise NotImplementedError("The {0} metadata path has not been implemented".format(path)) + raise NotImplementedError( + "The {0} metadata path has not been implemented".format(path)) return 200, headers, result diff --git a/moto/kinesis/__init__.py b/moto/kinesis/__init__.py index c3f06d5b1..7d9767a9f 100644 --- a/moto/kinesis/__init__.py +++ b/moto/kinesis/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import kinesis_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator kinesis_backend = kinesis_backends['us-east-1'] mock_kinesis = base_decorator(kinesis_backends) diff --git a/moto/kinesis/exceptions.py b/moto/kinesis/exceptions.py index 0fcb3652a..e2fe02775 100644 --- a/moto/kinesis/exceptions.py +++ b/moto/kinesis/exceptions.py @@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest class ResourceNotFoundError(BadRequest): + def __init__(self, message): super(ResourceNotFoundError, self).__init__() self.description = json.dumps({ @@ -14,6 +15,7 @@ class ResourceNotFoundError(BadRequest): class ResourceInUseError(BadRequest): + def __init__(self, message): super(ResourceNotFoundError, self).__init__() self.description = json.dumps({ @@ -23,18 +25,21 @@ class ResourceInUseError(BadRequest): class StreamNotFoundError(ResourceNotFoundError): + def __init__(self, stream_name): super(StreamNotFoundError, self).__init__( 'Stream {0} under account 123456789012 not found.'.format(stream_name)) class ShardNotFoundError(ResourceNotFoundError): + def __init__(self, shard_id): super(ShardNotFoundError, self).__init__( 'Shard {0} under account 123456789012 not found.'.format(shard_id)) class InvalidArgumentError(BadRequest): + def __init__(self, message): super(InvalidArgumentError, self).__init__() self.description = json.dumps({ diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index e0e20da3f..5d80426ae 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -18,6 +18,7 @@ from .utils import compose_shard_iterator, compose_new_shard_iterator, decompose class Record(object): + def __init__(self, partition_key, data, sequence_number, explicit_hash_key): self.partition_key = partition_key self.data = data @@ -33,6 +34,7 @@ class Record(object): class Shard(object): + def __init__(self, shard_id, starting_hash, ending_hash): self._shard_id = shard_id self.starting_hash = starting_hash @@ -64,7 +66,8 @@ class Shard(object): else: last_sequence_number = 0 sequence_number = last_sequence_number + 1 - self.records[sequence_number] = Record(partition_key, data, sequence_number, explicit_hash_key) + self.records[sequence_number] = Record( + partition_key, data, sequence_number, explicit_hash_key) return sequence_number def get_min_sequence_number(self): @@ -107,8 +110,10 @@ class Stream(object): izip_longest = itertools.izip_longest for index, 
start, end in izip_longest(range(shard_count), - range(0,2**128,2**128//shard_count), - range(2**128//shard_count,2**128,2**128//shard_count), + range(0, 2**128, 2 ** + 128 // shard_count), + range(2**128 // shard_count, 2 ** + 128, 2**128 // shard_count), fillvalue=2**128): shard = Shard(index, start, end) self.shards[shard.shard_id] = shard @@ -152,7 +157,8 @@ class Stream(object): def put_record(self, partition_key, explicit_hash_key, sequence_number_for_ordering, data): shard = self.get_shard_for_key(partition_key, explicit_hash_key) - sequence_number = shard.put_record(partition_key, data, explicit_hash_key) + sequence_number = shard.put_record( + partition_key, data, explicit_hash_key) return sequence_number, shard.shard_id def to_json(self): @@ -168,12 +174,14 @@ class Stream(object): class FirehoseRecord(object): + def __init__(self, record_data): self.record_id = 12345678 self.record_data = record_data class DeliveryStream(object): + def __init__(self, stream_name, **stream_kwargs): self.name = stream_name self.redshift_username = stream_kwargs.get('redshift_username') @@ -185,14 +193,18 @@ class DeliveryStream(object): self.s3_role_arn = stream_kwargs.get('s3_role_arn') self.s3_bucket_arn = stream_kwargs.get('s3_bucket_arn') self.s3_prefix = stream_kwargs.get('s3_prefix') - self.s3_compression_format = stream_kwargs.get('s3_compression_format', 'UNCOMPRESSED') + self.s3_compression_format = stream_kwargs.get( + 's3_compression_format', 'UNCOMPRESSED') self.s3_buffering_hings = stream_kwargs.get('s3_buffering_hings') self.redshift_s3_role_arn = stream_kwargs.get('redshift_s3_role_arn') - self.redshift_s3_bucket_arn = stream_kwargs.get('redshift_s3_bucket_arn') + self.redshift_s3_bucket_arn = stream_kwargs.get( + 'redshift_s3_bucket_arn') self.redshift_s3_prefix = stream_kwargs.get('redshift_s3_prefix') - self.redshift_s3_compression_format = stream_kwargs.get('redshift_s3_compression_format', 'UNCOMPRESSED') - self.redshift_s3_buffering_hings = stream_kwargs.get('redshift_s3_buffering_hings') + self.redshift_s3_compression_format = stream_kwargs.get( + 'redshift_s3_compression_format', 'UNCOMPRESSED') + self.redshift_s3_buffering_hings = stream_kwargs.get( + 'redshift_s3_buffering_hings') self.records = [] self.status = 'ACTIVE' @@ -231,9 +243,8 @@ class DeliveryStream(object): }, "Username": self.redshift_username, }, - } - ] - + } + ] def to_dict(self): return { @@ -261,10 +272,9 @@ class KinesisBackend(BaseBackend): self.streams = {} self.delivery_streams = {} - def create_stream(self, stream_name, shard_count, region): if stream_name in self.streams: - raise ResourceInUseError(stream_name) + raise ResourceInUseError(stream_name) stream = Stream(stream_name, shard_count, region) self.streams[stream_name] = stream return stream @@ -302,7 +312,8 @@ class KinesisBackend(BaseBackend): records, last_sequence_id = shard.get_records(last_sequence_id, limit) - next_shard_iterator = compose_shard_iterator(stream_name, shard, last_sequence_id) + next_shard_iterator = compose_shard_iterator( + stream_name, shard, last_sequence_id) return next_shard_iterator, records @@ -320,7 +331,7 @@ class KinesisBackend(BaseBackend): response = { "FailedRecordCount": 0, - "Records" : [] + "Records": [] } for record in records: @@ -342,7 +353,7 @@ class KinesisBackend(BaseBackend): stream = self.describe_stream(stream_name) if shard_to_split not in stream.shards: - raise ResourceNotFoundError(shard_to_split) + raise ResourceNotFoundError(shard_to_split) if not re.match(r'0|([1-9]\d{0,38})', 
new_starting_hash_key): raise InvalidArgumentError(new_starting_hash_key) @@ -350,10 +361,12 @@ class KinesisBackend(BaseBackend): shard = stream.shards[shard_to_split] - last_id = sorted(stream.shards.values(), key=attrgetter('_shard_id'))[-1]._shard_id + last_id = sorted(stream.shards.values(), + key=attrgetter('_shard_id'))[-1]._shard_id if shard.starting_hash < new_starting_hash_key < shard.ending_hash: - new_shard = Shard(last_id+1, new_starting_hash_key, shard.ending_hash) + new_shard = Shard( + last_id + 1, new_starting_hash_key, shard.ending_hash) shard.ending_hash = new_starting_hash_key stream.shards[new_shard.shard_id] = new_shard else: @@ -372,10 +385,10 @@ class KinesisBackend(BaseBackend): stream = self.describe_stream(stream_name) if shard_to_merge not in stream.shards: - raise ResourceNotFoundError(shard_to_merge) + raise ResourceNotFoundError(shard_to_merge) if adjacent_shard_to_merge not in stream.shards: - raise ResourceNotFoundError(adjacent_shard_to_merge) + raise ResourceNotFoundError(adjacent_shard_to_merge) shard1 = stream.shards[shard_to_merge] shard2 = stream.shards[adjacent_shard_to_merge] @@ -390,9 +403,11 @@ class KinesisBackend(BaseBackend): del stream.shards[shard2.shard_id] for index in shard2.records: record = shard2.records[index] - shard1.put_record(record.partition_key, record.data, record.explicit_hash_key) + shard1.put_record(record.partition_key, + record.data, record.explicit_hash_key) ''' Firehose ''' + def create_delivery_stream(self, stream_name, **stream_kwargs): stream = DeliveryStream(stream_name, **stream_kwargs) self.delivery_streams[stream_name] = stream @@ -416,19 +431,19 @@ class KinesisBackend(BaseBackend): return record def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None, limit=None): - stream = self.describe_stream(stream_name) + stream = self.describe_stream(stream_name) tags = [] result = { 'HasMoreTags': False, 'Tags': tags } - for key, val in sorted(stream.tags.items(), key=lambda x:x[0]): - if limit and len(res) >= limit: - result['HasMoreTags'] = True - break - if exclusive_start_tag_key and key < exexclusive_start_tag_key: - continue + for key, val in sorted(stream.tags.items(), key=lambda x: x[0]): + if limit and len(tags) >= limit: + result['HasMoreTags'] = True + break + if exclusive_start_tag_key and key < exclusive_start_tag_key: + continue tags.append({ 'Key': key, @@ -438,14 +453,14 @@ class KinesisBackend(BaseBackend): return result def add_tags_to_stream(self, stream_name, tags): - stream = self.describe_stream(stream_name) + stream = self.describe_stream(stream_name) stream.tags.update(tags) def remove_tags_from_stream(self, stream_name, tag_keys): - stream = self.describe_stream(stream_name) + stream = self.describe_stream(stream_name) for key in tag_keys: if key in stream.tags: - del stream.tags[key] + del stream.tags[key] kinesis_backends = {} diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index 29f6c07ff..8bc81925f 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -4,7 +4,6 @@ import json from moto.core.responses import BaseResponse from .models import kinesis_backends -from werkzeug.exceptions import BadRequest class KinesisResponse(BaseResponse): @@ -25,7 +24,8 @@ class KinesisResponse(BaseResponse): def create_stream(self): stream_name = self.parameters.get('StreamName') shard_count = self.parameters.get('ShardCount') - self.kinesis_backend.create_stream(stream_name, shard_count, self.region) + self.kinesis_backend.create_stream( + stream_name, 
shard_count, self.region) return "" def describe_stream(self): @@ -50,7 +50,8 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get("StreamName") shard_id = self.parameters.get("ShardId") shard_iterator_type = self.parameters.get("ShardIteratorType") - starting_sequence_number = self.parameters.get("StartingSequenceNumber") + starting_sequence_number = self.parameters.get( + "StartingSequenceNumber") shard_iterator = self.kinesis_backend.get_shard_iterator( stream_name, shard_id, shard_iterator_type, starting_sequence_number, @@ -64,7 +65,8 @@ class KinesisResponse(BaseResponse): shard_iterator = self.parameters.get("ShardIterator") limit = self.parameters.get("Limit") - next_shard_iterator, records = self.kinesis_backend.get_records(shard_iterator, limit) + next_shard_iterator, records = self.kinesis_backend.get_records( + shard_iterator, limit) return json.dumps({ "NextShardIterator": next_shard_iterator, @@ -77,7 +79,8 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get("StreamName") partition_key = self.parameters.get("PartitionKey") explicit_hash_key = self.parameters.get("ExplicitHashKey") - sequence_number_for_ordering = self.parameters.get("SequenceNumberForOrdering") + sequence_number_for_ordering = self.parameters.get( + "SequenceNumberForOrdering") data = self.parameters.get("Data") sequence_number, shard_id = self.kinesis_backend.put_record( @@ -105,7 +108,7 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get("StreamName") shard_to_split = self.parameters.get("ShardToSplit") new_starting_hash_key = self.parameters.get("NewStartingHashKey") - response = self.kinesis_backend.split_shard( + self.kinesis_backend.split_shard( stream_name, shard_to_split, new_starting_hash_key ) return "" @@ -114,15 +117,17 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get("StreamName") shard_to_merge = self.parameters.get("ShardToMerge") adjacent_shard_to_merge = self.parameters.get("AdjacentShardToMerge") - response = self.kinesis_backend.merge_shards( + self.kinesis_backend.merge_shards( stream_name, shard_to_merge, adjacent_shard_to_merge ) return "" ''' Firehose ''' + def create_delivery_stream(self): stream_name = self.parameters['DeliveryStreamName'] - redshift_config = self.parameters.get('RedshiftDestinationConfiguration') + redshift_config = self.parameters.get( + 'RedshiftDestinationConfiguration') if redshift_config: redshift_s3_config = redshift_config['S3Configuration'] @@ -149,7 +154,8 @@ class KinesisResponse(BaseResponse): 's3_compression_format': s3_config.get('CompressionFormat'), 's3_buffering_hings': s3_config['BufferingHints'], } - stream = self.kinesis_backend.create_delivery_stream(stream_name, **stream_kwargs) + stream = self.kinesis_backend.create_delivery_stream( + stream_name, **stream_kwargs) return json.dumps({ 'DeliveryStreamARN': stream.arn }) @@ -177,7 +183,8 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters['DeliveryStreamName'] record_data = self.parameters['Record']['Data'] - record = self.kinesis_backend.put_firehose_record(stream_name, record_data) + record = self.kinesis_backend.put_firehose_record( + stream_name, record_data) return json.dumps({ "RecordId": record.record_id, }) @@ -188,7 +195,8 @@ class KinesisResponse(BaseResponse): request_responses = [] for record in records: - record_response = self.kinesis_backend.put_firehose_record(stream_name, record['Data']) + record_response = self.kinesis_backend.put_firehose_record( + stream_name, 
record['Data']) request_responses.append({ "RecordId": record_response.record_id }) @@ -207,7 +215,8 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get('StreamName') exclusive_start_tag_key = self.parameters.get('ExclusiveStartTagKey') limit = self.parameters.get('Limit') - response = self.kinesis_backend.list_tags_for_stream(stream_name, exclusive_start_tag_key, limit) + response = self.kinesis_backend.list_tags_for_stream( + stream_name, exclusive_start_tag_key, limit) return json.dumps(response) def remove_tags_from_stream(self): diff --git a/moto/kinesis/utils.py b/moto/kinesis/utils.py index 0d35b4134..190371b2e 100644 --- a/moto/kinesis/utils.py +++ b/moto/kinesis/utils.py @@ -13,7 +13,8 @@ def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting elif shard_iterator_type == "LATEST": last_sequence_id = shard.get_max_sequence_number() else: - raise InvalidArgumentError("Invalid ShardIteratorType: {0}".format(shard_iterator_type)) + raise InvalidArgumentError( + "Invalid ShardIteratorType: {0}".format(shard_iterator_type)) return compose_shard_iterator(stream_name, shard, last_sequence_id) diff --git a/moto/kms/__init__.py b/moto/kms/__init__.py index b6bffa804..b4bb0b639 100644 --- a/moto/kms/__init__.py +++ b/moto/kms/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import kms_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator kms_backend = kms_backends['us-east-1'] mock_kms = base_decorator(kms_backends) diff --git a/moto/kms/models.py b/moto/kms/models.py index 0bfe5791f..37fde9eb8 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -7,6 +7,7 @@ from collections import defaultdict class Key(object): + def __init__(self, policy, key_usage, description, region): self.id = generate_key_id() self.policy = policy @@ -77,7 +78,8 @@ class KmsBackend(BaseBackend): return self.keys.pop(key_id) def describe_key(self, key_id): - # allow the different methods (alias, ARN :key/, keyId, ARN alias) to describe key not just KeyId + # allow the different methods (alias, ARN :key/, keyId, ARN alias) to + # describe key not just KeyId key_id = self.get_key_id(key_id) if r'alias/' in str(key_id).lower(): key_id = self.get_key_id_from_alias(key_id.split('alias/')[1]) @@ -128,6 +130,7 @@ class KmsBackend(BaseBackend): def get_key_policy(self, key_id): return self.keys[self.get_key_id(key_id)].policy + kms_backends = {} for region in boto.kms.regions(): kms_backends[region.name] = KmsBackend() diff --git a/moto/kms/responses.py b/moto/kms/responses.py index 7f0659a64..7ed8927a2 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -18,6 +18,7 @@ reserved_aliases = [ 'alias/aws/rds', ] + class KmsResponse(BaseResponse): @property @@ -33,13 +34,15 @@ class KmsResponse(BaseResponse): key_usage = self.parameters.get('KeyUsage') description = self.parameters.get('Description') - key = self.kms_backend.create_key(policy, key_usage, description, self.region) + key = self.kms_backend.create_key( + policy, key_usage, description, self.region) return json.dumps(key.to_dict()) def describe_key(self): key_id = self.parameters.get('KeyId') try: - key = self.kms_backend.describe_key(self.kms_backend.get_key_id(key_id)) + key = self.kms_backend.describe_key( + self.kms_backend.get_key_id(key_id)) except KeyError: headers = dict(self.headers) headers['status'] = 404 @@ -70,7 +73,8 @@ class 
KmsResponse(BaseResponse): body={'message': 'Invalid identifier', '__type': 'ValidationException'}) if alias_name in reserved_aliases: - raise JSONResponseError(400, 'Bad Request', body={'__type': 'NotAuthorizedException'}) + raise JSONResponseError(400, 'Bad Request', body={ + '__type': 'NotAuthorizedException'}) if ':' in alias_name: raise JSONResponseError(400, 'Bad Request', body={ @@ -81,7 +85,7 @@ class KmsResponse(BaseResponse): raise JSONResponseError(400, 'Bad Request', body={ 'message': "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$" .format(**locals()), - '__type': 'ValidationException'}) + '__type': 'ValidationException'}) if self.kms_backend.alias_exists(target_key_id): raise JSONResponseError(400, 'Bad Request', body={ @@ -120,7 +124,7 @@ class KmsResponse(BaseResponse): response_aliases = [ { 'AliasArn': u'arn:aws:kms:{region}:012345678912:{reserved_alias}'.format(region=region, - reserved_alias=reserved_alias), + reserved_alias=reserved_alias), 'AliasName': reserved_alias } for reserved_alias in reserved_aliases ] @@ -147,7 +151,7 @@ class KmsResponse(BaseResponse): self.kms_backend.enable_key_rotation(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps(None) @@ -159,7 +163,7 @@ class KmsResponse(BaseResponse): self.kms_backend.disable_key_rotation(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps(None) @@ -170,7 +174,7 @@ class KmsResponse(BaseResponse): rotation_enabled = self.kms_backend.get_key_rotation_status(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps({'KeyRotationEnabled': rotation_enabled}) @@ -185,7 +189,7 @@ class KmsResponse(BaseResponse): self.kms_backend.put_key_policy(key_id, policy) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps(None) @@ -200,7 +204,7 @@ class KmsResponse(BaseResponse): return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)}) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), 
'__type': 'NotFoundException'}) def list_key_policies(self): @@ -210,7 +214,7 @@ class KmsResponse(BaseResponse): self.kms_backend.describe_key(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps({'Truncated': False, 'PolicyNames': ['default']}) @@ -233,7 +237,9 @@ class KmsResponse(BaseResponse): def _assert_valid_key_id(key_id): if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE): - raise JSONResponseError(404, 'Not Found', body={'message': ' Invalid keyId', '__type': 'NotFoundException'}) + raise JSONResponseError(404, 'Not Found', body={ + 'message': ' Invalid keyId', '__type': 'NotFoundException'}) + def _assert_default_policy(policy_name): if policy_name != 'default': diff --git a/moto/opsworks/__init__.py b/moto/opsworks/__init__.py index d2da1a6a8..b492b6a53 100644 --- a/moto/opsworks/__init__.py +++ b/moto/opsworks/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import opsworks_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator opsworks_backend = opsworks_backends['us-east-1'] mock_opsworks = base_decorator(opsworks_backends) diff --git a/moto/opsworks/exceptions.py b/moto/opsworks/exceptions.py index b408b82f3..00bdffbc5 100644 --- a/moto/opsworks/exceptions.py +++ b/moto/opsworks/exceptions.py @@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest class ResourceNotFoundException(BadRequest): + def __init__(self, message): super(ResourceNotFoundException, self).__init__() self.description = json.dumps({ @@ -14,6 +15,7 @@ class ResourceNotFoundException(BadRequest): class ValidationException(BadRequest): + def __init__(self, message): super(ValidationException, self).__init__() self.description = json.dumps({ diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index 68edade9a..a1b8370dd 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -14,6 +14,7 @@ class OpsworkInstance(object): This metadata exists before any instance reservations are made, and is used to populate a reservation request when "start" is called """ + def __init__(self, stack_id, layer_ids, instance_type, ec2_backend, auto_scale_type=None, hostname=None, @@ -154,8 +155,10 @@ class OpsworkInstance(object): d.update({"ReportedAgentVersion": "2425-20160406102508 (fixed)"}) d.update({"RootDeviceVolumeId": "vol-a20e450a (fixed)"}) if self.ssh_keyname is not None: - d.update({"SshHostDsaKeyFingerprint": "24:36:32:fe:d8:5f:9c:18:b1:ad:37:e9:eb:e8:69:58 (fixed)"}) - d.update({"SshHostRsaKeyFingerprint": "3c:bd:37:52:d7:ca:67:e1:6e:4b:ac:31:86:79:f5:6c (fixed)"}) + d.update( + {"SshHostDsaKeyFingerprint": "24:36:32:fe:d8:5f:9c:18:b1:ad:37:e9:eb:e8:69:58 (fixed)"}) + d.update( + {"SshHostRsaKeyFingerprint": "3c:bd:37:52:d7:ca:67:e1:6e:4b:ac:31:86:79:f5:6c (fixed)"}) d.update({"PrivateDns": self.instance.private_dns}) d.update({"PrivateIp": self.instance.private_ip}) d.update({"PublicDns": getattr(self.instance, 'public_dns', None)}) @@ -164,6 +167,7 @@ class OpsworkInstance(object): class Layer(object): + def __init__(self, stack_id, type, name, shortname, 
attributes=None, custom_instance_profile_arn=None, @@ -283,11 +287,13 @@ class Layer(object): if self.custom_json is not None: d.update({"CustomJson": self.custom_json}) if self.custom_instance_profile_arn is not None: - d.update({"CustomInstanceProfileArn": self.custom_instance_profile_arn}) + d.update( + {"CustomInstanceProfileArn": self.custom_instance_profile_arn}) return d class Stack(object): + def __init__(self, name, region, service_role_arn, default_instance_profile_arn, vpcid="vpc-1f99bf7a", attributes=None, @@ -393,6 +399,7 @@ class Stack(object): class OpsWorksBackend(BaseBackend): + def __init__(self, ec2_backend): self.stacks = {} self.layers = {} @@ -457,9 +464,12 @@ class OpsWorksBackend(BaseBackend): kwargs.setdefault("subnet_id", stack.default_subnet_id) kwargs.setdefault("root_device_type", stack.default_root_device_type) if layer.custom_instance_profile_arn: - kwargs.setdefault("instance_profile_arn", layer.custom_instance_profile_arn) - kwargs.setdefault("instance_profile_arn", stack.default_instance_profile_arn) - kwargs.setdefault("security_group_ids", layer.custom_security_group_ids) + kwargs.setdefault("instance_profile_arn", + layer.custom_instance_profile_arn) + kwargs.setdefault("instance_profile_arn", + stack.default_instance_profile_arn) + kwargs.setdefault("security_group_ids", + layer.custom_security_group_ids) kwargs.setdefault("associate_public_ip", layer.auto_assign_public_ips) kwargs.setdefault("ebs_optimized", layer.use_ebs_optimized_instances) kwargs.update({"ec2_backend": self.ec2_backend}) @@ -507,14 +517,16 @@ class OpsWorksBackend(BaseBackend): if layer_id not in self.layers: raise ResourceNotFoundException( "Unable to find layer with ID {0}".format(layer_id)) - instances = [i.to_dict() for i in self.instances.values() if layer_id in i.layer_ids] + instances = [i.to_dict() for i in self.instances.values() + if layer_id in i.layer_ids] return instances if stack_id: if stack_id not in self.stacks: raise ResourceNotFoundException( "Unable to find stack with ID {0}".format(stack_id)) - instances = [i.to_dict() for i in self.instances.values() if stack_id==i.stack_id] + instances = [i.to_dict() for i in self.instances.values() + if stack_id == i.stack_id] return instances def start_instance(self, instance_id): diff --git a/moto/opsworks/responses.py b/moto/opsworks/responses.py index 4e0979154..42e0f2c5c 100644 --- a/moto/opsworks/responses.py +++ b/moto/opsworks/responses.py @@ -22,19 +22,24 @@ class OpsWorksResponse(BaseResponse): region=self.parameters.get("Region"), vpcid=self.parameters.get("VpcId"), attributes=self.parameters.get("Attributes"), - default_instance_profile_arn=self.parameters.get("DefaultInstanceProfileArn"), + default_instance_profile_arn=self.parameters.get( + "DefaultInstanceProfileArn"), default_os=self.parameters.get("DefaultOs"), hostname_theme=self.parameters.get("HostnameTheme"), - default_availability_zone=self.parameters.get("DefaultAvailabilityZone"), + default_availability_zone=self.parameters.get( + "DefaultAvailabilityZone"), default_subnet_id=self.parameters.get("DefaultInstanceProfileArn"), custom_json=self.parameters.get("CustomJson"), configuration_manager=self.parameters.get("ConfigurationManager"), chef_configuration=self.parameters.get("ChefConfiguration"), use_custom_cookbooks=self.parameters.get("UseCustomCookbooks"), - use_opsworks_security_groups=self.parameters.get("UseOpsworksSecurityGroups"), - custom_cookbooks_source=self.parameters.get("CustomCookbooksSource"), + 
use_opsworks_security_groups=self.parameters.get(
+                "UseOpsworksSecurityGroups"),
+            custom_cookbooks_source=self.parameters.get(
+                "CustomCookbooksSource"),
             default_ssh_keyname=self.parameters.get("DefaultSshKeyName"),
-            default_root_device_type=self.parameters.get("DefaultRootDeviceType"),
+            default_root_device_type=self.parameters.get(
+                "DefaultRootDeviceType"),
             service_role_arn=self.parameters.get("ServiceRoleArn"),
             agent_version=self.parameters.get("AgentVersion"),
         )
@@ -48,18 +53,24 @@ class OpsWorksResponse(BaseResponse):
             name=self.parameters.get('Name'),
             shortname=self.parameters.get('Shortname'),
             attributes=self.parameters.get('Attributes'),
-            custom_instance_profile_arn=self.parameters.get("CustomInstanceProfileArn"),
+            custom_instance_profile_arn=self.parameters.get(
+                "CustomInstanceProfileArn"),
             custom_json=self.parameters.get("CustomJson"),
-            custom_security_group_ids=self.parameters.get('CustomSecurityGroupIds'),
+            custom_security_group_ids=self.parameters.get(
+                'CustomSecurityGroupIds'),
             packages=self.parameters.get('Packages'),
             volume_configurations=self.parameters.get("VolumeConfigurations"),
             enable_autohealing=self.parameters.get("EnableAutoHealing"),
-            auto_assign_elastic_ips=self.parameters.get("AutoAssignElasticIps"),
+            auto_assign_elastic_ips=self.parameters.get(
+                "AutoAssignElasticIps"),
             auto_assign_public_ips=self.parameters.get("AutoAssignPublicIps"),
             custom_recipes=self.parameters.get("CustomRecipes"),
-            install_updates_on_boot=self.parameters.get("InstallUpdatesOnBoot"),
-            use_ebs_optimized_instances=self.parameters.get("UseEbsOptimizedInstances"),
-            lifecycle_event_configuration=self.parameters.get("LifecycleEventConfiguration")
+            install_updates_on_boot=self.parameters.get(
+                "InstallUpdatesOnBoot"),
+            use_ebs_optimized_instances=self.parameters.get(
+                "UseEbsOptimizedInstances"),
+            lifecycle_event_configuration=self.parameters.get(
+                "LifecycleEventConfiguration")
         )
         layer = self.opsworks_backend.create_layer(**kwargs)
         return json.dumps({"LayerId": layer.id}, indent=1)
@@ -80,7 +91,8 @@ class OpsWorksResponse(BaseResponse):
             architecture=self.parameters.get("Architecture"),
             root_device_type=self.parameters.get("RootDeviceType"),
             block_device_mappings=self.parameters.get("BlockDeviceMappings"),
-            install_updates_on_boot=self.parameters.get("InstallUpdatesOnBoot"),
+            install_updates_on_boot=self.parameters.get(
+                "InstallUpdatesOnBoot"),
             ebs_optimized=self.parameters.get("EbsOptimized"),
             agent_version=self.parameters.get("AgentVersion"),
         )
diff --git a/moto/packages/httpretty/__init__.py b/moto/packages/httpretty/__init__.py
index a752b452a..679294a4b 100644
--- a/moto/packages/httpretty/__init__.py
+++ b/moto/packages/httpretty/__init__.py
@@ -55,6 +55,7 @@ def last_request():
     """returns the last request"""
     return httpretty.last_request
 
+
 def has_request():
     """returns a boolean indicating whether any request has been made"""
     return not isinstance(httpretty.last_request.headers, EmptyRequestHeaders)
diff --git a/moto/packages/httpretty/compat.py b/moto/packages/httpretty/compat.py
index 6805cf638..b9e215b13 100644
--- a/moto/packages/httpretty/compat.py
+++ b/moto/packages/httpretty/compat.py
@@ -38,6 +38,7 @@ if PY3:  # pragma: no cover
     basestring = (str, bytes)
 
     class BaseClass(object):
+
         def __repr__(self):
             return self.__str__()
 else:  # pragma: no cover
@@ -49,6 +50,7 @@
 
     class BaseClass(object):
+
         def __repr__(self):
             ret = self.__str__()
             if PY3:  # pragma: no cover
@@ -63,6 +65,7 @@ try:  # pragma: no cover
 except ImportError:  # pragma: no cover
     from urlparse import urlsplit, urlunsplit, parse_qs, unquote
     from urllib import quote, quote_plus
+
     def unquote_utf8(qs):
         if isinstance(qs, text_type):
             qs = qs.encode('utf-8')
diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py
index 4764cbba9..b409711cf 100644
--- a/moto/packages/httpretty/core.py
+++ b/moto/packages/httpretty/core.py
@@ -138,6 +138,7 @@ class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass):
     `content-type` headers values: 'application/json' or
     'application/x-www-form-urlencoded'
     """
+
     def __init__(self, headers, body=''):
         # first of all, lets make sure that if headers or body are
         # unicode strings, it must be converted into a utf-8 encoded
@@ -149,8 +150,8 @@ class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass):
         # `rfile` based on it
         self.rfile = StringIO(b'\r\n\r\n'.join([self.raw_headers, self.body]))
         self.wfile = StringIO()  # Creating `wfile` as an empty
-                                 # StringIO, just to avoid any real
-                                 # I/O calls
+        # StringIO, just to avoid any real
+        # I/O calls
 
         # parsing the request line preemptively
         self.raw_requestline = self.rfile.readline()
@@ -229,12 +230,14 @@ class HTTPrettyRequestEmpty(object):
 
 
 class FakeSockFile(StringIO):
+
     def close(self):
         self.socket.close()
         StringIO.close(self)
 
 
 class FakeSSLSocket(object):
+
     def __init__(self, sock, *args, **kw):
         self._httpretty_sock = sock
 
@@ -243,6 +246,7 @@ class FakeSSLSocket(object):
 
 
 class fakesock(object):
+
     class socket(object):
         _entry = None
         debuglevel = 0
@@ -374,13 +378,15 @@ class fakesock(object):
             self.fd.socket = self
             try:
                 requestline, _ = data.split(b'\r\n', 1)
-                method, path, version = parse_requestline(decode_utf8(requestline))
+                method, path, version = parse_requestline(
+                    decode_utf8(requestline))
                 is_parsing_headers = True
             except ValueError:
                 is_parsing_headers = False
 
                 if not self._entry:
-                    # If the previous request wasn't mocked, don't mock the subsequent sending of data
+                    # If the previous request wasn't mocked, don't mock the
+                    # subsequent sending of data
                     return self.real_sendall(data, *args, **kw)
 
         self.fd.seek(0)
@@ -492,6 +498,7 @@ def fake_getaddrinfo(
 
 
 class Entry(BaseClass):
+
     def __init__(self, method, uri, body,
                  adding_headers=None,
                  forcing_headers=None,
@@ -543,15 +550,15 @@ class Entry(BaseClass):
                 igot = int(got)
             except ValueError:
                 warnings.warn(
-                    'HTTPretty got to register the Content-Length header ' \
+                    'HTTPretty got to register the Content-Length header '
                     'with "%r" which is not a number' % got,
                 )
 
             if igot > self.body_length:
                 raise HTTPrettyError(
-                    'HTTPretty got inconsistent parameters. The header ' \
-                    'Content-Length you registered expects size "%d" but ' \
-                    'the body you registered for that has actually length ' \
+                    'HTTPretty got inconsistent parameters. The header '
+                    'Content-Length you registered expects size "%d" but '
+                    'the body you registered for that has actually length '
                     '"%d".' % (
                         igot, self.body_length,
                     )
@@ -588,7 +595,8 @@ class Entry(BaseClass):
         headers = self.normalize_headers(headers)
         status = headers.get('status', self.status)
         if self.body_is_callable:
-            status, headers, self.body = self.callable_body(self.request, self.info.full_url(), headers)
+            status, headers, self.body = self.callable_body(
+                self.request, self.info.full_url(), headers)
             if self.request.method != "HEAD":
                 headers.update({
                     'content-length': len(self.body)
                 })
@@ -641,6 +649,7 @@ def url_fix(s, charset='utf-8'):
 
 
 class URIInfo(BaseClass):
+
     def __init__(self,
                  username='',
                  password='',
@@ -764,7 +773,7 @@ class URIMatcher(object):
 
         self.entries = entries
 
-        #hash of current_entry pointers, per method.
+        # hash of current_entry pointers, per method.
         self.current_entries = {}
 
     def matches(self, info):
@@ -788,7 +797,7 @@
         if method not in self.current_entries:
             self.current_entries[method] = 0
 
-        #restrict selection to entries that match the requested method
+        # restrict selection to entries that match the requested method
         entries_for_method = [e for e in self.entries if e.method == method]
 
         if self.current_entries[method] >= len(entries_for_method):
@@ -841,13 +850,14 @@ class httpretty(HttpBaseClass):
         try:
             import urllib3
         except ImportError:
-            raise RuntimeError('HTTPretty requires urllib3 installed for recording actual requests.')
-
+            raise RuntimeError(
+                'HTTPretty requires urllib3 installed for recording actual requests.')
         http = urllib3.PoolManager()
 
         cls.enable()
         calls = []
+
         def record_request(request, uri, headers):
             cls.disable()
@@ -870,7 +880,8 @@ class httpretty(HttpBaseClass):
             return response.status, response.headers, response.data
 
         for method in cls.METHODS:
-            cls.register_uri(method, re.compile(r'.*', re.M), body=record_request)
+            cls.register_uri(method, re.compile(
+                r'.*', re.M), body=record_request)
 
         yield
         cls.disable()
@@ -886,7 +897,8 @@ class httpretty(HttpBaseClass):
         for item in data:
             uri = item['request']['uri']
             method = item['request']['method']
-            cls.register_uri(method, uri, body=item['response']['body'], forcing_headers=item['response']['headers'])
+            cls.register_uri(method, uri, body=item['response'][
+                'body'], forcing_headers=item['response']['headers'])
 
         yield
         cls.disable()
diff --git a/moto/packages/httpretty/errors.py b/moto/packages/httpretty/errors.py
index cb6479bf5..e2dcad357 100644
--- a/moto/packages/httpretty/errors.py
+++ b/moto/packages/httpretty/errors.py
@@ -32,6 +32,7 @@ class HTTPrettyError(Exception):
 
 
 class UnmockedError(HTTPrettyError):
+
     def __init__(self):
         super(UnmockedError, self).__init__(
             'No mocking was registered, and real connections are '
diff --git a/moto/packages/responses/responses.py b/moto/packages/responses/responses.py
index 735655664..1f5892b25 100644
--- a/moto/packages/responses/responses.py
+++ b/moto/packages/responses/responses.py
@@ -82,6 +82,7 @@ def get_wrapped(func, wrapper_template, evaldict):
 
 
 class CallList(Sequence, Sized):
+
     def __init__(self):
         self._calls = []
 
@@ -298,10 +299,10 @@ class RequestsMock(object):
         def unbound_on_send(adapter, request, *a, **kwargs):
             return self._on_request(adapter, request, *a, **kwargs)
         self._patcher1 = mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send',
-                                   unbound_on_send)
+                                    unbound_on_send)
         self._patcher1.start()
         self._patcher2 = mock.patch('requests.adapters.HTTPAdapter.send',
-                                   unbound_on_send)
+                                    unbound_on_send)
         self._patcher2.start()
 
     def stop(self, allow_assert=True):
diff --git a/moto/packages/responses/setup.py b/moto/packages/responses/setup.py
index bab522865..911c07da4 100644
--- a/moto/packages/responses/setup.py
+++ b/moto/packages/responses/setup.py
@@ -57,6 +57,7 @@ except Exception:
 
 
 class PyTest(TestCommand):
+
     def finalize_options(self):
         TestCommand.finalize_options(self)
         self.test_args = ['test_responses.py']
diff --git a/moto/packages/responses/test_responses.py b/moto/packages/responses/test_responses.py
index ba0126ad5..967a535cf 100644
--- a/moto/packages/responses/test_responses.py
+++ b/moto/packages/responses/test_responses.py
@@ -284,6 +284,7 @@ def test_custom_adapter():
     calls = [0]
 
     class DummyAdapter(requests.adapters.HTTPAdapter):
+
         def send(self, *a, **k):
             calls[0] += 1
             return super(DummyAdapter, self).send(*a, **k)
diff --git a/moto/rds/__init__.py b/moto/rds/__init__.py
index 2c8c0ba97..a4086d89c 100644
--- a/moto/rds/__init__.py
+++ b/moto/rds/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import rds_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 rds_backend = rds_backends['us-east-1']
 mock_rds = base_decorator(rds_backends)
diff --git a/moto/rds/exceptions.py b/moto/rds/exceptions.py
index 936b979d2..5bcc95560 100644
--- a/moto/rds/exceptions.py
+++ b/moto/rds/exceptions.py
@@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest
 
 
 class RDSClientError(BadRequest):
+
     def __init__(self, code, message):
         super(RDSClientError, self).__init__()
         self.description = json.dumps({
@@ -18,6 +19,7 @@ class RDSClientError(BadRequest):
 
 
 class DBInstanceNotFoundError(RDSClientError):
+
     def __init__(self, database_identifier):
         super(DBInstanceNotFoundError, self).__init__(
             'DBInstanceNotFound',
@@ -25,6 +27,7 @@ class DBInstanceNotFoundError(RDSClientError):
 
 
 class DBSecurityGroupNotFoundError(RDSClientError):
+
     def __init__(self, security_group_name):
         super(DBSecurityGroupNotFoundError, self).__init__(
             'DBSecurityGroupNotFound',
@@ -32,6 +35,7 @@ class DBSecurityGroupNotFoundError(RDSClientError):
 
 
 class DBSubnetGroupNotFoundError(RDSClientError):
+
     def __init__(self, subnet_group_name):
         super(DBSubnetGroupNotFoundError, self).__init__(
             'DBSubnetGroupNotFound',
diff --git a/moto/rds/models.py b/moto/rds/models.py
index b63a30737..4334a9f72 100644
--- a/moto/rds/models.py
+++ b/moto/rds/models.py
@@ -1,6 +1,5 @@
 from __future__ import unicode_literals
 
-import copy
 import datetime
 
 import boto.rds
@@ -11,10 +10,10 @@ from moto.core import BaseBackend
 from moto.core.utils import get_random_hex
 from moto.ec2.models import ec2_backends
 from moto.rds2.models import rds2_backends
-from .exceptions import DBInstanceNotFoundError, DBSecurityGroupNotFoundError, DBSubnetGroupNotFoundError
 
 
 class Database(object):
+
     def __init__(self, **kwargs):
         self.status = "available"
 
@@ -35,7 +34,8 @@ class Database(object):
         self.storage_type = kwargs.get("storage_type")
         self.master_username = kwargs.get('master_username')
         self.master_password = kwargs.get('master_password')
-        self.auto_minor_version_upgrade = kwargs.get('auto_minor_version_upgrade')
+        self.auto_minor_version_upgrade = kwargs.get(
+            'auto_minor_version_upgrade')
         if self.auto_minor_version_upgrade is None:
             self.auto_minor_version_upgrade = True
         self.allocated_storage = kwargs.get('allocated_storage')
@@ -57,7 +57,8 @@ class Database(object):
         self.db_subnet_group_name = kwargs.get("db_subnet_group_name")
         self.instance_create_time = str(datetime.datetime.utcnow())
         if self.db_subnet_group_name:
-            self.db_subnet_group = rds_backends[self.region].describe_subnet_groups(self.db_subnet_group_name)[0]
+            self.db_subnet_group = rds_backends[
+                self.region].describe_subnet_groups(self.db_subnet_group_name)[0]
         else:
             self.db_subnet_group = []
@@ -239,6 +240,7 @@ class Database(object):
 
 
 class SecurityGroup(object):
+
     def __init__(self, group_name, description):
         self.group_name = group_name
         self.description = description
@@ -284,7 +286,8 @@ class SecurityGroup(object):
         properties = cloudformation_json['Properties']
         group_name = resource_name.lower() + get_random_hex(12)
         description = properties['GroupDescription']
-        security_group_ingress_rules = properties.get('DBSecurityGroupIngress', [])
+        security_group_ingress_rules = properties.get(
+            'DBSecurityGroupIngress', [])
         tags = properties.get('Tags')
 
         ec2_backend = ec2_backends[region_name]
@@ -300,10 +303,12 @@ class SecurityGroup(object):
             if ingress_type == "CIDRIP":
                 security_group.authorize_cidr(ingress_value)
             elif ingress_type == "EC2SecurityGroupName":
-                subnet = ec2_backend.get_security_group_from_name(ingress_value)
+                subnet = ec2_backend.get_security_group_from_name(
+                    ingress_value)
                 security_group.authorize_security_group(subnet)
             elif ingress_type == "EC2SecurityGroupId":
-                subnet = ec2_backend.get_security_group_from_id(ingress_value)
+                subnet = ec2_backend.get_security_group_from_id(
+                    ingress_value)
                 security_group.authorize_security_group(subnet)
 
         return security_group
@@ -313,6 +318,7 @@ class SecurityGroup(object):
 
 
 class SubnetGroup(object):
+
     def __init__(self, subnet_name, description, subnets):
         self.subnet_name = subnet_name
         self.description = description
@@ -352,7 +358,8 @@ class SubnetGroup(object):
         tags = properties.get('Tags')
 
         ec2_backend = ec2_backends[region_name]
-        subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids]
+        subnets = [ec2_backend.get_subnet(subnet_id)
+                   for subnet_id in subnet_ids]
         rds_backend = rds_backends[region_name]
         subnet_group = rds_backend.create_subnet_group(
             subnet_name,
@@ -385,4 +392,6 @@ class RDSBackend(BaseBackend):
     def rds2_backend(self):
         return rds2_backends[self.region]
 
-rds_backends = dict((region.name, RDSBackend(region.name)) for region in boto.rds.regions())
+
+rds_backends = dict((region.name, RDSBackend(region.name))
+                    for region in boto.rds.regions())
diff --git a/moto/rds/responses.py b/moto/rds/responses.py
index 5207264f6..6b51c8fe6 100644
--- a/moto/rds/responses.py
+++ b/moto/rds/responses.py
@@ -41,7 +41,8 @@ class RDSResponse(BaseResponse):
             # VpcSecurityGroupIds.member.N
             "tags": list(),
         }
-        args['tags'] = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value'))
+        args['tags'] = self.unpack_complex_list_params(
+            'Tags.Tag', ('Key', 'Value'))
         return args
 
     def _get_db_replica_kwargs(self):
@@ -65,7 +66,8 @@ class RDSResponse(BaseResponse):
         while self._get_param('{0}.{1}.{2}'.format(label, count, names[0])):
             param = dict()
             for i in range(len(names)):
-                param[names[i]] = self._get_param('{0}.{1}.{2}'.format(label, count, names[i]))
+                param[names[i]] = self._get_param(
+                    '{0}.{1}.{2}'.format(label, count, names[i]))
             unpacked_list.append(param)
             count += 1
         return unpacked_list
@@ -93,7 +95,8 @@ class RDSResponse(BaseResponse):
     def modify_dbinstance(self):
         db_instance_identifier = self._get_param('DBInstanceIdentifier')
         db_kwargs = self._get_db_kwargs()
-        database = self.backend.modify_database(db_instance_identifier, db_kwargs)
+        database = self.backend.modify_database(
+            db_instance_identifier, db_kwargs)
         template = self.response_template(MODIFY_DATABASE_TEMPLATE)
         return template.render(database=database)
@@ -107,26 +110,30 @@ class RDSResponse(BaseResponse):
         group_name = self._get_param('DBSecurityGroupName')
         description = self._get_param('DBSecurityGroupDescription')
         tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value'))
-        security_group = self.backend.create_security_group(group_name, description, tags)
+        security_group = self.backend.create_security_group(
+            group_name, description, tags)
         template = self.response_template(CREATE_SECURITY_GROUP_TEMPLATE)
         return template.render(security_group=security_group)
 
     def describe_dbsecurity_groups(self):
         security_group_name = self._get_param('DBSecurityGroupName')
-        security_groups = self.backend.describe_security_groups(security_group_name)
+        security_groups = self.backend.describe_security_groups(
+            security_group_name)
         template = self.response_template(DESCRIBE_SECURITY_GROUPS_TEMPLATE)
         return template.render(security_groups=security_groups)
 
     def delete_dbsecurity_group(self):
         security_group_name = self._get_param('DBSecurityGroupName')
-        security_group = self.backend.delete_security_group(security_group_name)
+        security_group = self.backend.delete_security_group(
+            security_group_name)
         template = self.response_template(DELETE_SECURITY_GROUP_TEMPLATE)
         return template.render(security_group=security_group)
 
     def authorize_dbsecurity_group_ingress(self):
         security_group_name = self._get_param('DBSecurityGroupName')
         cidr_ip = self._get_param('CIDRIP')
-        security_group = self.backend.authorize_security_group(security_group_name, cidr_ip)
+        security_group = self.backend.authorize_security_group(
+            security_group_name, cidr_ip)
         template = self.response_template(AUTHORIZE_SECURITY_GROUP_TEMPLATE)
         return template.render(security_group=security_group)
@@ -134,9 +141,11 @@ class RDSResponse(BaseResponse):
         subnet_name = self._get_param('DBSubnetGroupName')
         description = self._get_param('DBSubnetGroupDescription')
         subnet_ids = self._get_multi_param('SubnetIds.member')
-        subnets = [ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids]
+        subnets = [ec2_backends[self.region].get_subnet(
+            subnet_id) for subnet_id in subnet_ids]
         tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value'))
-        subnet_group = self.backend.create_subnet_group(subnet_name, description, subnets, tags)
+        subnet_group = self.backend.create_subnet_group(
+            subnet_name, description, subnets, tags)
         template = self.response_template(CREATE_SUBNET_GROUP_TEMPLATE)
         return template.render(subnet_group=subnet_group)
diff --git a/moto/rds2/__init__.py b/moto/rds2/__init__.py
index 0feecfac4..723fa0968 100644
--- a/moto/rds2/__init__.py
+++ b/moto/rds2/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import rds2_backends
-from ..core.models import MockAWS, base_decorator, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator
 
 rds2_backend = rds2_backends['us-west-1']
 mock_rds2 = base_decorator(rds2_backends)
diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py
index 6fcae4b56..29e92941d 100644
--- a/moto/rds2/exceptions.py
+++ b/moto/rds2/exceptions.py
@@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest
 
 
 class RDSClientError(BadRequest):
+
     def __init__(self, code, message):
         super(RDSClientError, self).__init__()
         template = Template("""
@@ -20,6 +21,7 @@ class RDSClientError(BadRequest):
 
 
 class DBInstanceNotFoundError(RDSClientError):
+
     def __init__(self, database_identifier):
         super(DBInstanceNotFoundError, self).__init__(
            'DBInstanceNotFound',
@@ -27,6 +29,7 @@ class DBInstanceNotFoundError(RDSClientError):
 
 
 class DBSecurityGroupNotFoundError(RDSClientError):
+
     def __init__(self, security_group_name):
         super(DBSecurityGroupNotFoundError, self).__init__(
             'DBSecurityGroupNotFound',
@@ -34,12 +37,15 @@ class DBSecurityGroupNotFoundError(RDSClientError):
 
 
 class DBSubnetGroupNotFoundError(RDSClientError):
+
     def __init__(self, subnet_group_name):
         super(DBSubnetGroupNotFoundError, self).__init__(
             'DBSubnetGroupNotFound',
             "Subnet Group {0} not found.".format(subnet_group_name))
 
+
 class DBParameterGroupNotFoundError(RDSClientError):
+
     def __init__(self, db_parameter_group_name):
         super(DBParameterGroupNotFoundError, self).__init__(
             'DBParameterGroupNotFound',
diff --git a/moto/rds2/models.py b/moto/rds2/models.py
index 9bb1f8200..52cb298cd 100644
--- a/moto/rds2/models.py
+++ b/moto/rds2/models.py
@@ -4,7 +4,6 @@ import copy
 from collections import defaultdict
 
 import boto.rds2
-import json
 from jinja2 import Template
 from re import compile as re_compile
 from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
@@ -18,8 +17,8 @@ from .exceptions import (RDSClientError,
                          DBParameterGroupNotFoundError)
 
 
-
 class Database(object):
+
     def __init__(self, **kwargs):
         self.status = "available"
         self.is_replica = False
@@ -49,7 +48,8 @@ class Database(object):
         self.storage_type = kwargs.get("storage_type")
         self.master_username = kwargs.get('master_username')
         self.master_user_password = kwargs.get('master_user_password')
-        self.auto_minor_version_upgrade = kwargs.get('auto_minor_version_upgrade')
+        self.auto_minor_version_upgrade = kwargs.get(
+            'auto_minor_version_upgrade')
         if self.auto_minor_version_upgrade is None:
             self.auto_minor_version_upgrade = True
         self.allocated_storage = kwargs.get('allocated_storage')
@@ -69,18 +69,22 @@ class Database(object):
         self.multi_az = kwargs.get("multi_az")
         self.db_subnet_group_name = kwargs.get("db_subnet_group_name")
         if self.db_subnet_group_name:
-            self.db_subnet_group = rds2_backends[self.region].describe_subnet_groups(self.db_subnet_group_name)[0]
+            self.db_subnet_group = rds2_backends[
+                self.region].describe_subnet_groups(self.db_subnet_group_name)[0]
         else:
             self.db_subnet_group = None
         self.security_groups = kwargs.get('security_groups', [])
         self.vpc_security_group_ids = kwargs.get('vpc_security_group_ids', [])
-        self.preferred_maintenance_window = kwargs.get('preferred_maintenance_window', 'wed:06:38-wed:07:08')
+        self.preferred_maintenance_window = kwargs.get(
+            'preferred_maintenance_window', 'wed:06:38-wed:07:08')
         self.db_parameter_group_name = kwargs.get('db_parameter_group_name')
         if self.db_parameter_group_name and self.db_parameter_group_name not in rds2_backends[self.region].db_parameter_groups:
-                raise DBParameterGroupNotFoundError(self.db_parameter_group_name)
+            raise DBParameterGroupNotFoundError(self.db_parameter_group_name)
-        self.preferred_backup_window = kwargs.get('preferred_backup_window', '13:14-13:44')
-        self.license_model = kwargs.get('license_model', 'general-public-license')
+        self.preferred_backup_window = kwargs.get(
+            'preferred_backup_window', '13:14-13:44')
+        self.license_model = kwargs.get(
+            'license_model', 'general-public-license')
         self.option_group_name = kwargs.get('option_group_name', None)
         self.default_option_groups = {"MySQL": "default.mysql5.6",
                                       "mysql": "default.mysql5.6",
@@ -100,9 +104,9 @@ class Database(object):
             db_family, db_parameter_group_name = self.default_db_parameter_group_details()
             description = 'Default parameter group for {0}'.format(db_family)
             return [DBParameterGroup(name=db_parameter_group_name,
-                                    family=db_family,
-                                    description=description,
-                                    tags={})]
+                                     family=db_family,
+                                     description=description,
+                                     tags={})]
         else:
             return [rds2_backends[self.region].db_parameter_groups[self.db_parameter_group_name]]
@@ -354,12 +358,14 @@ class Database(object):
 
     def add_tags(self, tags):
         new_keys = [tag_set['Key'] for tag_set in tags]
-        self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in new_keys]
+        self.tags = [tag_set for tag_set in self.tags if tag_set[
+            'Key']
not in new_keys] self.tags.extend(tags) return self.tags def remove_tags(self, tag_keys): - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] def delete(self, region_name): backend = rds2_backends[region_name] @@ -561,7 +577,8 @@ class RDS2Backend(BaseBackend): def __init__(self, region): self.region = region - self.arn_regex = re_compile(r'^arn:aws:rds:.*:[0-9]*:(db|es|og|pg|ri|secgrp|snapshot|subgrp):.*$') + self.arn_regex = re_compile( + r'^arn:aws:rds:.*:[0-9]*:(db|es|og|pg|ri|secgrp|snapshot|subgrp):.*$') self.databases = {} self.db_parameter_groups = {} self.option_groups = {} @@ -699,14 +716,16 @@ class RDS2Backend(BaseBackend): raise RDSClientError('InvalidParameterValue', 'The parameter OptionGroupDescription must be provided and must not be blank.') if option_group_kwargs['engine_name'] not in valid_option_group_engines.keys(): - raise RDSClientError('InvalidParameterValue', 'Invalid DB engine: non-existant') + raise RDSClientError('InvalidParameterValue', + 'Invalid DB engine: non-existant') if option_group_kwargs['major_engine_version'] not in\ valid_option_group_engines[option_group_kwargs['engine_name']]: - raise RDSClientError('InvalidParameterCombination', - 'Cannot find major version {0} for {1}'.format( - option_group_kwargs['major_engine_version'], - option_group_kwargs['engine_name'] - )) + raise RDSClientError('InvalidParameterCombination', + 'Cannot find major version {0} for {1}'.format( + option_group_kwargs[ + 'major_engine_version'], + option_group_kwargs['engine_name'] + )) option_group = OptionGroup(**option_group_kwargs) self.option_groups[option_group_id] = option_group return option_group @@ -715,7 +734,8 @@ class RDS2Backend(BaseBackend): if option_group_name in self.option_groups: return self.option_groups.pop(option_group_name) else: - raise RDSClientError('OptionGroupNotFoundFault', 'Specified OptionGroupName: {0} not found.'.format(option_group_name)) + raise RDSClientError( + 'OptionGroupNotFoundFault', 'Specified OptionGroupName: {0} not found.'.format(option_group_name)) def describe_option_groups(self, option_group_kwargs): option_group_list = [] @@ -746,24 +766,25 @@ class RDS2Backend(BaseBackend): if not len(option_group_list): raise RDSClientError('OptionGroupNotFoundFault', 'Specified OptionGroupName: {0} not found.'.format(option_group_kwargs['name'])) - return option_group_list[marker:max_records+marker] + return option_group_list[marker:max_records + marker] @staticmethod def describe_option_group_options(engine_name, major_engine_version=None): default_option_group_options = {'mysql': {'5.6': '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you 
control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. 
Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, - 'oracle-ee': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity 
behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, - 'oracle-sa': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, - 'oracle-sa1': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, - 'sqlserver-ee': {'10.50': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - '11.00': '\n \n \n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}} + 'all': '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. 
The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'oracle-ee': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'oracle-sa': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'oracle-sa1': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'sqlserver-ee': {'10.50': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + '11.00': '\n \n \n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}} if engine_name not in default_option_group_options: - raise RDSClientError('InvalidParameterValue', 'Invalid DB engine: {0}'.format(engine_name)) + raise RDSClientError('InvalidParameterValue', + 'Invalid DB engine: {0}'.format(engine_name)) if major_engine_version and major_engine_version not in default_option_group_options[engine_name]: raise RDSClientError('InvalidParameterCombination', 'Cannot find major version {0} for {1}'.format(major_engine_version, engine_name)) @@ -779,9 +800,11 @@ class RDS2Backend(BaseBackend): raise RDSClientError('InvalidParameterValue', 'At least one option must be added, modified, or removed.') if options_to_remove: - self.option_groups[option_group_name].remove_options(options_to_remove) + self.option_groups[option_group_name].remove_options( + options_to_remove) if options_to_include: - self.option_groups[option_group_name].add_options(options_to_include) + self.option_groups[option_group_name].add_options( + options_to_include) return self.option_groups[option_group_name] def create_db_parameter_group(self, db_parameter_group_kwargs): @@ -821,7 +844,7 @@ class RDS2Backend(BaseBackend): else: continue - return db_parameter_group_list[marker:max_records+marker] + return db_parameter_group_list[marker:max_records + marker] def modify_db_parameter_group(self, 
db_parameter_group_name, db_parameter_group_parameters): if db_parameter_group_name not in self.db_parameter_groups: @@ -832,22 +855,17 @@ class RDS2Backend(BaseBackend): return db_parameter_group - def delete_db_parameter_group(self, db_parameter_group_name): - if db_parameter_group_name in self.db_parameter_groups: - return self.db_parameter_groups.pop(db_parameter_group_name) - else: - raise DBParameterGroupNotFoundError(db_parameter_group_name) - def list_tags_for_resource(self, arn): if self.arn_regex.match(arn): arn_breakdown = arn.split(':') - resource_type = arn_breakdown[len(arn_breakdown)-2] - resource_name = arn_breakdown[len(arn_breakdown)-1] + resource_type = arn_breakdown[len(arn_breakdown) - 2] + resource_name = arn_breakdown[len(arn_breakdown) - 1] if resource_type == 'db': # Database if resource_name in self.databases: return self.databases[resource_name].get_tags() elif resource_type == 'es': # Event Subscription - # TODO: Complete call to tags on resource type Event Subscription + # TODO: Complete call to tags on resource type Event + # Subscription return [] elif resource_type == 'og': # Option Group if resource_name in self.option_groups: @@ -856,7 +874,8 @@ class RDS2Backend(BaseBackend): if resource_name in self.db_parameter_groups: return self.db_parameter_groups[resource_name].get_tags() elif resource_type == 'ri': # Reserved DB instance - # TODO: Complete call to tags on resource type Reserved DB instance + # TODO: Complete call to tags on resource type Reserved DB + # instance return [] elif resource_type == 'secgrp': # DB security group if resource_name in self.security_groups: @@ -875,8 +894,8 @@ class RDS2Backend(BaseBackend): def remove_tags_from_resource(self, arn, tag_keys): if self.arn_regex.match(arn): arn_breakdown = arn.split(':') - resource_type = arn_breakdown[len(arn_breakdown)-2] - resource_name = arn_breakdown[len(arn_breakdown)-1] + resource_type = arn_breakdown[len(arn_breakdown) - 2] + resource_name = arn_breakdown[len(arn_breakdown) - 1] if resource_type == 'db': # Database if resource_name in self.databases: self.databases[resource_name].remove_tags(tag_keys) @@ -904,8 +923,8 @@ class RDS2Backend(BaseBackend): def add_tags_to_resource(self, arn, tags): if self.arn_regex.match(arn): arn_breakdown = arn.split(':') - resource_type = arn_breakdown[len(arn_breakdown)-2] - resource_name = arn_breakdown[len(arn_breakdown)-1] + resource_type = arn_breakdown[len(arn_breakdown) - 2] + resource_name = arn_breakdown[len(arn_breakdown) - 1] if resource_type == 'db': # Database if resource_name in self.databases: return self.databases[resource_name].add_tags(tags) @@ -932,6 +951,7 @@ class RDS2Backend(BaseBackend): class OptionGroup(object): + def __init__(self, name, engine_name, major_engine_version, description=None): self.engine_name = engine_name self.major_engine_version = major_engine_version @@ -966,11 +986,13 @@ class OptionGroup(object): return template.render(option_group=self) def remove_options(self, options_to_remove): - # TODO: Check for option in self.options and remove if exists. Raise error otherwise + # TODO: Check for option in self.options and remove if exists. Raise + # error otherwise return def add_options(self, options_to_add): - # TODO: Validate option and add it to self.options. If invalid raise error + # TODO: Validate option and add it to self.options. 
If invalid raise + # error return def get_tags(self): @@ -978,22 +1000,26 @@ class OptionGroup(object): def add_tags(self, tags): new_keys = [tag_set['Key'] for tag_set in tags] - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in new_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in new_keys] self.tags.extend(tags) return self.tags def remove_tags(self, tag_keys): - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] class OptionGroupOption(object): + def __init__(self, **kwargs): self.default_port = kwargs.get('default_port') self.description = kwargs.get('description') self.engine_name = kwargs.get('engine_name') self.major_engine_version = kwargs.get('major_engine_version') self.name = kwargs.get('name') - self.option_group_option_settings = self._make_option_group_option_settings(kwargs.get('option_group_option_settings', [])) + self.option_group_option_settings = self._make_option_group_option_settings( + kwargs.get('option_group_option_settings', [])) self.options_depended_on = kwargs.get('options_depended_on', []) self.permanent = kwargs.get('permanent') self.persistent = kwargs.get('persistent') @@ -1044,6 +1070,7 @@ class OptionGroupOption(object): class OptionGroupOptionSetting(object): + def __init__(self, *kwargs): self.allowed_values = kwargs.get('allowed_values') self.apply_type = kwargs.get('apply_type') @@ -1063,7 +1090,9 @@ class OptionGroupOptionSetting(object): """) return template.render(option_group_option_setting=self) + class DBParameterGroup(object): + def __init__(self, name, description, family, tags): self.name = name self.description = description @@ -1084,12 +1113,14 @@ class DBParameterGroup(object): def add_tags(self, tags): new_keys = [tag_set['Key'] for tag_set in tags] - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in new_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in new_keys] self.tags.extend(tags) return self.tags def remove_tags(self, tag_keys): - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] def update_parameters(self, new_parameters): for new_parameter in new_parameters: @@ -1118,9 +1149,11 @@ class DBParameterGroup(object): }) rds2_backend = rds2_backends[region_name] - db_parameter_group = rds2_backend.create_db_parameter_group(db_parameter_group_kwargs) + db_parameter_group = rds2_backend.create_db_parameter_group( + db_parameter_group_kwargs) db_parameter_group.update_parameters(db_parameter_group_parameters) return db_parameter_group -rds2_backends = dict((region.name, RDS2Backend(region.name)) for region in boto.rds2.regions()) +rds2_backends = dict((region.name, RDS2Backend(region.name)) + for region in boto.rds2.regions()) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index 879edbdd3..96b98463d 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -5,8 +5,6 @@ from moto.core.responses import BaseResponse from moto.ec2.models import ec2_backends from .models import rds2_backends from .exceptions import DBParameterGroupNotFoundError -import json -import re class RDS2Response(BaseResponse): @@ -45,7 +43,8 @@ class RDS2Response(BaseResponse): # VpcSecurityGroupIds.member.N "tags": list(), } - args['tags'] = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + 
args['tags'] = self.unpack_complex_list_params( + 'Tags.Tag', ('Key', 'Value')) return args def _get_db_replica_kwargs(self): @@ -85,7 +84,8 @@ class RDS2Response(BaseResponse): while self._get_param('{0}.{1}.{2}'.format(label, count, names[0])): param = dict() for i in range(len(names)): - param[names[i]] = self._get_param('{0}.{1}.{2}'.format(label, count, names[i])) + param[names[i]] = self._get_param( + '{0}.{1}.{2}'.format(label, count, names[i])) unpacked_list.append(param) count += 1 return unpacked_list @@ -94,7 +94,8 @@ class RDS2Response(BaseResponse): unpacked_list = list() count = 1 while self._get_param('{0}.{1}'.format(label, count)): - unpacked_list.append(self._get_param('{0}.{1}'.format(label, count))) + unpacked_list.append(self._get_param( + '{0}.{1}'.format(label, count))) count += 1 return unpacked_list @@ -132,7 +133,8 @@ class RDS2Response(BaseResponse): def modify_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') db_kwargs = self._get_db_kwargs() - database = self.backend.modify_database(db_instance_identifier, db_kwargs) + database = self.backend.modify_database( + db_instance_identifier, db_kwargs) template = self.response_template(MODIFY_DATABASE_TEMPLATE) return template.render(database=database) @@ -181,7 +183,8 @@ class RDS2Response(BaseResponse): group_name = self._get_param('DBSecurityGroupName') description = self._get_param('DBSecurityGroupDescription') tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) - security_group = self.backend.create_security_group(group_name, description, tags) + security_group = self.backend.create_security_group( + group_name, description, tags) template = self.response_template(CREATE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) @@ -190,7 +193,8 @@ class RDS2Response(BaseResponse): def describe_db_security_groups(self): security_group_name = self._get_param('DBSecurityGroupName') - security_groups = self.backend.describe_security_groups(security_group_name) + security_groups = self.backend.describe_security_groups( + security_group_name) template = self.response_template(DESCRIBE_SECURITY_GROUPS_TEMPLATE) return template.render(security_groups=security_groups) @@ -199,7 +203,8 @@ class RDS2Response(BaseResponse): def delete_db_security_group(self): security_group_name = self._get_param('DBSecurityGroupName') - security_group = self.backend.delete_security_group(security_group_name) + security_group = self.backend.delete_security_group( + security_group_name) template = self.response_template(DELETE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) @@ -209,7 +214,8 @@ class RDS2Response(BaseResponse): def authorize_db_security_group_ingress(self): security_group_name = self._get_param('DBSecurityGroupName') cidr_ip = self._get_param('CIDRIP') - security_group = self.backend.authorize_security_group(security_group_name, cidr_ip) + security_group = self.backend.authorize_security_group( + security_group_name, cidr_ip) template = self.response_template(AUTHORIZE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) @@ -221,8 +227,10 @@ class RDS2Response(BaseResponse): description = self._get_param('DBSubnetGroupDescription') subnet_ids = self._get_multi_param('SubnetIds.SubnetIdentifier') tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) - subnets = [ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids] - subnet_group = 
self.backend.create_subnet_group(subnet_name, description, subnets, tags) + subnets = [ec2_backends[self.region].get_subnet( + subnet_id) for subnet_id in subnet_ids] + subnet_group = self.backend.create_subnet_group( + subnet_name, description, subnets, tags) template = self.response_template(CREATE_SUBNET_GROUP_TEMPLATE) return template.render(subnet_group=subnet_group) @@ -267,7 +275,8 @@ class RDS2Response(BaseResponse): def describe_option_group_options(self): engine_name = self._get_param('EngineName') major_engine_version = self._get_param('MajorEngineVersion') - option_group_options = self.backend.describe_option_group_options(engine_name, major_engine_version) + option_group_options = self.backend.describe_option_group_options( + engine_name, major_engine_version) return option_group_options def modify_option_group(self): @@ -287,7 +296,8 @@ class RDS2Response(BaseResponse): count = 1 options_to_remove = [] while self._get_param('OptionsToRemove.member.{0}'.format(count)): - options_to_remove.append(self._get_param('OptionsToRemove.member.{0}'.format(count))) + options_to_remove.append(self._get_param( + 'OptionsToRemove.member.{0}'.format(count))) count += 1 apply_immediately = self._get_param('ApplyImmediately') option_group = self.backend.modify_option_group(option_group_name, @@ -314,7 +324,8 @@ class RDS2Response(BaseResponse): kwargs['max_records'] = self._get_param('MaxRecords') kwargs['marker'] = self._get_param('Marker') db_parameter_groups = self.backend.describe_db_parameter_groups(kwargs) - template = self.response_template(DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE) + template = self.response_template( + DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE) return template.render(db_parameter_groups=db_parameter_groups) def modify_dbparameter_group(self): @@ -347,7 +358,8 @@ class RDS2Response(BaseResponse): def describe_db_parameters(self): db_parameter_group_name = self._get_param('DBParameterGroupName') - db_parameter_groups = self.backend.describe_db_parameter_groups({'name': db_parameter_group_name}) + db_parameter_groups = self.backend.describe_db_parameter_groups( + {'name': db_parameter_group_name}) if not db_parameter_groups: raise DBParameterGroupNotFoundError(db_parameter_group_name) @@ -359,7 +371,8 @@ class RDS2Response(BaseResponse): def delete_db_parameter_group(self): kwargs = self._get_db_parameter_group_kwargs() - db_parameter_group = self.backend.delete_db_parameter_group(kwargs['name']) + db_parameter_group = self.backend.delete_db_parameter_group(kwargs[ + 'name']) template = self.response_template(DELETE_DB_PARAMETER_GROUP_TEMPLATE) return template.render(db_parameter_group=db_parameter_group) diff --git a/moto/redshift/__init__.py b/moto/redshift/__init__.py index 58be5fc70..06f778e8d 100644 --- a/moto/redshift/__init__.py +++ b/moto/redshift/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import redshift_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator redshift_backend = redshift_backends['us-east-1'] mock_redshift = base_decorator(redshift_backends) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index 6d1b2c3bb..8bcca807e 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest class RedshiftClientError(BadRequest): + def __init__(self, code, message): super(RedshiftClientError, self).__init__() 
self.description = json.dumps({ @@ -18,6 +19,7 @@ class RedshiftClientError(BadRequest): class ClusterNotFoundError(RedshiftClientError): + def __init__(self, cluster_identifier): super(ClusterNotFoundError, self).__init__( 'ClusterNotFound', @@ -25,6 +27,7 @@ class ClusterNotFoundError(RedshiftClientError): class ClusterSubnetGroupNotFoundError(RedshiftClientError): + def __init__(self, subnet_identifier): super(ClusterSubnetGroupNotFoundError, self).__init__( 'ClusterSubnetGroupNotFound', @@ -32,6 +35,7 @@ class ClusterSubnetGroupNotFoundError(RedshiftClientError): class ClusterSecurityGroupNotFoundError(RedshiftClientError): + def __init__(self, group_identifier): super(ClusterSecurityGroupNotFoundError, self).__init__( 'ClusterSecurityGroupNotFound', @@ -39,6 +43,7 @@ class ClusterSecurityGroupNotFoundError(RedshiftClientError): class ClusterParameterGroupNotFoundError(RedshiftClientError): + def __init__(self, group_identifier): super(ClusterParameterGroupNotFoundError, self).__init__( 'ClusterParameterGroupNotFound', @@ -46,6 +51,7 @@ class ClusterParameterGroupNotFoundError(RedshiftClientError): class InvalidSubnetError(RedshiftClientError): + def __init__(self, subnet_identifier): super(InvalidSubnetError, self).__init__( 'InvalidSubnet', diff --git a/moto/redshift/models.py b/moto/redshift/models.py index bd81526df..af6c6f643 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -13,13 +13,14 @@ from .exceptions import ( class Cluster(object): + def __init__(self, redshift_backend, cluster_identifier, node_type, master_username, - master_user_password, db_name, cluster_type, cluster_security_groups, - vpc_security_group_ids, cluster_subnet_group_name, availability_zone, - preferred_maintenance_window, cluster_parameter_group_name, - automated_snapshot_retention_period, port, cluster_version, - allow_version_upgrade, number_of_nodes, publicly_accessible, - encrypted, region): + master_user_password, db_name, cluster_type, cluster_security_groups, + vpc_security_group_ids, cluster_subnet_group_name, availability_zone, + preferred_maintenance_window, cluster_parameter_group_name, + automated_snapshot_retention_period, port, cluster_version, + allow_version_upgrade, number_of_nodes, publicly_accessible, + encrypted, region): self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier self.node_type = node_type @@ -34,7 +35,8 @@ class Cluster(object): self.allow_version_upgrade = allow_version_upgrade if allow_version_upgrade is not None else True self.cluster_version = cluster_version if cluster_version else "1.0" self.port = int(port) if port else 5439 - self.automated_snapshot_retention_period = int(automated_snapshot_retention_period) if automated_snapshot_retention_period else 1 + self.automated_snapshot_retention_period = int( + automated_snapshot_retention_period) if automated_snapshot_retention_period else 1 self.preferred_maintenance_window = preferred_maintenance_window if preferred_maintenance_window else "Mon:03:00-Mon:03:30" if cluster_parameter_group_name: @@ -68,7 +70,8 @@ class Cluster(object): properties = cloudformation_json['Properties'] if 'ClusterSubnetGroupName' in properties: - subnet_group_name = properties['ClusterSubnetGroupName'].cluster_subnet_group_name + subnet_group_name = properties[ + 'ClusterSubnetGroupName'].cluster_subnet_group_name else: subnet_group_name = None cluster = redshift_backend.create_cluster( @@ -78,13 +81,17 @@ class Cluster(object): master_user_password=properties.get('MasterUserPassword'), 
db_name=properties.get('DBName'), cluster_type=properties.get('ClusterType'), - cluster_security_groups=properties.get('ClusterSecurityGroups', []), + cluster_security_groups=properties.get( + 'ClusterSecurityGroups', []), vpc_security_group_ids=properties.get('VpcSecurityGroupIds', []), cluster_subnet_group_name=subnet_group_name, availability_zone=properties.get('AvailabilityZone'), - preferred_maintenance_window=properties.get('PreferredMaintenanceWindow'), - cluster_parameter_group_name=properties.get('ClusterParameterGroupName'), - automated_snapshot_retention_period=properties.get('AutomatedSnapshotRetentionPeriod'), + preferred_maintenance_window=properties.get( + 'PreferredMaintenanceWindow'), + cluster_parameter_group_name=properties.get( + 'ClusterParameterGroupName'), + automated_snapshot_retention_period=properties.get( + 'AutomatedSnapshotRetentionPeriod'), port=properties.get('Port'), cluster_version=properties.get('ClusterVersion'), allow_version_upgrade=properties.get('AllowVersionUpgrade'), @@ -214,6 +221,7 @@ class SubnetGroup(object): class SecurityGroup(object): + def __init__(self, cluster_security_group_name, description): self.cluster_security_group_name = cluster_security_group_name self.description = description @@ -293,7 +301,8 @@ class RedshiftBackend(BaseBackend): def modify_cluster(self, **cluster_kwargs): cluster_identifier = cluster_kwargs.pop('cluster_identifier') - new_cluster_identifier = cluster_kwargs.pop('new_cluster_identifier', None) + new_cluster_identifier = cluster_kwargs.pop( + 'new_cluster_identifier', None) cluster = self.describe_clusters(cluster_identifier)[0] @@ -313,7 +322,8 @@ class RedshiftBackend(BaseBackend): raise ClusterNotFoundError(cluster_identifier) def create_cluster_subnet_group(self, cluster_subnet_group_name, description, subnet_ids): - subnet_group = SubnetGroup(self.ec2_backend, cluster_subnet_group_name, description, subnet_ids) + subnet_group = SubnetGroup( + self.ec2_backend, cluster_subnet_group_name, description, subnet_ids) self.subnet_groups[cluster_subnet_group_name] = subnet_group return subnet_group @@ -332,7 +342,8 @@ class RedshiftBackend(BaseBackend): raise ClusterSubnetGroupNotFoundError(subnet_identifier) def create_cluster_security_group(self, cluster_security_group_name, description): - security_group = SecurityGroup(cluster_security_group_name, description) + security_group = SecurityGroup( + cluster_security_group_name, description) self.security_groups[cluster_security_group_name] = security_group return security_group @@ -351,8 +362,9 @@ class RedshiftBackend(BaseBackend): raise ClusterSecurityGroupNotFoundError(security_group_identifier) def create_cluster_parameter_group(self, cluster_parameter_group_name, - group_family, description): - parameter_group = ParameterGroup(cluster_parameter_group_name, group_family, description) + group_family, description): + parameter_group = ParameterGroup( + cluster_parameter_group_name, group_family, description) self.parameter_groups[cluster_parameter_group_name] = parameter_group return parameter_group diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index a9c977b4e..23c653332 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -134,7 +134,8 @@ class RedshiftResponse(BaseResponse): def describe_cluster_subnet_groups(self): subnet_identifier = self._get_param("ClusterSubnetGroupName") - subnet_groups = self.redshift_backend.describe_cluster_subnet_groups(subnet_identifier) + subnet_groups = 
self.redshift_backend.describe_cluster_subnet_groups( + subnet_identifier) return json.dumps({ "DescribeClusterSubnetGroupsResponse": { @@ -160,7 +161,8 @@ class RedshiftResponse(BaseResponse): }) def create_cluster_security_group(self): - cluster_security_group_name = self._get_param('ClusterSecurityGroupName') + cluster_security_group_name = self._get_param( + 'ClusterSecurityGroupName') description = self._get_param('Description') security_group = self.redshift_backend.create_cluster_security_group( @@ -180,8 +182,10 @@ class RedshiftResponse(BaseResponse): }) def describe_cluster_security_groups(self): - cluster_security_group_name = self._get_param("ClusterSecurityGroupName") - security_groups = self.redshift_backend.describe_cluster_security_groups(cluster_security_group_name) + cluster_security_group_name = self._get_param( + "ClusterSecurityGroupName") + security_groups = self.redshift_backend.describe_cluster_security_groups( + cluster_security_group_name) return json.dumps({ "DescribeClusterSecurityGroupsResponse": { @@ -196,7 +200,8 @@ class RedshiftResponse(BaseResponse): def delete_cluster_security_group(self): security_group_identifier = self._get_param("ClusterSecurityGroupName") - self.redshift_backend.delete_cluster_security_group(security_group_identifier) + self.redshift_backend.delete_cluster_security_group( + security_group_identifier) return json.dumps({ "DeleteClusterSecurityGroupResponse": { @@ -230,7 +235,8 @@ class RedshiftResponse(BaseResponse): def describe_cluster_parameter_groups(self): cluster_parameter_group_name = self._get_param("ParameterGroupName") - parameter_groups = self.redshift_backend.describe_cluster_parameter_groups(cluster_parameter_group_name) + parameter_groups = self.redshift_backend.describe_cluster_parameter_groups( + cluster_parameter_group_name) return json.dumps({ "DescribeClusterParameterGroupsResponse": { @@ -245,7 +251,8 @@ class RedshiftResponse(BaseResponse): def delete_cluster_parameter_group(self): cluster_parameter_group_name = self._get_param("ParameterGroupName") - self.redshift_backend.delete_cluster_parameter_group(cluster_parameter_group_name) + self.redshift_backend.delete_cluster_parameter_group( + cluster_parameter_group_name) return json.dumps({ "DeleteClusterParameterGroupResponse": { diff --git a/moto/route53/models.py b/moto/route53/models.py index 6b293a1ca..338c6d30a 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -10,6 +10,7 @@ from moto.core.utils import get_random_hex class HealthCheck(object): + def __init__(self, health_check_id, health_check_args): self.id = health_check_id self.ip_address = health_check_args.get("ip_address") @@ -63,6 +64,7 @@ class HealthCheck(object): class RecordSet(object): + def __init__(self, kwargs): self.name = kwargs.get('Name') self._type = kwargs.get('Type') @@ -83,25 +85,29 @@ class RecordSet(object): if zone_name: hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name) else: - hosted_zone = route53_backend.get_hosted_zone(properties["HostedZoneId"]) + hosted_zone = route53_backend.get_hosted_zone( + properties["HostedZoneId"]) record_set = hosted_zone.add_rrset(properties) return record_set @classmethod def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): - cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name) + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name) return 
cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name) @classmethod def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): - # this will break if you changed the zone the record is in, unfortunately + # this will break if you changed the zone the record is in, + # unfortunately properties = cloudformation_json['Properties'] zone_name = properties.get("HostedZoneName") if zone_name: hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name) else: - hosted_zone = route53_backend.get_hosted_zone(properties["HostedZoneId"]) + hosted_zone = route53_backend.get_hosted_zone( + properties["HostedZoneId"]) try: hosted_zone.delete_rrset_by_name(resource_name) @@ -141,7 +147,8 @@ class RecordSet(object): def delete(self, *args, **kwargs): ''' Not exposed as part of the Route 53 API - used for CloudFormation. args are ignored ''' - hosted_zone = route53_backend.get_hosted_zone_by_name(self.hosted_zone_name) + hosted_zone = route53_backend.get_hosted_zone_by_name( + self.hosted_zone_name) if not hosted_zone: hosted_zone = route53_backend.get_hosted_zone(self.hosted_zone_id) hosted_zone.delete_rrset_by_name(self.name) @@ -173,17 +180,21 @@ class FakeZone(object): return new_rrset def delete_rrset_by_name(self, name): - self.rrsets = [record_set for record_set in self.rrsets if record_set.name != name] + self.rrsets = [ + record_set for record_set in self.rrsets if record_set.name != name] def delete_rrset_by_id(self, set_identifier): - self.rrsets = [record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier] + self.rrsets = [ + record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier] def get_record_sets(self, type_filter, name_filter): record_sets = list(self.rrsets) # Copy the list if type_filter: - record_sets = [record_set for record_set in record_sets if record_set._type == type_filter] + record_sets = [ + record_set for record_set in record_sets if record_set._type == type_filter] if name_filter: - record_sets = [record_set for record_set in record_sets if record_set.name == name_filter] + record_sets = [ + record_set for record_set in record_sets if record_set.name == name_filter] return record_sets @@ -196,11 +207,13 @@ class FakeZone(object): properties = cloudformation_json['Properties'] name = properties["Name"] - hosted_zone = route53_backend.create_hosted_zone(name, private_zone=False) + hosted_zone = route53_backend.create_hosted_zone( + name, private_zone=False) return hosted_zone class RecordSetGroup(object): + def __init__(self, hosted_zone_id, record_sets): self.hosted_zone_id = hosted_zone_id self.record_sets = record_sets @@ -232,7 +245,8 @@ class Route53Backend(BaseBackend): def create_hosted_zone(self, name, private_zone, comment=None): new_id = get_random_hex() - new_zone = FakeZone(name, new_id, private_zone=private_zone, comment=comment) + new_zone = FakeZone( + name, new_id, private_zone=private_zone, comment=comment) self.zones[new_id] = new_zone return new_zone @@ -285,4 +299,5 @@ class Route53Backend(BaseBackend): def delete_health_check(self, health_check_id): return self.health_checks.pop(health_check_id, None) + route53_backend = Route53Backend() diff --git a/moto/route53/responses.py b/moto/route53/responses.py index d796660e1..07f6e2303 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -8,43 +8,45 @@ import xmltodict class Route53 (BaseResponse): + def list_or_create_hostzone_response(self, request, full_url, 
headers): self.setup_class(request, full_url, headers) if request.method == "POST": - elements = xmltodict.parse(self.body) - if "HostedZoneConfig" in elements["CreateHostedZoneRequest"]: - comment = elements["CreateHostedZoneRequest"]["HostedZoneConfig"]["Comment"] - try: - # in boto3, this field is set directly in the xml - private_zone = elements["CreateHostedZoneRequest"]["HostedZoneConfig"]["PrivateZone"] - except KeyError: - # if a VPC subsection is only included in xmls params when private_zone=True, - # see boto: boto/route53/connection.py - private_zone = 'VPC' in elements["CreateHostedZoneRequest"] - else: - comment = None - private_zone = False + elements = xmltodict.parse(self.body) + if "HostedZoneConfig" in elements["CreateHostedZoneRequest"]: + comment = elements["CreateHostedZoneRequest"][ + "HostedZoneConfig"]["Comment"] + try: + # in boto3, this field is set directly in the xml + private_zone = elements["CreateHostedZoneRequest"][ + "HostedZoneConfig"]["PrivateZone"] + except KeyError: + # a VPC subsection is only included in xml params when private_zone=True, + # see boto: boto/route53/connection.py + private_zone = 'VPC' in elements["CreateHostedZoneRequest"] + else: + comment = None + private_zone = False - name = elements["CreateHostedZoneRequest"]["Name"] + name = elements["CreateHostedZoneRequest"]["Name"] - if name[-1] != ".": - name += "." + if name[-1] != ".": + name += "." - new_zone = route53_backend.create_hosted_zone( - name, - comment=comment, - private_zone=private_zone, - ) - template = Template(CREATE_HOSTED_ZONE_RESPONSE) - return 201, headers, template.render(zone=new_zone) + new_zone = route53_backend.create_hosted_zone( + name, + comment=comment, + private_zone=private_zone, + ) + template = Template(CREATE_HOSTED_ZONE_RESPONSE) + return 201, headers, template.render(zone=new_zone) elif request.method == "GET": all_zones = route53_backend.get_all_hosted_zones() template = Template(LIST_HOSTED_ZONES_RESPONSE) return 200, headers, template.render(zones=all_zones) - def get_or_delete_hostzone_response(self, request, full_url, headers): self.setup_class(request, full_url, headers) parsed_url = urlparse(full_url) @@ -61,7 +63,6 @@ class Route53 (BaseResponse): route53_backend.delete_hosted_zone(zoneid) return 200, headers, DELETE_HOSTED_ZONE_RESPONSE - def rrset_response(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -76,18 +77,22 @@ if method == "POST": elements = xmltodict.parse(self.body) - change_list = elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change'] + change_list = elements['ChangeResourceRecordSetsRequest'][ + 'ChangeBatch']['Changes']['Change'] if not isinstance(change_list, list): - change_list = [elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change']] + change_list = [elements['ChangeResourceRecordSetsRequest'][ + 'ChangeBatch']['Changes']['Change']] for value in change_list: action = value['Action'] record_set = value['ResourceRecordSet'] if action in ('CREATE', 'UPSERT'): if 'ResourceRecords' in record_set: - resource_records = list(record_set['ResourceRecords'].values())[0] + resource_records = list( + record_set['ResourceRecords'].values())[0] if not isinstance(resource_records, list): - # Depending on how many records there are, this may or may not be a list + # Depending on how many records there are, this may + # or may not be a list resource_records = [resource_records] record_values = [x['Value'] for x in
resource_records] elif 'AliasTarget' in record_set: @@ -99,7 +104,8 @@ class Route53 (BaseResponse): the_zone.upsert_rrset(record_set) elif action == "DELETE": if 'SetIdentifier' in record_set: - the_zone.delete_rrset_by_id(record_set["SetIdentifier"]) + the_zone.delete_rrset_by_id( + record_set["SetIdentifier"]) else: the_zone.delete_rrset_by_name(record_set["Name"]) @@ -113,7 +119,6 @@ class Route53 (BaseResponse): record_sets = the_zone.get_record_sets(type_filter, name_filter) return 200, headers, template.render(record_sets=record_sets) - def health_check_response(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -121,7 +126,8 @@ class Route53 (BaseResponse): method = request.method if method == "POST": - properties = xmltodict.parse(self.body)['CreateHealthCheckRequest']['HealthCheckConfig'] + properties = xmltodict.parse(self.body)['CreateHealthCheckRequest'][ + 'HealthCheckConfig'] health_check_args = { "ip_address": properties.get('IPAddress'), "port": properties.get('Port'), @@ -132,7 +138,8 @@ class Route53 (BaseResponse): "request_interval": properties.get('RequestInterval'), "failure_threshold": properties.get('FailureThreshold'), } - health_check = route53_backend.create_health_check(health_check_args) + health_check = route53_backend.create_health_check( + health_check_args) template = Template(CREATE_HEALTH_CHECK_RESPONSE) return 201, headers, template.render(health_check=health_check) elif method == "DELETE": @@ -152,8 +159,8 @@ class Route53 (BaseResponse): action = 'tags' elif 'trafficpolicyinstances' in full_url: action = 'policies' - raise NotImplementedError("The action for {0} has not been implemented for route 53".format(action)) - + raise NotImplementedError( + "The action for {0} has not been implemented for route 53".format(action)) def list_or_change_tags_for_resource_request(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -182,6 +189,7 @@ class Route53 (BaseResponse): return 200, headers, template.render() + LIST_TAGS_FOR_RESOURCE_RESPONSE = """ diff --git a/moto/s3/__init__.py b/moto/s3/__init__.py index 2c54a8d5a..84c1cbde0 100644 --- a/moto/s3/__init__.py +++ b/moto/s3/__init__.py @@ -3,4 +3,4 @@ from .models import s3_backend s3_backends = {"global": s3_backend} mock_s3 = s3_backend.decorator -mock_s3_deprecated = s3_backend.deprecated_decorator \ No newline at end of file +mock_s3_deprecated = s3_backend.deprecated_decorator diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 2f444e2dd..df817ba78 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -12,6 +12,7 @@ ERROR_WITH_KEY_NAME = """{% extends 'single_error' %} class S3ClientError(RESTError): + def __init__(self, *args, **kwargs): kwargs.setdefault('template', 'single_error') self.templates['bucket_error'] = ERROR_WITH_BUCKET_NAME @@ -19,6 +20,7 @@ class S3ClientError(RESTError): class BucketError(S3ClientError): + def __init__(self, *args, **kwargs): kwargs.setdefault('template', 'bucket_error') self.templates['bucket_error'] = ERROR_WITH_BUCKET_NAME diff --git a/moto/s3/models.py b/moto/s3/models.py index d5e156498..c7bf557ca 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -120,6 +120,7 @@ class FakeKey(object): class FakeMultipart(object): + def __init__(self, key_name, metadata): self.key_name = key_name self.metadata = metadata @@ -167,6 +168,7 @@ class FakeMultipart(object): class FakeGrantee(object): + def __init__(self, id='', uri='', display_name=''): self.id = id self.uri = uri @@ -177,9 
+179,12 @@ class FakeGrantee(object): return 'Group' if self.uri else 'CanonicalUser' -ALL_USERS_GRANTEE = FakeGrantee(uri='http://acs.amazonaws.com/groups/global/AllUsers') -AUTHENTICATED_USERS_GRANTEE = FakeGrantee(uri='http://acs.amazonaws.com/groups/global/AuthenticatedUsers') -LOG_DELIVERY_GRANTEE = FakeGrantee(uri='http://acs.amazonaws.com/groups/s3/LogDelivery') +ALL_USERS_GRANTEE = FakeGrantee( + uri='http://acs.amazonaws.com/groups/global/AllUsers') +AUTHENTICATED_USERS_GRANTEE = FakeGrantee( + uri='http://acs.amazonaws.com/groups/global/AuthenticatedUsers') +LOG_DELIVERY_GRANTEE = FakeGrantee( + uri='http://acs.amazonaws.com/groups/s3/LogDelivery') PERMISSION_FULL_CONTROL = 'FULL_CONTROL' PERMISSION_WRITE = 'WRITE' @@ -189,27 +194,32 @@ PERMISSION_READ_ACP = 'READ_ACP' class FakeGrant(object): + def __init__(self, grantees, permissions): self.grantees = grantees self.permissions = permissions class FakeAcl(object): + def __init__(self, grants=[]): self.grants = grants def get_canned_acl(acl): - owner_grantee = FakeGrantee(id='75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a') + owner_grantee = FakeGrantee( + id='75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a') grants = [FakeGrant([owner_grantee], [PERMISSION_FULL_CONTROL])] if acl == 'private': pass # no other permissions elif acl == 'public-read': grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ])) elif acl == 'public-read-write': - grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ, PERMISSION_WRITE])) + grants.append(FakeGrant([ALL_USERS_GRANTEE], [ + PERMISSION_READ, PERMISSION_WRITE])) elif acl == 'authenticated-read': - grants.append(FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ])) + grants.append( + FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ])) elif acl == 'bucket-owner-read': pass # TODO: bucket owner ACL elif acl == 'bucket-owner-full-control': @@ -217,13 +227,15 @@ def get_canned_acl(acl): elif acl == 'aws-exec-read': pass # TODO: bucket owner, EC2 Read elif acl == 'log-delivery-write': - grants.append(FakeGrant([LOG_DELIVERY_GRANTEE], [PERMISSION_READ_ACP, PERMISSION_WRITE])) + grants.append(FakeGrant([LOG_DELIVERY_GRANTEE], [ + PERMISSION_READ_ACP, PERMISSION_WRITE])) else: assert False, 'Unknown canned acl: %s' % (acl,) return FakeAcl(grants=grants) class LifecycleRule(object): + def __init__(self, id=None, prefix=None, status=None, expiration_days=None, expiration_date=None, transition_days=None, transition_date=None, storage_class=None): @@ -271,7 +283,8 @@ class FakeBucket(object): expiration_date=expiration.get('Date') if expiration else None, transition_days=transition.get('Days') if transition else None, transition_date=transition.get('Date') if transition else None, - storage_class=transition['StorageClass'] if transition else None, + storage_class=transition[ + 'StorageClass'] if transition else None, )) def delete_lifecycle(self): @@ -283,9 +296,11 @@ class FakeBucket(object): def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == 'DomainName': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "DomainName" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "DomainName" ]"') elif attribute_name == 'WebsiteURL': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"') raise UnformattedGetAttTemplateException() def set_acl(self, acl): @@ 
-470,20 +485,24 @@ class S3Backend(BaseBackend): key_without_prefix = key_name.replace(prefix, "", 1) if delimiter and delimiter in key_without_prefix: # If delimiter, we need to split out folder_results - key_without_delimiter = key_without_prefix.split(delimiter)[0] - folder_results.add("{0}{1}{2}".format(prefix, key_without_delimiter, delimiter)) + key_without_delimiter = key_without_prefix.split(delimiter)[ + 0] + folder_results.add("{0}{1}{2}".format( + prefix, key_without_delimiter, delimiter)) else: key_results.add(key) else: for key_name, key in bucket.keys.items(): if delimiter and delimiter in key_name: # If delimiter, we need to split out folder_results - folder_results.add(key_name.split(delimiter)[0] + delimiter) + folder_results.add(key_name.split( + delimiter)[0] + delimiter) else: key_results.add(key) key_results = sorted(key_results, key=lambda key: key.name) - folder_results = [folder_name for folder_name in sorted(folder_results, key=lambda key: key)] + folder_results = [folder_name for folder_name in sorted( + folder_results, key=lambda key: key)] return key_results, folder_results @@ -502,7 +521,8 @@ class S3Backend(BaseBackend): src_key_name = clean_key_name(src_key_name) dest_key_name = clean_key_name(dest_key_name) dest_bucket = self.get_bucket(dest_bucket_name) - key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id) + key = self.get_key(src_bucket_name, src_key_name, + version_id=src_version_id) if dest_key_name != src_key_name: key = key.copy(dest_key_name) dest_bucket.keys[dest_key_name] = key diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 07be98e7b..e123d76e1 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -33,6 +33,7 @@ def is_delete_keys(request, path, bucket_name): class ResponseObject(_TemplateEnvironmentMixin): + def __init__(self, backend): super(ResponseObject, self).__init__() self.backend = backend @@ -70,7 +71,8 @@ class ResponseObject(_TemplateEnvironmentMixin): if match: return False - path_based = (host == 's3.amazonaws.com' or re.match(r"s3[\.\-]([^.]*)\.amazonaws\.com", host)) + path_based = (host == 's3.amazonaws.com' or re.match( + r"s3[\.\-]([^.]*)\.amazonaws\.com", host)) return not path_based def is_delete_keys(self, request, path, bucket_name): @@ -148,7 +150,8 @@ class ResponseObject(_TemplateEnvironmentMixin): elif method == 'POST': return self._bucket_response_post(request, body, bucket_name, headers) else: - raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method)) + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(method)) def _bucket_response_head(self, bucket_name, headers): self.backend.get_bucket(bucket_name) @@ -158,11 +161,14 @@ if 'uploads' in querystring: for unsup in ('delimiter', 'max-uploads'): if unsup in querystring: - raise NotImplementedError("Listing multipart uploads with {} has not been implemented yet.".format(unsup)) - multiparts = list(self.backend.get_all_multiparts(bucket_name).values()) + raise NotImplementedError( + "Listing multipart uploads with {} has not been implemented yet.".format(unsup)) + multiparts = list( + self.backend.get_all_multiparts(bucket_name).values()) if 'prefix' in querystring: prefix = querystring.get('prefix', [None])[0] - multiparts = [upload for upload in multiparts if upload.key_name.startswith(prefix)] + multiparts = [ + upload for upload in multiparts if upload.key_name.startswith(prefix)]
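A minimal standalone sketch of the delimiter grouping that prefix_query above performs (group_keys is a hypothetical helper for illustration, not part of moto, and assumes plain string keys):

def group_keys(keys, prefix='', delimiter='/'):
    # Split matching keys into direct hits and S3-style common-prefix "folders".
    key_results, folder_results = set(), set()
    for key in keys:
        if not key.startswith(prefix):
            continue
        rest = key[len(prefix):]
        if delimiter and delimiter in rest:
            # Everything up to the first delimiter collapses into one folder entry.
            folder_results.add(prefix + rest.split(delimiter)[0] + delimiter)
        else:
            key_results.add(key)
    return sorted(key_results), sorted(folder_results)

# group_keys(['a/b.txt', 'a/c/d.txt'], prefix='a/') == (['a/b.txt'], ['a/c/'])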
template = self.response_template(S3_ALL_MULTIPARTS) return template.render( bucket_name=bucket_name, @@ -175,7 +181,8 @@ ResponseObject(_TemplateEnvironmentMixin): bucket = self.backend.get_bucket(bucket_name) if not bucket.rules: return 404, {}, "NoSuchLifecycleConfiguration" - template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION) + template = self.response_template( + S3_BUCKET_LIFECYCLE_CONFIGURATION) return template.render(rules=bucket.rules) elif 'versioning' in querystring: versioning = self.backend.get_bucket_versioning(bucket_name) @@ -188,7 +195,8 @@ return 404, {}, template.render(bucket_name=bucket_name) return 200, {}, policy elif 'website' in querystring: - website_configuration = self.backend.get_bucket_website_configuration(bucket_name) + website_configuration = self.backend.get_bucket_website_configuration( + bucket_name) return website_configuration elif 'acl' in querystring: bucket = self.backend.get_bucket(bucket_name) @@ -226,7 +234,8 @@ bucket = self.backend.get_bucket(bucket_name) prefix = querystring.get('prefix', [None])[0] delimiter = querystring.get('delimiter', [None])[0] - result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter) + result_keys, result_folders = self.backend.prefix_query( + bucket, prefix, delimiter) template = self.response_template(S3_BUCKET_GET_RESPONSE) return 200, {}, template.render( bucket=bucket, @@ -242,7 +251,8 @@ prefix = querystring.get('prefix', [None])[0] delimiter = querystring.get('delimiter', [None])[0] - result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter) + result_keys, result_folders = self.backend.prefix_query( + bucket, prefix, delimiter) fetch_owner = querystring.get('fetch-owner', [False])[0] max_keys = int(querystring.get('max-keys', [1000])[0]) @@ -308,7 +318,8 @@ return "" else: try: - new_bucket = self.backend.create_bucket(bucket_name, region_name) + new_bucket = self.backend.create_bucket( + bucket_name, region_name) except BucketAlreadyExists: if region_name == DEFAULT_REGION_NAME: # us-east-1 has different behavior @@ -335,7 +346,8 @@ return 204, {}, template.render(bucket=removed_bucket) else: # Tried to delete a bucket that still has keys - template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR) + template = self.response_template( + S3_DELETE_BUCKET_WITH_ITEMS_ERROR) return 409, {}, template.render(bucket=removed_bucket) def _bucket_response_post(self, request, body, bucket_name, headers): @@ -393,7 +405,9 @@ if ',' in rspec: raise NotImplementedError( "Multiple range specifiers not supported") - toint = lambda i: int(i) if i else None + + def toint(i): + return int(i) if i else None begin, end = map(toint, rspec.split('-')) if begin is not None: # byte range end = last if end is None else min(end, last) @@ -455,7 +469,8 @@ elif method == 'POST': return self._key_response_post(request, body, bucket_name, query, key_name, headers) else: - raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method)) + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(method)) def _key_response_get(self, bucket_name,
query, key_name, headers): response_headers = {} @@ -489,7 +504,8 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'x-amz-copy-source' in request.headers: src = request.headers.get("x-amz-copy-source") src_bucket, src_key = src.split("/", 1) - src_range = request.headers.get('x-amz-copy-source-range', '').split("bytes=")[-1] + src_range = request.headers.get( + 'x-amz-copy-source-range', '').split("bytes=")[-1] try: start_byte, end_byte = src_range.split("-") @@ -522,7 +538,8 @@ class ResponseObject(_TemplateEnvironmentMixin): # Copy key src_key_parsed = urlparse(request.headers.get("x-amz-copy-source")) src_bucket, src_key = src_key_parsed.path.split("/", 1) - src_version_id = parse_qs(src_key_parsed.query).get('versionId', [None])[0] + src_version_id = parse_qs(src_key_parsed.query).get( + 'versionId', [None])[0] self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, storage=storage_class, acl=acl, src_version_id=src_version_id) new_key = self.backend.get_key(bucket_name, key_name) @@ -557,7 +574,8 @@ class ResponseObject(_TemplateEnvironmentMixin): def _key_response_head(self, bucket_name, query, key_name, headers): response_headers = {} version_id = query.get('versionId', [None])[0] - key = self.backend.get_key(bucket_name, key_name, version_id=version_id) + key = self.backend.get_key( + bucket_name, key_name, version_id=version_id) if key: response_headers.update(key.metadata) response_headers.update(key.response_dict) @@ -585,7 +603,8 @@ class ResponseObject(_TemplateEnvironmentMixin): grantees = [] for key_and_value in value.split(","): - key, value = re.match('([^=]+)="([^"]+)"', key_and_value.strip()).groups() + key, value = re.match( + '([^=]+)="([^"]+)"', key_and_value.strip()).groups() if key.lower() == 'id': grantees.append(FakeGrantee(id=value)) else: @@ -610,7 +629,8 @@ class ResponseObject(_TemplateEnvironmentMixin): ps = minidom.parseString(body).getElementsByTagName('Part') prev = 0 for p in ps: - pn = int(p.getElementsByTagName('PartNumber')[0].firstChild.wholeText) + pn = int(p.getElementsByTagName( + 'PartNumber')[0].firstChild.wholeText) if pn <= prev: raise InvalidPartOrder() yield (pn, p.getElementsByTagName('ETag')[0].firstChild.wholeText) @@ -618,7 +638,8 @@ class ResponseObject(_TemplateEnvironmentMixin): def _key_response_post(self, request, body, bucket_name, query, key_name, headers): if body == b'' and 'uploads' in query: metadata = metadata_from_headers(request.headers) - multipart = self.backend.initiate_multipart(bucket_name, key_name, metadata) + multipart = self.backend.initiate_multipart( + bucket_name, key_name, metadata) template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE) response = template.render( @@ -648,7 +669,9 @@ class ResponseObject(_TemplateEnvironmentMixin): key.restore(int(days)) return r, {}, "" else: - raise NotImplementedError("Method POST had only been implemented for multipart uploads and restore operations, so far") + raise NotImplementedError( + "Method POST has only been implemented for multipart uploads and restore operations, so far") + S3ResponseInstance = ResponseObject(s3_backend) diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 8ea18c207..a121eae3a 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -29,7 +29,8 @@ def bucket_name_from_url(url): def metadata_from_headers(headers): metadata = {} - meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) + meta_regex = re.compile( + '^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) for header, value in
headers.items(): if isinstance(header, six.string_types): result = meta_regex.match(header) diff --git a/moto/server.py b/moto/server.py index 0bb4eb779..c7e7f18fb 100644 --- a/moto/server.py +++ b/moto/server.py @@ -57,11 +57,13 @@ class DomainDispatcherApplication(object): # Fall back to parsing auth header to find service # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] try: - _, _, region, service, _ = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[1].split("/") + _, _, region, service, _ = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[ + 1].split("/") except ValueError: region = 'us-east-1' service = 's3' - host = "{service}.{region}.amazonaws.com".format(service=service, region=region) + host = "{service}.{region}.amazonaws.com".format( + service=service, region=region) with self.lock: backend = self.get_backend_for_host(host) @@ -78,6 +80,7 @@ class DomainDispatcherApplication(object): class RegexConverter(BaseConverter): # http://werkzeug.pocoo.org/docs/routing/#custom-converters + def __init__(self, url_map, *items): super(RegexConverter, self).__init__(url_map) self.regex = items[0] @@ -92,7 +95,7 @@ class AWSTestHelper(FlaskClient): opts = {"Action": action_name} opts.update(kwargs) res = self.get("/?{0}".format(urlencode(opts)), - headers={"Host": "{0}.us-east-1.amazonaws.com".format(self.application.service)}) + headers={"Host": "{0}.us-east-1.amazonaws.com".format(self.application.service)}) return res.data.decode("utf-8") def action_json(self, action_name, **kwargs): @@ -166,10 +169,12 @@ def main(argv=sys.argv[1:]): args = parser.parse_args(argv) # Wrap the main application - main_app = DomainDispatcherApplication(create_backend_app, service=args.service) + main_app = DomainDispatcherApplication( + create_backend_app, service=args.service) main_app.debug = True - run_simple(args.host, args.port, main_app, threaded=True, use_reloader=args.reload) + run_simple(args.host, args.port, main_app, + threaded=True, use_reloader=args.reload) if __name__ == '__main__': diff --git a/moto/ses/__init__.py b/moto/ses/__init__.py index e105b9929..0477d2623 100644 --- a/moto/ses/__init__.py +++ b/moto/ses/__init__.py @@ -3,4 +3,4 @@ from .models import ses_backend ses_backends = {"global": ses_backend} mock_ses = ses_backend.decorator -mock_ses_deprecated = ses_backend.deprecated_decorator \ No newline at end of file +mock_ses_deprecated = ses_backend.deprecated_decorator diff --git a/moto/ses/models.py b/moto/ses/models.py index 6950ead5b..3502d6bc7 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -11,16 +11,19 @@ RECIPIENT_LIMIT = 50 class Message(object): + def __init__(self, message_id): self.id = message_id class RawMessage(object): + def __init__(self, message_id): self.id = message_id class SESQuota(object): + def __init__(self, sent): self.sent = sent @@ -30,6 +33,7 @@ class SESQuota(object): class SESBackend(BaseBackend): + def __init__(self): self.addresses = [] self.domains = [] @@ -97,4 +101,5 @@ class SESBackend(BaseBackend): def get_send_quota(self): return SESQuota(self.sent_message_count) + ses_backend = SESBackend() diff --git a/moto/sns/__init__.py b/moto/sns/__init__.py index a50911e3b..bd36cb23d 100644 --- a/moto/sns/__init__.py +++ b/moto/sns/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import sns_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator sns_backend = 
 mock_sns = base_decorator(sns_backends)
diff --git a/moto/sns/models.py b/moto/sns/models.py
index d924b1e5d..0ad00928d 100644
--- a/moto/sns/models.py
+++ b/moto/sns/models.py
@@ -20,6 +20,7 @@ DEFAULT_PAGE_SIZE = 100


 class Topic(object):
+
     def __init__(self, name, sns_backend):
         self.name = name
         self.sns_backend = sns_backend
@@ -28,7 +29,8 @@ class Topic(object):
         self.policy = DEFAULT_TOPIC_POLICY
         self.delivery_policy = ""
         self.effective_delivery_policy = DEFAULT_EFFECTIVE_DELIVERY_POLICY
-        self.arn = make_arn_for_topic(self.account_id, name, sns_backend.region_name)
+        self.arn = make_arn_for_topic(
+            self.account_id, name, sns_backend.region_name)

         self.subscriptions_pending = 0
         self.subscriptions_confimed = 0
@@ -60,11 +62,13 @@ class Topic(object):
             properties.get("TopicName")
         )
         for subscription in properties.get("Subscription", []):
-            sns_backend.subscribe(topic.arn, subscription['Endpoint'], subscription['Protocol'])
+            sns_backend.subscribe(topic.arn, subscription[
+                'Endpoint'], subscription['Protocol'])
         return topic


 class Subscription(object):
+
     def __init__(self, topic, endpoint, protocol):
         self.topic = topic
         self.endpoint = endpoint
@@ -96,6 +100,7 @@ class Subscription(object):


 class PlatformApplication(object):
+
     def __init__(self, region, name, platform, attributes):
         self.region = region
         self.name = name
@@ -112,6 +117,7 @@ class PlatformApplication(object):


 class PlatformEndpoint(object):
+
     def __init__(self, region, application, custom_user_data, token, attributes):
         self.region = region
         self.application = application
@@ -125,9 +131,9 @@ class PlatformEndpoint(object):
     def __fixup_attributes(self):
         # When AWS returns the attributes dict, it always contains these two elements, so we need to
         # automatically ensure they exist as well.
-        if not 'Token' in self.attributes:
+        if 'Token' not in self.attributes:
             self.attributes['Token'] = self.token
-        if not 'Enabled' in self.attributes:
+        if 'Enabled' not in self.attributes:
             self.attributes['Enabled'] = True

     @property
@@ -147,6 +153,7 @@ class PlatformEndpoint(object):


 class SNSBackend(BaseBackend):
+
     def __init__(self, region_name):
         super(SNSBackend, self).__init__()
         self.topics = OrderedDict()
@@ -169,7 +176,8 @@ class SNSBackend(BaseBackend):
         if next_token is None:
             next_token = 0
         next_token = int(next_token)
-        values = list(values_map.values())[next_token: next_token + DEFAULT_PAGE_SIZE]
+        values = list(values_map.values())[
+            next_token: next_token + DEFAULT_PAGE_SIZE]
         if len(values) == DEFAULT_PAGE_SIZE:
             next_token = next_token + DEFAULT_PAGE_SIZE
         else:
@@ -204,7 +212,8 @@ class SNSBackend(BaseBackend):
     def list_subscriptions(self, topic_arn=None, next_token=None):
         if topic_arn:
             topic = self.get_topic(topic_arn)
-            filtered = OrderedDict([(k, sub) for k, sub in self.subscriptions.items() if sub.topic == topic])
+            filtered = OrderedDict(
+                [(k, sub) for k, sub in self.subscriptions.items() if sub.topic == topic])
             return self._get_values_nexttoken(filtered, next_token)
         else:
             return self._get_values_nexttoken(self.subscriptions, next_token)
@@ -227,7 +236,8 @@ class SNSBackend(BaseBackend):
         try:
             return self.applications[arn]
         except KeyError:
-            raise SNSNotFoundError("Application with arn {0} not found".format(arn))
+            raise SNSNotFoundError(
+                "Application with arn {0} not found".format(arn))

     def set_application_attributes(self, arn, attributes):
         application = self.get_application(arn)
@@ -241,7 +251,8 @@ class SNSBackend(BaseBackend):
         self.applications.pop(platform_arn)

     def create_platform_endpoint(self, region, application, custom_user_data, token, attributes):
-        platform_endpoint = PlatformEndpoint(region, application, custom_user_data, token, attributes)
+        platform_endpoint = PlatformEndpoint(
+            region, application, custom_user_data, token, attributes)
         self.platform_endpoints[platform_endpoint.arn] = platform_endpoint
         return platform_endpoint

@@ -256,7 +267,8 @@ class SNSBackend(BaseBackend):
         try:
             return self.platform_endpoints[arn]
         except KeyError:
-            raise SNSNotFoundError("Endpoint with arn {0} not found".format(arn))
+            raise SNSNotFoundError(
+                "Endpoint with arn {0} not found".format(arn))

     def set_endpoint_attributes(self, arn, attributes):
         endpoint = self.get_endpoint(arn)
@@ -267,7 +279,8 @@ class SNSBackend(BaseBackend):
         try:
             del self.platform_endpoints[arn]
         except KeyError:
-            raise SNSNotFoundError("Endpoint with arn {0} not found".format(arn))
+            raise SNSNotFoundError(
+                "Endpoint with arn {0} not found".format(arn))


 sns_backends = {}
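
The `__fixup_attributes` hunk at the top of this file's changes is pycodestyle E713: `not x in y` evaluates as `not (x in y)`, so it works, but it reads as if it negates `x`. Python has a dedicated `not in` operator that states the intent directly. A self-contained sketch (the dict is made up for illustration):

    attributes = {'Token': 'abc123'}

    # Flagged (E713): legal, but easy to misread
    if not 'Enabled' in attributes:
        attributes['Enabled'] = True

    # Preferred: 'not in' is a single membership operator
    if 'Enabled' not in attributes:
        attributes['Enabled'] = True

Both forms are equivalent at runtime; the rewrite is purely for readability.
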
diff --git a/moto/sns/responses.py b/moto/sns/responses.py
index 9a20dbcb5..edb82e40c 100644
--- a/moto/sns/responses.py
+++ b/moto/sns/responses.py
@@ -109,7 +109,8 @@ class SNSResponse(BaseResponse):
         attribute_name = self._get_param('AttributeName')
         attribute_name = camelcase_to_underscores(attribute_name)
         attribute_value = self._get_param('AttributeValue')
-        self.backend.set_topic_attribute(topic_arn, attribute_name, attribute_value)
+        self.backend.set_topic_attribute(
+            topic_arn, attribute_name, attribute_value)

         if self.request_json:
             return json.dumps({
@@ -162,7 +163,8 @@ class SNSResponse(BaseResponse):

     def list_subscriptions(self):
         next_token = self._get_param('NextToken')
-        subscriptions, next_token = self.backend.list_subscriptions(next_token=next_token)
+        subscriptions, next_token = self.backend.list_subscriptions(
+            next_token=next_token)

         if self.request_json:
             return json.dumps({
@@ -190,7 +192,8 @@ class SNSResponse(BaseResponse):
     def list_subscriptions_by_topic(self):
         topic_arn = self._get_param('TopicArn')
         next_token = self._get_param('NextToken')
-        subscriptions, next_token = self.backend.list_subscriptions(topic_arn, next_token=next_token)
+        subscriptions, next_token = self.backend.list_subscriptions(
+            topic_arn, next_token=next_token)

         if self.request_json:
             return json.dumps({
@@ -241,7 +244,8 @@ class SNSResponse(BaseResponse):
         name = self._get_param('Name')
         platform = self._get_param('Platform')
         attributes = self._get_attributes()
-        platform_application = self.backend.create_platform_application(self.region, name, platform, attributes)
+        platform_application = self.backend.create_platform_application(
+            self.region, name, platform, attributes)

         if self.request_json:
             return json.dumps({
@@ -274,7 +278,8 @@ class SNSResponse(BaseResponse):
                 }
             })

-        template = self.response_template(GET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE)
+        template = self.response_template(
+            GET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE)
         return template.render(application=application)

     def set_platform_application_attributes(self):
@@ -292,7 +297,8 @@ class SNSResponse(BaseResponse):
                 }
             })

-        template = self.response_template(SET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE)
+        template = self.response_template(
+            SET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE)
         return template.render()

     def list_platform_applications(self):
@@ -361,7 +367,8 @@ class SNSResponse(BaseResponse):

     def list_endpoints_by_platform_application(self):
         application_arn = self._get_param('PlatformApplicationArn')
-        endpoints = self.backend.list_endpoints_by_platform_application(application_arn)
+        endpoints = self.backend.list_endpoints_by_platform_application(
+            application_arn)

         if self.request_json:
             return json.dumps({
@@ -381,7 +388,8 @@ class SNSResponse(BaseResponse):
                 }
             })

-        template = self.response_template(LIST_ENDPOINTS_BY_PLATFORM_APPLICATION_TEMPLATE)
+        template = self.response_template(
+            LIST_ENDPOINTS_BY_PLATFORM_APPLICATION_TEMPLATE)
         return template.render(endpoints=endpoints)

     def get_endpoint_attributes(self):
@@ -438,7 +446,6 @@ class SNSResponse(BaseResponse):
         return template.render()


-
 CREATE_TOPIC_TEMPLATE = """
         {{ topic.arn }}
diff --git a/moto/sqs/__init__.py b/moto/sqs/__init__.py
index 946ba8f47..46c83133f 100644
--- a/moto/sqs/__init__.py
+++ b/moto/sqs/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import sqs_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator

 sqs_backend = sqs_backends['us-east-1']
 mock_sqs = base_decorator(sqs_backends)
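
The one-line `__init__.py` edits here and in the ses, sns, and swf packages are flake8 F401 fixes: `MockAWS` and `HttprettyMockAWS` were imported but never referenced, so the import line shrinks to the names each module actually uses. A hypothetical before and after, with `helpers` and `backends` invented for illustration, not moto's real layout:

    # Before -- flake8 reports "'helpers.MockAWS' imported but unused" (F401)
    from helpers import MockAWS, base_decorator
    # After -- import only what the module uses
    from helpers import base_decorator

    mock_service = base_decorator(backends)

Unused imports are not just noise: they add spurious coupling and can hide circular-import problems, which is presumably why the patch removes them rather than silencing the warning.
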
diff --git a/moto/sqs/models.py b/moto/sqs/models.py
index 13b8c34b6..5f4833772 100644
--- a/moto/sqs/models.py
+++ b/moto/sqs/models.py
@@ -1,7 +1,6 @@
 from __future__ import unicode_literals

 import hashlib
-import time
 import re
 from xml.sax.saxutils import escape

@@ -18,7 +17,9 @@ from .exceptions import (
 DEFAULT_ACCOUNT_ID = 123456789012
 DEFAULT_SENDER_ID = "AIDAIT2UOQQY3AUEKVGXU"

+
 class Message(object):
+
     def __init__(self, message_id, body):
         self.id = message_id
         self._body = body
@@ -122,7 +123,8 @@ class Queue(object):
         self.last_modified_timestamp = now
         self.maximum_message_size = 64 << 10
         self.message_retention_period = 86400 * 4  # four days
-        self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name)
+        self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(
+            self.region, self.name)
         self.receive_message_wait_time_seconds = 0

     @classmethod
@@ -177,7 +179,8 @@ class Queue(object):
     def attributes(self):
         result = {}
         for attribute in self.camelcase_attributes:
-            result[attribute] = getattr(self, camelcase_to_underscores(attribute))
+            result[attribute] = getattr(
+                self, camelcase_to_underscores(attribute))
         return result

     @property
@@ -201,6 +204,7 @@ class Queue(object):


 class SQSBackend(BaseBackend):
+
     def __init__(self, region_name):
         self.region_name = region_name
         self.queues = {}
@@ -214,7 +218,8 @@ class SQSBackend(BaseBackend):
     def create_queue(self, name, visibility_timeout, wait_time_seconds):
         queue = self.queues.get(name)
         if queue is None:
-            queue = Queue(name, visibility_timeout, wait_time_seconds, self.region_name)
+            queue = Queue(name, visibility_timeout,
+                          wait_time_seconds, self.region_name)
             self.queues[name] = queue
         return queue
diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py
index d57ec3430..84886068e 100644
--- a/moto/sqs/responses.py
+++ b/moto/sqs/responses.py
@@ -27,7 +27,8 @@ class SQSResponse(BaseResponse):
     @property
     def attribute(self):
         if not hasattr(self, '_attribute'):
-            self._attribute = dict([(a['name'], a['value']) for a in self._get_list_prefix('Attribute')])
+            self._attribute = dict([(a['name'], a['value'])
+                                    for a in self._get_list_prefix('Attribute')])
         return self._attribute

     def _get_queue_name(self):
@@ -59,7 +60,7 @@ class SQSResponse(BaseResponse):
     def create_queue(self):
         queue_name = self.querystring.get("QueueName")[0]
         queue = self.sqs_backend.create_queue(queue_name, visibility_timeout=self.attribute.get('VisibilityTimeout'),
-                                wait_time_seconds=self.attribute.get('WaitTimeSeconds'))
+                                              wait_time_seconds=self.attribute.get('WaitTimeSeconds'))
         template = self.response_template(CREATE_QUEUE_RESPONSE)
         return template.render(queue=queue)

@@ -108,7 +109,8 @@ class SQSResponse(BaseResponse):
     def set_queue_attributes(self):
         queue_name = self._get_queue_name()
         if "Attribute.Name" in self.querystring:
-            key = camelcase_to_underscores(self.querystring.get("Attribute.Name")[0])
+            key = camelcase_to_underscores(
+                self.querystring.get("Attribute.Name")[0])
             value = self.querystring.get("Attribute.Value")[0]
             self.sqs_backend.set_queue_attribute(queue_name, key, value)
         for a in self._get_list_prefix("Attribute"):
@@ -171,20 +173,25 @@ class SQSResponse(BaseResponse):
         messages = []
         for index in range(1, 11):
             # Loop through looking for messages
-            message_key = 'SendMessageBatchRequestEntry.{0}.MessageBody'.format(index)
+            message_key = 'SendMessageBatchRequestEntry.{0}.MessageBody'.format(
+                index)
             message_body = self.querystring.get(message_key)
             if not message_body:
                 # Found all messages
                 break

-            message_user_id_key = 'SendMessageBatchRequestEntry.{0}.Id'.format(index)
+            message_user_id_key = 'SendMessageBatchRequestEntry.{0}.Id'.format(
+                index)
             message_user_id = self.querystring.get(message_user_id_key)[0]
-            delay_key = 'SendMessageBatchRequestEntry.{0}.DelaySeconds'.format(index)
+            delay_key = 'SendMessageBatchRequestEntry.{0}.DelaySeconds'.format(
+                index)
             delay_seconds = self.querystring.get(delay_key, [None])[0]
-            message = self.sqs_backend.send_message(queue_name, message_body[0], delay_seconds=delay_seconds)
+            message = self.sqs_backend.send_message(
+                queue_name, message_body[0], delay_seconds=delay_seconds)
             message.user_id = message_user_id

-            message_attributes = parse_message_attributes(self.querystring, base='SendMessageBatchRequestEntry.{0}.'.format(index))
+            message_attributes = parse_message_attributes(
+                self.querystring, base='SendMessageBatchRequestEntry.{0}.'.format(index))
             if type(message_attributes) == tuple:
                 return message_attributes[0], message_attributes[1]
             message.message_attributes = message_attributes
@@ -216,7 +223,8 @@ class SQSResponse(BaseResponse):
         message_ids = []
         for index in range(1, 11):
             # Loop through looking for messages
-            receipt_key = 'DeleteMessageBatchRequestEntry.{0}.ReceiptHandle'.format(index)
+            receipt_key = 'DeleteMessageBatchRequestEntry.{0}.ReceiptHandle'.format(
+                index)
             receipt_handle = self.querystring.get(receipt_key)
             if not receipt_handle:
                 # Found all messages
@@ -224,7 +232,8 @@ class SQSResponse(BaseResponse):

             self.sqs_backend.delete_message(queue_name, receipt_handle[0])

-            message_user_id_key = 'DeleteMessageBatchRequestEntry.{0}.Id'.format(index)
+            message_user_id_key = 'DeleteMessageBatchRequestEntry.{0}.Id'.format(
+                index)
             message_user_id = self.querystring.get(message_user_id_key)[0]
             message_ids.append(message_user_id)

@@ -258,7 +267,8 @@ class SQSResponse(BaseResponse):
         except ValueError:
             return ERROR_MAX_VISIBILITY_TIMEOUT_RESPONSE, dict(status=400)

-        messages = self.sqs_backend.receive_messages(queue_name, message_count, wait_time, visibility_timeout)
+        messages = self.sqs_backend.receive_messages(
+            queue_name, message_count, wait_time, visibility_timeout)
         template = self.response_template(RECEIVE_MESSAGE_RESPONSE)
         output = template.render(messages=messages)
         return output
@@ -444,7 +454,8 @@

 ERROR_TOO_LONG_RESPONSE = """
diff --git a/moto/sqs/utils.py b/moto/sqs/utils.py
index a00ec1c79..78be5f629 100644
--- a/moto/sqs/utils.py
+++ b/moto/sqs/utils.py
@@ -22,25 +22,32 @@ def parse_message_attributes(querystring, base='', value_namespace='Value.'):
             # Found all attributes
             break

-        data_type_key = base + 'MessageAttribute.{0}.{1}DataType'.format(index, value_namespace)
+        data_type_key = base + \
+            'MessageAttribute.{0}.{1}DataType'.format(index, value_namespace)
         data_type = querystring.get(data_type_key)
         if not data_type:
-            raise MessageAttributesInvalid("The message attribute '{0}' must contain non-empty message attribute value.".format(name[0]))
+            raise MessageAttributesInvalid(
+                "The message attribute '{0}' must contain non-empty message attribute value.".format(name[0]))

         data_type_parts = data_type[0].split('.')
         if len(data_type_parts) > 2 or data_type_parts[0] not in ['String', 'Binary', 'Number']:
-            raise MessageAttributesInvalid("The message attribute '{0}' has an invalid message attribute type, the set of supported type prefixes is Binary, Number, and String.".format(name[0]))
+            raise MessageAttributesInvalid(
+                "The message attribute '{0}' has an invalid message attribute type, the set of supported type prefixes is Binary, Number, and String.".format(name[0]))

         type_prefix = 'String'
         if data_type_parts[0] == 'Binary':
             type_prefix = 'Binary'

-        value_key = base + 'MessageAttribute.{0}.{1}{2}Value'.format(index, value_namespace, type_prefix)
+        value_key = base + \
+            'MessageAttribute.{0}.{1}{2}Value'.format(
+                index, value_namespace, type_prefix)
         value = querystring.get(value_key)
         if not value:
-            raise MessageAttributesInvalid("The message attribute '{0}' must contain non-empty message attribute value for message attribute type '{1}'.".format(name[0], data_type[0]))
+            raise MessageAttributesInvalid(
+                "The message attribute '{0}' must contain non-empty message attribute value for message attribute type '{1}'.".format(name[0], data_type[0]))

-        message_attributes[name[0]] = {'data_type': data_type[0], type_prefix.lower() + '_value': value[0]}
+        message_attributes[name[0]] = {'data_type': data_type[
+            0], type_prefix.lower() + '_value': value[0]}

         index += 1
diff --git a/moto/sts/models.py b/moto/sts/models.py
index 9ce629c91..f1c6401d2 100644
--- a/moto/sts/models.py
+++ b/moto/sts/models.py
@@ -5,6 +5,7 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds


 class Token(object):
+
     def __init__(self, duration, name=None, policy=None):
         now = datetime.datetime.utcnow()
         self.expiration = now + datetime.timedelta(seconds=duration)
@@ -17,6 +18,7 @@ class Token(object):


 class AssumedRole(object):
+
     def __init__(self, role_session_name, role_arn, policy, duration, external_id):
         self.session_name = role_session_name
         self.arn = role_arn
@@ -31,6 +33,7 @@ class AssumedRole(object):


 class STSBackend(BaseBackend):
+
     def get_session_token(self, duration):
         token = Token(duration=duration)
         return token
@@ -43,4 +46,5 @@ class STSBackend(BaseBackend):
         role = AssumedRole(**kwargs)
         return role

+
 sts_backend = STSBackend()
diff --git a/moto/sts/responses.py b/moto/sts/responses.py
index d721bfaaa..a5abb6b81 100644
--- a/moto/sts/responses.py
+++ b/moto/sts/responses.py
@@ -43,6 +43,7 @@ class TokenResponse(BaseResponse):
         template = self.response_template(GET_CALLER_IDENTITY_RESPONSE)
         return template.render()

+
 GET_SESSION_TOKEN_RESPONSE = """
diff --git a/moto/swf/__init__.py b/moto/swf/__init__.py
index 5ac59fbb6..0d626690a 100644
--- a/moto/swf/__init__.py
+++ b/moto/swf/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from .models import swf_backends
-from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator
+from ..core.models import base_decorator, deprecated_base_decorator

 swf_backend = swf_backends['us-east-1']
 mock_swf = base_decorator(swf_backends)
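
The scattered one-character `+` lines through the sts and swf files above are blank lines demanded by pycodestyle: E301 wants one blank line between a `class` statement and its first method, and E305 wants two blank lines between a definition and the module-level code that follows it. Schematically, with invented names:

    class ExampleBackend(object):

        # E301 asks for the blank line just above this first method
        def __init__(self):
            self.items = []


    # E305 asks for two blank lines above this module-level statement
    example_backend = ExampleBackend()

The bytecode is identical either way; the spacing only makes class and module boundaries easier to scan.
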
version={2}]".format(_type.__class__.__name__, _type.name, _type.version), + "{0}=[name={1}, version={2}]".format( + _type.__class__.__name__, _type.name, _type.version), ) class SWFTypeDeprecatedFault(SWFClientError): + def __init__(self, _type): super(SWFTypeDeprecatedFault, self).__init__( "com.amazonaws.swf.base.model#TypeDeprecatedFault", - "{0}=[name={1}, version={2}]".format(_type.__class__.__name__, _type.name, _type.version), + "{0}=[name={1}, version={2}]".format( + _type.__class__.__name__, _type.name, _type.version), ) class SWFWorkflowExecutionAlreadyStartedFault(SWFClientError): + def __init__(self): super(SWFWorkflowExecutionAlreadyStartedFault, self).__init__( "com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault", @@ -71,6 +81,7 @@ class SWFWorkflowExecutionAlreadyStartedFault(SWFClientError): class SWFDefaultUndefinedFault(SWFClientError): + def __init__(self, key): # TODO: move that into moto.core.utils maybe? words = key.split("_") @@ -84,6 +95,7 @@ class SWFDefaultUndefinedFault(SWFClientError): class SWFValidationException(SWFClientError): + def __init__(self, message): super(SWFValidationException, self).__init__( "com.amazon.coral.validate#ValidationException", @@ -92,6 +104,7 @@ class SWFValidationException(SWFClientError): class SWFDecisionValidationException(SWFClientError): + def __init__(self, problems): # messages messages = [] @@ -109,7 +122,8 @@ class SWFDecisionValidationException(SWFClientError): ) else: raise ValueError( - "Unhandled decision constraint type: {0}".format(pb["type"]) + "Unhandled decision constraint type: {0}".format(pb[ + "type"]) ) # prefix count = len(problems) @@ -124,5 +138,6 @@ class SWFDecisionValidationException(SWFClientError): class SWFWorkflowExecutionClosedError(Exception): + def __str__(self): return repr("Cannot change this object because the WorkflowExecution is closed") diff --git a/moto/swf/models/__init__.py b/moto/swf/models/__init__.py index 61fe5f52a..833596a23 100644 --- a/moto/swf/models/__init__.py +++ b/moto/swf/models/__init__.py @@ -12,15 +12,15 @@ from ..exceptions import ( SWFTypeDeprecatedFault, SWFValidationException, ) -from .activity_task import ActivityTask -from .activity_type import ActivityType -from .decision_task import DecisionTask -from .domain import Domain -from .generic_type import GenericType -from .history_event import HistoryEvent -from .timeout import Timeout -from .workflow_type import WorkflowType -from .workflow_execution import WorkflowExecution +from .activity_task import ActivityTask # flake8: noqa +from .activity_type import ActivityType # flake8: noqa +from .decision_task import DecisionTask # flake8: noqa +from .domain import Domain # flake8: noqa +from .generic_type import GenericType # flake8: noqa +from .history_event import HistoryEvent # flake8: noqa +from .timeout import Timeout # flake8: noqa +from .workflow_type import WorkflowType # flake8: noqa +from .workflow_execution import WorkflowExecution # flake8: noqa KNOWN_SWF_TYPES = { @@ -30,6 +30,7 @@ KNOWN_SWF_TYPES = { class SWFBackend(BaseBackend): + def __init__(self, region_name): self.region_name = region_name self.domains = [] @@ -246,7 +247,8 @@ class SWFBackend(BaseBackend): if decision_task.state != "STARTED": if decision_task.state == "COMPLETED": raise SWFUnknownResourceFault( - "decision task, scheduledEventId = {0}".format(decision_task.scheduled_event_id) + "decision task, scheduledEventId = {0}".format( + decision_task.scheduled_event_id) ) else: raise ValueError( @@ -300,7 +302,8 @@ class 
@@ -300,7 +302,8 @@ class SWFBackend(BaseBackend):
         count = 0
         for _task_list, tasks in domain.activity_task_lists.items():
             if _task_list == task_list:
-                pending = [t for t in tasks if t.state in ["SCHEDULED", "STARTED"]]
+                pending = [t for t in tasks if t.state in [
+                    "SCHEDULED", "STARTED"]]
                 count += len(pending)
         return count

@@ -330,7 +333,8 @@ class SWFBackend(BaseBackend):
         if activity_task.state != "STARTED":
             if activity_task.state == "COMPLETED":
                 raise SWFUnknownResourceFault(
-                    "activity, scheduledEventId = {0}".format(activity_task.scheduled_event_id)
+                    "activity, scheduledEventId = {0}".format(
+                        activity_task.scheduled_event_id)
                 )
             else:
                 raise ValueError(
@@ -354,15 +358,18 @@ class SWFBackend(BaseBackend):
         self._process_timeouts()
         activity_task = self._find_activity_task_from_token(task_token)
         wfe = activity_task.workflow_execution
-        wfe.fail_activity_task(activity_task.task_token, reason=reason, details=details)
+        wfe.fail_activity_task(activity_task.task_token,
+                               reason=reason, details=details)

     def terminate_workflow_execution(self, domain_name, workflow_id, child_policy=None,
                                      details=None, reason=None, run_id=None):
         # process timeouts on all objects
         self._process_timeouts()
         domain = self._get_domain(domain_name)
-        wfe = domain.get_workflow_execution(workflow_id, run_id=run_id, raise_if_closed=True)
-        wfe.terminate(child_policy=child_policy, details=details, reason=reason)
+        wfe = domain.get_workflow_execution(
+            workflow_id, run_id=run_id, raise_if_closed=True)
+        wfe.terminate(child_policy=child_policy,
+                      details=details, reason=reason)

     def record_activity_task_heartbeat(self, task_token, details=None):
         # process timeouts on all objects
diff --git a/moto/swf/models/activity_task.py b/moto/swf/models/activity_task.py
index eb361d258..e205cc07a 100644
--- a/moto/swf/models/activity_task.py
+++ b/moto/swf/models/activity_task.py
@@ -9,6 +9,7 @@ from .timeout import Timeout


 class ActivityTask(object):
+
     def __init__(self, activity_id, activity_type, scheduled_event_id,
                  workflow_execution, timeouts, input=None):
         self.activity_id = activity_id
diff --git a/moto/swf/models/activity_type.py b/moto/swf/models/activity_type.py
index 95a83ca7a..eb1bbfa68 100644
--- a/moto/swf/models/activity_type.py
+++ b/moto/swf/models/activity_type.py
@@ -2,6 +2,7 @@ from .generic_type import GenericType


 class ActivityType(GenericType):
+
     @property
     def _configuration_keys(self):
         return [
diff --git a/moto/swf/models/decision_task.py b/moto/swf/models/decision_task.py
index bcd28f372..13bddfd7a 100644
--- a/moto/swf/models/decision_task.py
+++ b/moto/swf/models/decision_task.py
@@ -9,6 +9,7 @@ from .timeout import Timeout


 class DecisionTask(object):
+
     def __init__(self, workflow_execution, scheduled_event_id):
         self.workflow_execution = workflow_execution
         self.workflow_type = workflow_execution.workflow_type
@@ -60,7 +61,8 @@ class DecisionTask(object):
         if not self.started or not self.workflow_execution.open:
             return None
         # TODO: handle the "NONE" case
-        start_to_close_at = self.started_timestamp + int(self.start_to_close_timeout)
+        start_to_close_at = self.started_timestamp + \
+            int(self.start_to_close_timeout)
         _timeout = Timeout(self, start_to_close_at, "START_TO_CLOSE")
         if _timeout.reached:
             return _timeout
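
One wrinkle in the `decision_task.py` hunk just above (and in `sqs/utils.py` earlier): when the overlong expression is arithmetic rather than a call, there is no bracket to break inside, so the formatter fell back to a backslash continuation. Wrapping the right-hand side in parentheses is the alternative PEP 8 recommends; a sketch with invented values:

    started_timestamp = 1487900000
    start_to_close_timeout = '300'

    # Style produced in this patch: explicit backslash continuation
    start_to_close_at = started_timestamp + \
        int(start_to_close_timeout)

    # Often preferred: implicit continuation inside parentheses
    start_to_close_at = (started_timestamp +
                         int(start_to_close_timeout))

Both parse to the same expression; the parenthesized form cannot be broken by an invisible trailing space after the backslash, which is the usual argument against `\` continuations.
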
diff --git a/moto/swf/models/domain.py b/moto/swf/models/domain.py
index 4efdc3150..ed7154067 100644
--- a/moto/swf/models/domain.py
+++ b/moto/swf/models/domain.py
@@ -8,6 +8,7 @@ from ..exceptions import (


 class Domain(object):
+
     def __init__(self, name, retention, description=None):
         self.name = name
         self.retention = retention
diff --git a/moto/swf/models/generic_type.py b/moto/swf/models/generic_type.py
index 7c8389fbe..2ae98bb53 100644
--- a/moto/swf/models/generic_type.py
+++ b/moto/swf/models/generic_type.py
@@ -4,6 +4,7 @@ from moto.core.utils import camelcase_to_underscores


 class GenericType(object):
+
     def __init__(self, name, version, **kwargs):
         self.name = name
         self.version = version
diff --git a/moto/swf/models/history_event.py b/moto/swf/models/history_event.py
index b181297f7..e841ca38e 100644
--- a/moto/swf/models/history_event.py
+++ b/moto/swf/models/history_event.py
@@ -28,10 +28,12 @@ SUPPORTED_HISTORY_EVENT_TYPES = (


 class HistoryEvent(object):
+
     def __init__(self, event_id, event_type, event_timestamp=None, **kwargs):
         if event_type not in SUPPORTED_HISTORY_EVENT_TYPES:
             raise NotImplementedError(
-                "HistoryEvent does not implement attributes for type '{0}'".format(event_type)
+                "HistoryEvent does not implement attributes for type '{0}'".format(
+                    event_type)
             )
         self.event_id = event_id
         self.event_type = event_type
diff --git a/moto/swf/models/timeout.py b/moto/swf/models/timeout.py
index cf0283760..09e0f6772 100644
--- a/moto/swf/models/timeout.py
+++ b/moto/swf/models/timeout.py
@@ -2,6 +2,7 @@ from moto.core.utils import unix_time


 class Timeout(object):
+
     def __init__(self, obj, timestamp, kind):
         self.obj = obj
         self.timestamp = timestamp
diff --git a/moto/swf/models/workflow_execution.py b/moto/swf/models/workflow_execution.py
index a30c2e18d..8b8acda4e 100644
--- a/moto/swf/models/workflow_execution.py
+++ b/moto/swf/models/workflow_execution.py
@@ -64,9 +64,12 @@ class WorkflowExecution(object):
         # NB: the order follows boto/SWF order of exceptions appearance (if no
         # param is set, # SWF will raise DefaultUndefinedFault errors in the
         # same order as the few lines that follow)
-        self._set_from_kwargs_or_workflow_type(kwargs, "execution_start_to_close_timeout")
-        self._set_from_kwargs_or_workflow_type(kwargs, "task_list", "task_list")
-        self._set_from_kwargs_or_workflow_type(kwargs, "task_start_to_close_timeout")
+        self._set_from_kwargs_or_workflow_type(
+            kwargs, "execution_start_to_close_timeout")
+        self._set_from_kwargs_or_workflow_type(
+            kwargs, "task_list", "task_list")
+        self._set_from_kwargs_or_workflow_type(
+            kwargs, "task_start_to_close_timeout")
         self._set_from_kwargs_or_workflow_type(kwargs, "child_policy")
         self.input = kwargs.get("input")
         # counters
@@ -368,13 +371,16 @@ class WorkflowExecution(object):
             # check decision types mandatory attributes
             # NB: the real SWF service seems to check attributes even for attributes list
             # that are not in line with the decisionType, so we do the same
-            attrs_to_check = [d for d in dcs.keys() if d.endswith("DecisionAttributes")]
+            attrs_to_check = [
+                d for d in dcs.keys() if d.endswith("DecisionAttributes")]
             if dcs["decisionType"] in self.KNOWN_DECISION_TYPES:
                 decision_type = dcs["decisionType"]
-                decision_attr = "{0}DecisionAttributes".format(decapitalize(decision_type))
+                decision_attr = "{0}DecisionAttributes".format(
+                    decapitalize(decision_type))
                 attrs_to_check.append(decision_attr)
             for attr in attrs_to_check:
-                problems += self._check_decision_attributes(attr, dcs.get(attr, {}), decision_number)
+                problems += self._check_decision_attributes(
+                    attr, dcs.get(attr, {}), decision_number)
             # check decision type is correct
             if dcs["decisionType"] not in self.KNOWN_DECISION_TYPES:
                 problems.append({
@@ -396,12 +402,14 @@ class WorkflowExecution(object):
         # handle each decision separately, in order
         for decision in decisions:
             decision_type = decision["decisionType"]
decision["decisionType"] - attributes_key = "{0}DecisionAttributes".format(decapitalize(decision_type)) + attributes_key = "{0}DecisionAttributes".format( + decapitalize(decision_type)) attributes = decision.get(attributes_key, {}) if decision_type == "CompleteWorkflowExecution": self.complete(event_id, attributes.get("result")) elif decision_type == "FailWorkflowExecution": - self.fail(event_id, attributes.get("details"), attributes.get("reason")) + self.fail(event_id, attributes.get( + "details"), attributes.get("reason")) elif decision_type == "ScheduleActivityTask": self.schedule_activity_task(event_id, attributes) else: @@ -415,7 +423,8 @@ class WorkflowExecution(object): # TODO: implement Decision type: SignalExternalWorkflowExecution # TODO: implement Decision type: StartChildWorkflowExecution # TODO: implement Decision type: StartTimer - raise NotImplementedError("Cannot handle decision: {0}".format(decision_type)) + raise NotImplementedError( + "Cannot handle decision: {0}".format(decision_type)) # finally decrement counter if and only if everything went well self.open_counts["openDecisionTasks"] -= 1 @@ -447,7 +456,8 @@ class WorkflowExecution(object): def fail_schedule_activity_task(_type, _cause): # TODO: implement other possible failure mode: OPEN_ACTIVITIES_LIMIT_EXCEEDED # NB: some failure modes are not implemented and probably won't be implemented in - # the future, such as ACTIVITY_CREATION_RATE_EXCEEDED or OPERATION_NOT_PERMITTED + # the future, such as ACTIVITY_CREATION_RATE_EXCEEDED or + # OPERATION_NOT_PERMITTED self._add_event( "ScheduleActivityTaskFailed", activity_id=attributes["activityId"], @@ -591,13 +601,15 @@ class WorkflowExecution(object): def first_timeout(self): if not self.open or not self.start_timestamp: return None - start_to_close_at = self.start_timestamp + int(self.execution_start_to_close_timeout) + start_to_close_at = self.start_timestamp + \ + int(self.execution_start_to_close_timeout) _timeout = Timeout(self, start_to_close_at, "START_TO_CLOSE") if _timeout.reached: return _timeout def timeout(self, timeout): - # TODO: process child policy on child workflows here or in the triggering function + # TODO: process child policy on child workflows here or in the + # triggering function self.execution_status = "CLOSED" self.close_status = "TIMED_OUT" self.timeout_type = timeout.kind diff --git a/moto/swf/models/workflow_type.py b/moto/swf/models/workflow_type.py index ddb2475b2..18d18d415 100644 --- a/moto/swf/models/workflow_type.py +++ b/moto/swf/models/workflow_type.py @@ -2,6 +2,7 @@ from .generic_type import GenericType class WorkflowType(GenericType): + @property def _configuration_keys(self): return [ diff --git a/moto/swf/responses.py b/moto/swf/responses.py index 92d4957fd..1ee89bfc1 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -64,7 +64,8 @@ class SWFResponse(BaseResponse): reverse_order = self._params.get("reverseOrder", None) self._check_string(domain_name) self._check_string(status) - types = self.swf_backend.list_types(kind, domain_name, status, reverse_order=reverse_order) + types = self.swf_backend.list_types( + kind, domain_name, status, reverse_order=reverse_order) return json.dumps({ "typeInfos": [_type.to_medium_dict() for _type in types] }) @@ -97,7 +98,8 @@ class SWFResponse(BaseResponse): status = self._params["registrationStatus"] self._check_string(status) reverse_order = self._params.get("reverseOrder", None) - domains = self.swf_backend.list_domains(status, reverse_order=reverse_order) + domains = 
+            status, reverse_order=reverse_order)
         return json.dumps({
             "domainInfos": [domain.to_short_dict() for domain in domains]
         })
@@ -107,7 +109,8 @@ class SWFResponse(BaseResponse):
         start_time_filter = self._params.get('startTimeFilter', None)
         close_time_filter = self._params.get('closeTimeFilter', None)
         execution_filter = self._params.get('executionFilter', None)
-        workflow_id = execution_filter['workflowId'] if execution_filter else None
+        workflow_id = execution_filter[
+            'workflowId'] if execution_filter else None
         maximum_page_size = self._params.get('maximumPageSize', 1000)
         reverse_order = self._params.get('reverseOrder', None)
         tag_filter = self._params.get('tagFilter', None)
@@ -162,7 +165,8 @@ class SWFResponse(BaseResponse):
         domain = self._params['domain']
         start_time_filter = self._params['startTimeFilter']
         execution_filter = self._params.get('executionFilter', None)
-        workflow_id = execution_filter['workflowId'] if execution_filter else None
+        workflow_id = execution_filter[
+            'workflowId'] if execution_filter else None
         maximum_page_size = self._params.get('maximumPageSize', 1000)
         reverse_order = self._params.get('reverseOrder', None)
         tag_filter = self._params.get('tagFilter', None)
@@ -234,10 +238,14 @@ class SWFResponse(BaseResponse):
             task_list = default_task_list.get("name")
         else:
             task_list = None
-        default_task_heartbeat_timeout = self._params.get("defaultTaskHeartbeatTimeout")
-        default_task_schedule_to_close_timeout = self._params.get("defaultTaskScheduleToCloseTimeout")
-        default_task_schedule_to_start_timeout = self._params.get("defaultTaskScheduleToStartTimeout")
-        default_task_start_to_close_timeout = self._params.get("defaultTaskStartToCloseTimeout")
+        default_task_heartbeat_timeout = self._params.get(
+            "defaultTaskHeartbeatTimeout")
+        default_task_schedule_to_close_timeout = self._params.get(
+            "defaultTaskScheduleToCloseTimeout")
+        default_task_schedule_to_start_timeout = self._params.get(
+            "defaultTaskScheduleToStartTimeout")
+        default_task_start_to_close_timeout = self._params.get(
+            "defaultTaskStartToCloseTimeout")
         description = self._params.get("description")

         self._check_string(domain)
@@ -280,8 +288,10 @@ class SWFResponse(BaseResponse):
         else:
             task_list = None
         default_child_policy = self._params.get("defaultChildPolicy")
-        default_task_start_to_close_timeout = self._params.get("defaultTaskStartToCloseTimeout")
-        default_execution_start_to_close_timeout = self._params.get("defaultExecutionStartToCloseTimeout")
+        default_task_start_to_close_timeout = self._params.get(
+            "defaultTaskStartToCloseTimeout")
+        default_execution_start_to_close_timeout = self._params.get(
+            "defaultExecutionStartToCloseTimeout")
         description = self._params.get("description")

         self._check_string(domain)
@@ -322,10 +332,12 @@ class SWFResponse(BaseResponse):
         else:
             task_list = None
         child_policy = self._params.get("childPolicy")
-        execution_start_to_close_timeout = self._params.get("executionStartToCloseTimeout")
+        execution_start_to_close_timeout = self._params.get(
+            "executionStartToCloseTimeout")
         input_ = self._params.get("input")
         tag_list = self._params.get("tagList")
-        task_start_to_close_timeout = self._params.get("taskStartToCloseTimeout")
+        task_start_to_close_timeout = self._params.get(
+            "taskStartToCloseTimeout")

         self._check_string(domain)
         self._check_string(workflow_id)
@@ -360,7 +372,8 @@ class SWFResponse(BaseResponse):
         self._check_string(run_id)
         self._check_string(workflow_id)

-        wfe = self.swf_backend.describe_workflow_execution(domain_name, run_id, workflow_id)
+        wfe = self.swf_backend.describe_workflow_execution(
+            domain_name, run_id, workflow_id)
         return json.dumps(wfe.to_full_dict())

     def get_workflow_execution_history(self):
@@ -369,7 +382,8 @@ class SWFResponse(BaseResponse):
         run_id = _workflow_execution["runId"]
         workflow_id = _workflow_execution["workflowId"]
         reverse_order = self._params.get("reverseOrder", None)
-        wfe = self.swf_backend.describe_workflow_execution(domain_name, run_id, workflow_id)
+        wfe = self.swf_backend.describe_workflow_execution(
+            domain_name, run_id, workflow_id)
         events = wfe.events(reverse_order=reverse_order)
         return json.dumps({
             "events": [evt.to_dict() for evt in events]
@@ -399,7 +413,8 @@ class SWFResponse(BaseResponse):
         task_list = self._params["taskList"]["name"]
         self._check_string(domain_name)
         self._check_string(task_list)
-        count = self.swf_backend.count_pending_decision_tasks(domain_name, task_list)
+        count = self.swf_backend.count_pending_decision_tasks(
+            domain_name, task_list)
         return json.dumps({"count": count, "truncated": False})

     def respond_decision_task_completed(self):
@@ -435,7 +450,8 @@ class SWFResponse(BaseResponse):
         task_list = self._params["taskList"]["name"]
         self._check_string(domain_name)
         self._check_string(task_list)
-        count = self.swf_backend.count_pending_activity_tasks(domain_name, task_list)
+        count = self.swf_backend.count_pending_activity_tasks(
+            domain_name, task_list)
         return json.dumps({"count": count, "truncated": False})

     def respond_activity_task_completed(self):
@@ -453,7 +469,8 @@ class SWFResponse(BaseResponse):
         reason = self._params.get("reason")
         details = self._params.get("details")
         self._check_string(task_token)
-        # TODO: implement length limits on reason and details (common pb with client libs)
+        # TODO: implement length limits on reason and details (common pb with
+        # client libs)
         self._check_none_or_string(reason)
         self._check_none_or_string(details)
         self.swf_backend.respond_activity_task_failed(
diff --git a/tests/backport_assert_raises.py b/tests/backport_assert_raises.py
index 6ceacaa89..9b20edf9d 100644
--- a/tests/backport_assert_raises.py
+++ b/tests/backport_assert_raises.py
@@ -19,6 +19,7 @@ try:
 except TypeError:
     # this version of assert_raises doesn't support the 1-arg version
     class AssertRaisesContext(object):
+
         def __init__(self, expected):
             self.expected = expected

diff --git a/tests/helpers.py b/tests/helpers.py
index 33509c06e..50615b094 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -8,13 +8,15 @@ def version_tuple(v):
     return tuple(map(int, (v.split("."))))


-# Note: See https://github.com/spulec/moto/issues/201 for why this is a separate method.
+# Note: See https://github.com/spulec/moto/issues/201 for why this is a
+# separate method.
 def skip_test():
     raise SkipTest


 class requires_boto_gte(object):
     """Decorator for requiring boto version greater than or equal to 'version'"""
+
     def __init__(self, version):
         self.version = version

@@ -27,6 +29,7 @@ class requires_boto_gte(object):


 class disable_on_py3(object):
+
     def __call__(self, test):
         if not six.PY3:
             return test
diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py
index e52bfe0d7..11230658b 100644
--- a/tests/test_apigateway/test_apigateway.py
+++ b/tests/test_apigateway/test_apigateway.py
@@ -72,13 +72,15 @@ def test_create_resource():
     api_id = response['id']

     resources = client.get_resources(restApiId=api_id)
-    root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id']
+    root_id = [resource for resource in resources[
+        'items'] if resource['path'] == '/'][0]['id']

     root_resource = client.get_resource(
         restApiId=api_id,
         resourceId=root_id,
     )
-    root_resource['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    root_resource['ResponseMetadata'].pop('HTTPHeaders', None)
     root_resource['ResponseMetadata'].pop('RetryAttempts', None)
     root_resource.should.equal({
         'path': '/',
@@ -97,7 +99,8 @@ def test_create_resource():

     resources = client.get_resources(restApiId=api_id)['items']
     len(resources).should.equal(2)
-    non_root_resource = [resource for resource in resources if resource['path'] != '/'][0]
+    non_root_resource = [
+        resource for resource in resources if resource['path'] != '/'][0]

     response = client.delete_resource(
         restApiId=api_id,
@@ -117,7 +120,8 @@ def test_child_resource():
     api_id = response['id']

     resources = client.get_resources(restApiId=api_id)
-    root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id']
+    root_id = [resource for resource in resources[
+        'items'] if resource['path'] == '/'][0]['id']

     response = client.create_resource(
         restApiId=api_id,
@@ -137,7 +141,8 @@ def test_child_resource():
         restApiId=api_id,
         resourceId=tags_id,
     )
-    child_resource['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    child_resource['ResponseMetadata'].pop('HTTPHeaders', None)
     child_resource['ResponseMetadata'].pop('RetryAttempts', None)
     child_resource.should.equal({
         'path': '/users/tags',
@@ -159,7 +164,8 @@ def test_create_method():
     api_id = response['id']

     resources = client.get_resources(restApiId=api_id)
-    root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id']
+    root_id = [resource for resource in resources[
+        'items'] if resource['path'] == '/'][0]['id']

     client.put_method(
         restApiId=api_id,
@@ -174,7 +180,8 @@ def test_create_method():
         httpMethod='GET'
     )

-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'httpMethod': 'GET',
@@ -193,7 +200,8 @@ def test_create_method_response():
     api_id = response['id']

     resources = client.get_resources(restApiId=api_id)
-    root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id']
+    root_id = [resource for resource in resources[
+        'items'] if resource['path'] == '/'][0]['id']

     client.put_method(
         restApiId=api_id,
@@ -214,7 +222,8 @@ def test_create_method_response():
         httpMethod='GET',
         statusCode='200',
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'ResponseMetadata': {'HTTPStatusCode': 200},
@@ -227,7 +236,8 @@ def test_create_method_response():
         httpMethod='GET',
         statusCode='200',
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'ResponseMetadata': {'HTTPStatusCode': 200},
@@ -240,7 +250,8 @@ def test_create_method_response():
         httpMethod='GET',
         statusCode='200',
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({'ResponseMetadata': {'HTTPStatusCode': 200}})

@@ -255,7 +266,8 @@ def test_integrations():
     api_id = response['id']

     resources = client.get_resources(restApiId=api_id)
-    root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id']
+    root_id = [resource for resource in resources[
+        'items'] if resource['path'] == '/'][0]['id']

     client.put_method(
         restApiId=api_id,
@@ -278,7 +290,8 @@ def test_integrations():
         type='HTTP',
         uri='http://httpbin.org/robots.txt',
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'ResponseMetadata': {'HTTPStatusCode': 200},
@@ -300,7 +313,8 @@ def test_integrations():
         resourceId=root_id,
         httpMethod='GET'
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'ResponseMetadata': {'HTTPStatusCode': 200},
@@ -321,7 +335,8 @@ def test_integrations():
         restApiId=api_id,
         resourceId=root_id,
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response['resourceMethods']['GET']['methodIntegration'].should.equal({
         'httpMethod': 'GET',
@@ -359,7 +374,8 @@ def test_integrations():
     )

     templates = {
-        # example based on http://docs.aws.amazon.com/apigateway/latest/developerguide/api-as-kinesis-proxy-export-swagger-with-extensions.html
+        # example based on
+        # http://docs.aws.amazon.com/apigateway/latest/developerguide/api-as-kinesis-proxy-export-swagger-with-extensions.html
         'application/json': "{\n    \"StreamName\": \"$input.params('stream-name')\",\n    \"Records\": []\n}"
     }
     test_uri = 'http://example.com/foobar.txt'
@@ -371,7 +387,8 @@ def test_integrations():
         uri=test_uri,
         requestTemplates=templates
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response['ResponseMetadata'].should.equal({'HTTPStatusCode': 200})

@@ -394,7 +411,8 @@ def test_integration_response():
     api_id = response['id']

     resources = client.get_resources(restApiId=api_id)
-    root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id']
+    root_id = [resource for resource in resources[
+        'items'] if resource['path'] == '/'][0]['id']

     client.put_method(
         restApiId=api_id,
@@ -425,7 +443,8 @@ def test_integration_response():
         statusCode='200',
         selectionPattern='foobar',
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'statusCode': '200',
@@ -442,7 +461,8 @@ def test_integration_response():
         httpMethod='GET',
         statusCode='200',
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'statusCode': '200',
@@ -458,7 +478,8 @@ def test_integration_response():
         resourceId=root_id,
         httpMethod='GET',
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response['methodIntegration']['integrationResponses'].should.equal({
         '200': {
@@ -506,23 +527,24 @@ def test_update_stage_configuration():
         restApiId=api_id,
         deploymentId=deployment_id,
     )
-    response.pop('createdDate',None)  # createdDate is hard to match against, remove it
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # createdDate is hard to match against, remove it
+    response.pop('createdDate', None)
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'id': deployment_id,
         'ResponseMetadata': {'HTTPStatusCode': 200},
-        'description' : '1.0.1'
+        'description': '1.0.1'
     })

     response = client.create_deployment(
-            restApiId=api_id,
-            stageName=stage_name,
-            description="1.0.2"
-        )
+        restApiId=api_id,
+        stageName=stage_name,
+        description="1.0.2"
+    )
     deployment_id2 = response['id']

-
     stage = client.get_stage(
         restApiId=api_id,
         stageName=stage_name
@@ -531,11 +553,11 @@ def test_update_stage_configuration():
     stage['deploymentId'].should.equal(deployment_id2)
     stage.shouldnt.have.key('cacheClusterSize')

-    client.update_stage(restApiId=api_id,stageName=stage_name,
+    client.update_stage(restApiId=api_id, stageName=stage_name,
                         patchOperations=[
                             {
-                                "op" : "replace",
-                                "path" : "/cacheClusterEnabled",
+                                "op": "replace",
+                                "path": "/cacheClusterEnabled",
                                 "value": "True"
                             }
                         ])
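
The dictionary rewrites in the test hunks above are whitespace fixes: pycodestyle E203 forbids a space before `:` (so `"op" : "replace"` becomes `"op": "replace"`), and E231 requires one after `,` (so `pop('createdDate',None)` becomes `pop('createdDate', None)`). A compact illustration with made-up data:

    # Flagged: space before ':' (E203), missing space after ',' (E231)
    patch_op = {"op" : "replace","path" : "/cacheClusterSize","value" : "1.6"}

    # Clean: no space before a colon, one space after colons and commas
    patch_op = {"op": "replace", "path": "/cacheClusterSize", "value": "1.6"}

The dict is identical either way; consistent spacing keeps keys and values visually paired.
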
"path": "/cacheClusterSize", "value": "1.6" } ]) @@ -563,56 +585,55 @@ def test_update_stage_configuration(): stage.should.have.key('cacheClusterSize').which.should.equal("1.6") - - client.update_stage(restApiId=api_id,stageName=stage_name, + client.update_stage(restApiId=api_id, stageName=stage_name, patchOperations=[ { - "op" : "replace", - "path" : "/deploymentId", + "op": "replace", + "path": "/deploymentId", "value": deployment_id }, { - "op" : "replace", - "path" : "/variables/environment", - "value" : "dev" + "op": "replace", + "path": "/variables/environment", + "value": "dev" }, { - "op" : "replace", - "path" : "/variables/region", - "value" : "eu-west-1" + "op": "replace", + "path": "/variables/region", + "value": "eu-west-1" }, { - "op" : "replace", - "path" : "/*/*/caching/dataEncrypted", - "value" : "True" + "op": "replace", + "path": "/*/*/caching/dataEncrypted", + "value": "True" }, { - "op" : "replace", - "path" : "/cacheClusterEnabled", - "value" : "True" + "op": "replace", + "path": "/cacheClusterEnabled", + "value": "True" }, { - "op" : "replace", - "path" : "/description", - "value" : "stage description update" + "op": "replace", + "path": "/description", + "value": "stage description update" }, { - "op" : "replace", - "path" : "/cacheClusterSize", - "value" : "1.6" + "op": "replace", + "path": "/cacheClusterSize", + "value": "1.6" } ]) - client.update_stage(restApiId=api_id,stageName=stage_name, + client.update_stage(restApiId=api_id, stageName=stage_name, patchOperations=[ { - "op" : "remove", - "path" : "/variables/region", - "value" : "eu-west-1" + "op": "remove", + "path": "/variables/region", + "value": "eu-west-1" } ]) - stage = client.get_stage(restApiId=api_id,stageName=stage_name) + stage = client.get_stage(restApiId=api_id, stageName=stage_name) stage['description'].should.match('stage description update') stage['cacheClusterSize'].should.equal("1.6") @@ -621,21 +642,23 @@ def test_update_stage_configuration(): stage['cacheClusterEnabled'].should.be.true stage['deploymentId'].should.match(deployment_id) stage['methodSettings'].should.have.key('*/*') - stage['methodSettings']['*/*'].should.have.key('cacheDataEncrypted').which.should.be.true + stage['methodSettings'][ + '*/*'].should.have.key('cacheDataEncrypted').which.should.be.true try: - client.update_stage(restApiId=api_id,stageName=stage_name, - patchOperations=[ - { - "op" : "add", - "path" : "/notasetting", - "value" : "eu-west-1" - } - ]) - assert False.should.be.ok #Fail, should not be here + client.update_stage(restApiId=api_id, stageName=stage_name, + patchOperations=[ + { + "op": "add", + "path": "/notasetting", + "value": "eu-west-1" + } + ]) + assert False.should.be.ok # Fail, should not be here except Exception: assert True.should.be.ok + @mock_apigateway def test_non_existent_stage(): client = boto3.client('apigateway', region_name='us-west-2') @@ -645,9 +668,8 @@ def test_non_existent_stage(): ) api_id = response['id'] - - client.get_stage.when.called_with(restApiId=api_id,stageName='xxx').should.throw(ClientError) - + client.get_stage.when.called_with( + restApiId=api_id, stageName='xxx').should.throw(ClientError) @mock_apigateway @@ -670,13 +692,15 @@ def test_create_stage(): restApiId=api_id, deploymentId=deployment_id, ) - response.pop('createdDate',None) # createdDate is hard to match against, remove it - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # createdDate is hard to match against, remove it + response.pop('createdDate', 
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'id': deployment_id,
         'ResponseMetadata': {'HTTPStatusCode': 200},
-        'description' : ''
+        'description': ''
     })

     response = client.create_deployment(
@@ -686,34 +710,37 @@ def test_create_stage():

     deployment_id2 = response['id']

-
     response = client.get_deployments(
         restApiId=api_id,
     )
-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)

     response['items'][0].pop('createdDate')
     response['items'][1].pop('createdDate')
-    response['items'][0]['id'].should.match(r"{0}|{1}".format(deployment_id2,deployment_id))
-    response['items'][1]['id'].should.match(r"{0}|{1}".format(deployment_id2,deployment_id))
-
+    response['items'][0]['id'].should.match(
+        r"{0}|{1}".format(deployment_id2, deployment_id))
+    response['items'][1]['id'].should.match(
+        r"{0}|{1}".format(deployment_id2, deployment_id))

     new_stage_name = 'current'
-    response = client.create_stage(restApiId=api_id,stageName=new_stage_name,deploymentId=deployment_id2)
+    response = client.create_stage(
+        restApiId=api_id, stageName=new_stage_name, deploymentId=deployment_id2)

-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)

     response.should.equal({
-        'stageName':new_stage_name,
-        'deploymentId':deployment_id2,
-        'methodSettings':{},
-        'variables':{},
+        'stageName': new_stage_name,
+        'deploymentId': deployment_id2,
+        'methodSettings': {},
+        'variables': {},
         'ResponseMetadata': {'HTTPStatusCode': 200},
-        'description':'',
-        'cacheClusterEnabled':False
+        'description': '',
+        'cacheClusterEnabled': False
     })

     stage = client.get_stage(
@@ -724,20 +751,21 @@ def test_create_stage():
     stage['deploymentId'].should.equal(deployment_id2)

     new_stage_name_with_vars = 'stage_with_vars'
-    response = client.create_stage(restApiId=api_id,stageName=new_stage_name_with_vars,deploymentId=deployment_id2,variables={
-        "env" : "dev"
+    response = client.create_stage(restApiId=api_id, stageName=new_stage_name_with_vars, deploymentId=deployment_id2, variables={
+        "env": "dev"
     })

-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)

     response.should.equal({
-        'stageName':new_stage_name_with_vars,
-        'deploymentId':deployment_id2,
-        'methodSettings':{},
-        'variables':{ "env" : "dev" },
+        'stageName': new_stage_name_with_vars,
+        'deploymentId': deployment_id2,
+        'methodSettings': {},
+        'variables': {"env": "dev"},
         'ResponseMetadata': {'HTTPStatusCode': 200},
-        'description':'',
+        'description': '',
         'cacheClusterEnabled': False
     })

@@ -750,22 +778,23 @@ def test_create_stage():
     stage['variables'].should.have.key('env').which.should.match("dev")

     new_stage_name = 'stage_with_vars_and_cache_settings'
-    response = client.create_stage(restApiId=api_id,stageName=new_stage_name,deploymentId=deployment_id2,variables={
-        "env" : "dev"
-    }, cacheClusterEnabled=True,description="hello moto")
response = client.create_stage(restApiId=api_id, stageName=new_stage_name, deploymentId=deployment_id2, variables={ + "env": "dev" + }, cacheClusterEnabled=True, description="hello moto") - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ - 'stageName':new_stage_name, - 'deploymentId':deployment_id2, - 'methodSettings':{}, - 'variables':{ "env" : "dev" }, + 'stageName': new_stage_name, + 'deploymentId': deployment_id2, + 'methodSettings': {}, + 'variables': {"env": "dev"}, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description':'hello moto', + 'description': 'hello moto', 'cacheClusterEnabled': True, - 'cacheClusterSize' : "0.5" + 'cacheClusterSize': "0.5" }) stage = client.get_stage( @@ -776,22 +805,23 @@ def test_create_stage(): stage['cacheClusterSize'].should.equal("0.5") new_stage_name = 'stage_with_vars_and_cache_settings_and_size' - response = client.create_stage(restApiId=api_id,stageName=new_stage_name,deploymentId=deployment_id2,variables={ - "env" : "dev" - }, cacheClusterEnabled=True,cacheClusterSize="1.6",description="hello moto") + response = client.create_stage(restApiId=api_id, stageName=new_stage_name, deploymentId=deployment_id2, variables={ + "env": "dev" + }, cacheClusterEnabled=True, cacheClusterSize="1.6", description="hello moto") - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ - 'stageName':new_stage_name, - 'deploymentId':deployment_id2, - 'methodSettings':{}, - 'variables':{ "env" : "dev" }, + 'stageName': new_stage_name, + 'deploymentId': deployment_id2, + 'methodSettings': {}, + 'variables': {"env": "dev"}, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description':'hello moto', + 'description': 'hello moto', 'cacheClusterEnabled': True, - 'cacheClusterSize' : "1.6" + 'cacheClusterSize': "1.6" }) stage = client.get_stage( @@ -804,7 +834,6 @@ def test_create_stage(): stage['cacheClusterSize'].should.equal("1.6") - @mock_apigateway def test_deployment(): client = boto3.client('apigateway', region_name='us-west-2') @@ -825,13 +854,15 @@ def test_deployment(): restApiId=api_id, deploymentId=deployment_id, ) - response.pop('createdDate',None) # createdDate is hard to match against, remove it - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # createdDate is hard to match against, remove it + response.pop('createdDate', None) + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'id': deployment_id, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description' : '' + 'description': '' }) response = client.get_deployments( @@ -898,7 +929,8 @@ def test_http_proxying_integration(): api_id = response['id'] resources = client.get_resources(restApiId=api_id) - root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id'] + root_id = [resource for resource in resources[ + 'items'] if resource['path'] == '/'][0]['id'] client.put_method( restApiId=api_id, @@ -928,7 +960,8 @@ def 
test_http_proxying_integration(): stageName=stage_name, ) - deploy_url = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}".format(api_id=api_id, region_name=region_name, stage_name=stage_name) + deploy_url = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}".format( + api_id=api_id, region_name=region_name, stage_name=stage_name) if not settings.TEST_SERVER_MODE: requests.get(deploy_url).content.should.equal(b"a fake response") diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 4d0905196..9a6408999 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -16,7 +16,8 @@ from tests.helpers import requires_boto_gte @mock_elb_deprecated def test_create_autoscaling_group(): elb_conn = boto.ec2.elb.connect_to_region('us-east-1') - elb_conn.create_load_balancer('test_lb', zones=[], listeners=[(80, 8080, 'http')]) + elb_conn.create_load_balancer( + 'test_lb', zones=[], listeners=[(80, 8080, 'http')]) conn = boto.ec2.autoscale.connect_to_region('us-east-1') config = LaunchConfiguration( @@ -45,14 +46,15 @@ def test_create_autoscaling_group(): key='test_key', value='test_value', propagate_at_launch=True - ) + ) ], ) conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] group.name.should.equal('tester_group') - set(group.availability_zones).should.equal(set(['us-east-1c', 'us-east-1b'])) + set(group.availability_zones).should.equal( + set(['us-east-1c', 'us-east-1b'])) group.desired_capacity.should.equal(2) group.max_size.should.equal(2) group.min_size.should.equal(2) @@ -64,7 +66,8 @@ def test_create_autoscaling_group(): group.health_check_type.should.equal("EC2") list(group.load_balancers).should.equal(["test_lb"]) group.placement_group.should.equal("test_placement") - list(group.termination_policies).should.equal(["OldestInstance", "NewestInstance"]) + list(group.termination_policies).should.equal( + ["OldestInstance", "NewestInstance"]) len(list(group.tags)).should.equal(1) tag = list(group.tags)[0] tag.resource_id.should.equal('tester_group') @@ -134,7 +137,8 @@ def test_autoscaling_group_describe_filter(): group.name = 'tester_group3' conn.create_auto_scaling_group(group) - conn.get_all_groups(names=['tester_group', 'tester_group2']).should.have.length_of(2) + conn.get_all_groups( + names=['tester_group', 'tester_group2']).should.have.length_of(2) conn.get_all_groups().should.have.length_of(3) @@ -197,16 +201,16 @@ def test_autoscaling_tags_update(): conn.create_auto_scaling_group(group) conn.create_or_update_tags(tags=[Tag( - resource_id='tester_group', - key='test_key', - value='new_test_value', - propagate_at_launch=True - ), Tag( - resource_id='tester_group', - key='test_key2', - value='test_value2', - propagate_at_launch=True - )]) + resource_id='tester_group', + key='test_key', + value='new_test_value', + propagate_at_launch=True + ), Tag( + resource_id='tester_group', + key='test_key2', + value='test_value2', + propagate_at_launch=True + )]) group = conn.get_all_groups()[0] group.tags.should.have.length_of(2) @@ -372,6 +376,7 @@ def test_set_desired_capacity_the_same(): instances = list(conn.get_all_autoscaling_instances()) instances.should.have.length_of(2) + @mock_autoscaling_deprecated @mock_elb_deprecated def test_autoscaling_group_with_elb(): @@ -402,7 +407,8 @@ def test_autoscaling_group_with_elb(): group.desired_capacity.should.equal(2) elb.instances.should.have.length_of(2) - autoscale_instance_ids = 
set(instance.instance_id for instance in group.instances) + autoscale_instance_ids = set( + instance.instance_id for instance in group.instances) elb_instace_ids = set(instance.id for instance in elb.instances) autoscale_instance_ids.should.equal(elb_instace_ids) @@ -412,7 +418,8 @@ def test_autoscaling_group_with_elb(): group.desired_capacity.should.equal(3) elb.instances.should.have.length_of(3) - autoscale_instance_ids = set(instance.instance_id for instance in group.instances) + autoscale_instance_ids = set( + instance.instance_id for instance in group.instances) elb_instace_ids = set(instance.id for instance in elb.instances) autoscale_instance_ids.should.equal(elb_instace_ids) @@ -429,38 +436,39 @@ Boto3 @mock_autoscaling def test_create_autoscaling_group_boto3(): - client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( - LaunchConfigurationName='test_launch_configuration' - ) - response = client.create_auto_scaling_group( - AutoScalingGroupName='test_asg', - LaunchConfigurationName='test_launch_configuration', - MinSize=0, - MaxSize=20, - DesiredCapacity=5 - ) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + response = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5 + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) @mock_autoscaling def test_describe_autoscaling_groups_boto3(): - client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( - LaunchConfigurationName='test_launch_configuration' - ) - _ = client.create_auto_scaling_group( - AutoScalingGroupName='test_asg', - LaunchConfigurationName='test_launch_configuration', - MinSize=0, - MaxSize=20, - DesiredCapacity=5 - ) - response = client.describe_auto_scaling_groups( - AutoScalingGroupNames=["test_asg"] - ) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['AutoScalingGroups'][0]['AutoScalingGroupName'].should.equal('test_asg') + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5 + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=["test_asg"] + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['AutoScalingGroups'][0][ + 'AutoScalingGroupName'].should.equal('test_asg') @mock_autoscaling @@ -509,22 +517,23 @@ def test_autoscaling_taqs_update_boto3(): ) client.create_or_update_tags(Tags=[{ - "ResourceId": 'test_asg', - "Key": 'test_key', - "Value": 'updated_test_value', - "PropagateAtLaunch": True - }, { - "ResourceId": 'test_asg', - "Key": 'test_key2', - "Value": 'test_value2', - "PropagateAtLaunch": True - }]) + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'updated_test_value', + "PropagateAtLaunch": True + }, { + "ResourceId": 'test_asg', + "Key": 'test_key2', + "Value": 'test_value2', + "PropagateAtLaunch": True + }]) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=["test_asg"] ) 
response['AutoScalingGroups'][0]['Tags'].should.have.length_of(2) + @mock_autoscaling def test_autoscaling_describe_policies_boto3(): client = boto3.client('autoscaling', region_name='us-east-1') @@ -577,4 +586,5 @@ def test_autoscaling_describe_policies_boto3(): PolicyTypes=['SimpleScaling'] ) response['ScalingPolicies'].should.have.length_of(1) - response['ScalingPolicies'][0]['PolicyName'].should.equal('test_policy_down') + response['ScalingPolicies'][0][ + 'PolicyName'].should.equal('test_policy_down') diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index b2e21b03e..1c1486421 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -30,10 +30,12 @@ def test_create_launch_configuration(): launch_config.image_id.should.equal('ami-abcd1234') launch_config.instance_type.should.equal('t1.micro') launch_config.key_name.should.equal('the_keys') - set(launch_config.security_groups).should.equal(set(['default', 'default2'])) + set(launch_config.security_groups).should.equal( + set(['default', 'default2'])) launch_config.user_data.should.equal(b"This is some user_data") launch_config.instance_monitoring.enabled.should.equal('true') - launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing') + launch_config.instance_profile_name.should.equal( + 'arn:aws:iam::123456789012:instance-profile/testing') launch_config.spot_price.should.equal(0.1) @@ -78,16 +80,19 @@ def test_create_launch_configuration_with_block_device_mappings(): launch_config.image_id.should.equal('ami-abcd1234') launch_config.instance_type.should.equal('m1.small') launch_config.key_name.should.equal('the_keys') - set(launch_config.security_groups).should.equal(set(['default', 'default2'])) + set(launch_config.security_groups).should.equal( + set(['default', 'default2'])) launch_config.user_data.should.equal(b"This is some user_data") launch_config.instance_monitoring.enabled.should.equal('true') - launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing') + launch_config.instance_profile_name.should.equal( + 'arn:aws:iam::123456789012:instance-profile/testing') launch_config.spot_price.should.equal(0.1) len(launch_config.block_device_mappings).should.equal(3) returned_mapping = launch_config.block_device_mappings - set(returned_mapping.keys()).should.equal(set(['/dev/xvdb', '/dev/xvdp', '/dev/xvdh'])) + set(returned_mapping.keys()).should.equal( + set(['/dev/xvdb', '/dev/xvdp', '/dev/xvdh'])) returned_mapping['/dev/xvdh'].iops.should.equal(1000) returned_mapping['/dev/xvdh'].size.should.equal(100) @@ -198,7 +203,8 @@ def test_launch_configuration_describe_filter(): config.name = 'tester3' conn.create_launch_configuration(config) - conn.get_all_launch_configurations(names=['tester', 'tester2']).should.have.length_of(2) + conn.get_all_launch_configurations( + names=['tester', 'tester2']).should.have.length_of(2) conn.get_all_launch_configurations().should.have.length_of(3) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 74e93c373..84e8a8f2b 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -73,8 +73,10 @@ def test_invoke_requestresponse_function(): Payload=json.dumps(in_data)) success_result["StatusCode"].should.equal(202) - base64.b64decode(success_result["LogResult"]).decode('utf-8').should.equal(json.dumps(in_data)) 
- json.loads(success_result["Payload"].read().decode('utf-8')).should.equal(in_data) + base64.b64decode(success_result["LogResult"]).decode( + 'utf-8').should.equal(json.dumps(in_data)) + json.loads(success_result["Payload"].read().decode( + 'utf-8')).should.equal(in_data) @mock_lambda @@ -101,9 +103,11 @@ def test_invoke_event_function(): ).should.throw(botocore.client.ClientError) in_data = {'msg': 'So long and thanks for all the fish'} - success_result = conn.invoke(FunctionName='testFunction', InvocationType='Event', Payload=json.dumps(in_data)) + success_result = conn.invoke( + FunctionName='testFunction', InvocationType='Event', Payload=json.dumps(in_data)) success_result["StatusCode"].should.equal(202) - json.loads(success_result['Payload'].read().decode('utf-8')).should.equal({}) + json.loads(success_result['Payload'].read().decode( + 'utf-8')).should.equal({}) @mock_ec2 @@ -129,9 +133,11 @@ def test_invoke_function_get_ec2_volume(): ) in_data = {'volume_id': vol.id} - result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse', Payload=json.dumps(in_data)) + result = conn.invoke(FunctionName='testFunction', + InvocationType='RequestResponse', Payload=json.dumps(in_data)) result["StatusCode"].should.equal(202) - msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % (vol.id, vol.id, vol.state, vol.size, json.dumps(in_data)) + msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % ( + vol.id, vol.id, vol.state, vol.size, json.dumps(in_data)) base64.b64decode(result["LogResult"]).decode('utf-8').should.equal(msg) result['Payload'].read().decode('utf-8').should.equal(msg) @@ -189,8 +195,10 @@ def test_create_function_from_aws_bucket(): "SubnetIds": ["subnet-123abc"], }, ) - result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + result['ResponseMetadata'].pop('RetryAttempts', None) result.pop('LastModified') result.should.equal({ 'FunctionName': 'testFunction', @@ -231,8 +239,10 @@ def test_create_function_from_zipfile(): MemorySize=128, Publish=True, ) - result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + result['ResponseMetadata'].pop('RetryAttempts', None) result.pop('LastModified') result.should.equal({ @@ -283,8 +293,10 @@ def test_get_function(): ) result = conn.get_function(FunctionName='testFunction') - result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + result['ResponseMetadata'].pop('RetryAttempts', None) result['Configuration'].pop('LastModified') result.should.equal({ @@ -339,12 +351,15 @@ def test_delete_function(): ) success_result = conn.delete_function(FunctionName='testFunction') - 
success_result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - success_result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + success_result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + success_result['ResponseMetadata'].pop('RetryAttempts', None) success_result.should.equal({'ResponseMetadata': {'HTTPStatusCode': 204}}) - conn.delete_function.when.called_with(FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError) + conn.delete_function.when.called_with( + FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError) @mock_lambda @@ -407,8 +422,10 @@ def test_list_create_list_get_delete_list(): func.should.equal(expected_function_result['Configuration']) func = conn.get_function(FunctionName='testFunction') - func['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - func['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + func['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + func['ResponseMetadata'].pop('RetryAttempts', None) func['Configuration'].pop('LastModified') func.should.equal(expected_function_result) diff --git a/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py b/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py index 866197125..6f379daa6 100644 --- a/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py +++ b/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py @@ -1,201 +1,204 @@ from __future__ import unicode_literals template = { - "AWSTemplateFormatVersion" : "2010-09-09", + "AWSTemplateFormatVersion": "2010-09-09", - "Description" : "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", + "Description": "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", - "Parameters": { - "DBName": { - "Default": "MyDatabase", - "Description" : "The database name", - "Type": "String", - "MinLength": "1", - "MaxLength": "64", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." - }, + "Parameters": { + "DBName": { + "Default": "MyDatabase", + "Description": "The database name", + "Type": "String", + "MinLength": "1", + "MaxLength": "64", + "AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription": "must begin with a letter and contain only alphanumeric characters." 
+ }, - "DBInstanceIdentifier": { - "Type": "String" - }, + "DBInstanceIdentifier": { + "Type": "String" + }, - "DBUser": { - "NoEcho": "true", - "Description" : "The database admin account username", - "Type": "String", - "MinLength": "1", - "MaxLength": "16", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." - }, + "DBUser": { + "NoEcho": "true", + "Description": "The database admin account username", + "Type": "String", + "MinLength": "1", + "MaxLength": "16", + "AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription": "must begin with a letter and contain only alphanumeric characters." + }, - "DBPassword": { - "NoEcho": "true", - "Description" : "The database admin account password", - "Type": "String", - "MinLength": "1", - "MaxLength": "41", - "AllowedPattern" : "[a-zA-Z0-9]+", - "ConstraintDescription" : "must contain only alphanumeric characters." - }, + "DBPassword": { + "NoEcho": "true", + "Description": "The database admin account password", + "Type": "String", + "MinLength": "1", + "MaxLength": "41", + "AllowedPattern": "[a-zA-Z0-9]+", + "ConstraintDescription": "must contain only alphanumeric characters." + }, - "DBAllocatedStorage": { - "Default": "5", - "Description" : "The size of the database (Gb)", - "Type": "Number", - "MinValue": "5", - "MaxValue": "1024", - "ConstraintDescription" : "must be between 5 and 1024Gb." - }, + "DBAllocatedStorage": { + "Default": "5", + "Description": "The size of the database (Gb)", + "Type": "Number", + "MinValue": "5", + "MaxValue": "1024", + "ConstraintDescription": "must be between 5 and 1024Gb." + }, - "DBInstanceClass": { - "Description" : "The database instance type", - "Type": "String", - "Default": "db.m1.small", - "AllowedValues" : [ "db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"] -, - "ConstraintDescription" : "must select a valid database instance type." - }, + "DBInstanceClass": { + "Description": "The database instance type", + "Type": "String", + "Default": "db.m1.small", + "AllowedValues": ["db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"], + "ConstraintDescription": "must select a valid database instance type." + }, - "EC2SecurityGroup": { - "Description" : "The EC2 security group that contains instances that need access to the database", - "Default": "default", - "Type": "String", - "AllowedPattern" : "[a-zA-Z0-9\\-]+", - "ConstraintDescription" : "must be a valid security group name." - }, + "EC2SecurityGroup": { + "Description": "The EC2 security group that contains instances that need access to the database", + "Default": "default", + "Type": "String", + "AllowedPattern": "[a-zA-Z0-9\\-]+", + "ConstraintDescription": "must be a valid security group name." 
+ }, - "MultiAZ" : { - "Description" : "Multi-AZ master database", - "Type" : "String", - "Default" : "false", - "AllowedValues" : [ "true", "false" ], - "ConstraintDescription" : "must be true or false." - } - }, - - "Conditions" : { - "Is-EC2-VPC" : { "Fn::Or" : [ {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "eu-central-1" ]}, - {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "cn-north-1" ]}]}, - "Is-EC2-Classic" : { "Fn::Not" : [{ "Condition" : "Is-EC2-VPC"}]} - }, - - "Resources" : { - "DBParameterGroup": { - "Type": "AWS::RDS::DBParameterGroup", - "Properties" : { - "Description": "DB Parameter Goup", - "Family" : "MySQL5.1", - "Parameters": { - "BACKLOG_QUEUE_LIMIT": "2048" + "MultiAZ": { + "Description": "Multi-AZ master database", + "Type": "String", + "Default": "false", + "AllowedValues": ["true", "false"], + "ConstraintDescription": "must be true or false." } - } }, - "DBEC2SecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Condition" : "Is-EC2-VPC", - "Properties" : { - "GroupDescription": "Open database for access", - "SecurityGroupIngress" : [{ - "IpProtocol" : "tcp", - "FromPort" : "3306", - "ToPort" : "3306", - "SourceSecurityGroupName" : { "Ref" : "EC2SecurityGroup" } - }] - } + "Conditions": { + "Is-EC2-VPC": {"Fn::Or": [{"Fn::Equals": [{"Ref": "AWS::Region"}, "eu-central-1"]}, + {"Fn::Equals": [{"Ref": "AWS::Region"}, "cn-north-1"]}]}, + "Is-EC2-Classic": {"Fn::Not": [{"Condition": "Is-EC2-VPC"}]} }, - "DBSecurityGroup": { - "Type": "AWS::RDS::DBSecurityGroup", - "Condition" : "Is-EC2-Classic", - "Properties": { - "DBSecurityGroupIngress": [{ - "EC2SecurityGroupName": { "Ref": "EC2SecurityGroup" } - }], - "GroupDescription": "database access" - } + "Resources": { + "DBParameterGroup": { + "Type": "AWS::RDS::DBParameterGroup", + "Properties": { + "Description": "DB Parameter Goup", + "Family": "MySQL5.1", + "Parameters": { + "BACKLOG_QUEUE_LIMIT": "2048" + } + } + }, + + "DBEC2SecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Condition": "Is-EC2-VPC", + "Properties": { + "GroupDescription": "Open database for access", + "SecurityGroupIngress": [{ + "IpProtocol": "tcp", + "FromPort": "3306", + "ToPort": "3306", + "SourceSecurityGroupName": {"Ref": "EC2SecurityGroup"} + }] + } + }, + + "DBSecurityGroup": { + "Type": "AWS::RDS::DBSecurityGroup", + "Condition": "Is-EC2-Classic", + "Properties": { + "DBSecurityGroupIngress": [{ + "EC2SecurityGroupName": {"Ref": "EC2SecurityGroup"} + }], + "GroupDescription": "database access" + } + }, + + "my_vpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + } + }, + + "EC2Subnet": { + "Type": "AWS::EC2::Subnet", + "Condition": "Is-EC2-VPC", + "Properties": { + "AvailabilityZone": "eu-central-1a", + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "my_vpc"} + } + }, + + "DBSubnet": { + "Type": "AWS::RDS::DBSubnetGroup", + "Condition": "Is-EC2-VPC", + "Properties": { + "DBSubnetGroupDescription": "my db subnet group", + "SubnetIds": [{"Ref": "EC2Subnet"}], + } + }, + + "MasterDB": { + "Type": "AWS::RDS::DBInstance", + "Properties": { + "DBInstanceIdentifier": {"Ref": "DBInstanceIdentifier"}, + "DBName": {"Ref": "DBName"}, + "AllocatedStorage": {"Ref": "DBAllocatedStorage"}, + "DBInstanceClass": {"Ref": "DBInstanceClass"}, + "Engine": "MySQL", + "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", {"Ref": "DBSubnet"}, {"Ref": "AWS::NoValue"}]}, + "MasterUsername": {"Ref": "DBUser"}, + "MasterUserPassword": {"Ref": "DBPassword"}, + "MultiAZ": {"Ref": "MultiAZ"}, + "Tags": [{"Key": "Name", "Value": "Master 
Database"}], + "VPCSecurityGroups": {"Fn::If": ["Is-EC2-VPC", [{"Fn::GetAtt": ["DBEC2SecurityGroup", "GroupId"]}], {"Ref": "AWS::NoValue"}]}, + "DBSecurityGroups": {"Fn::If": ["Is-EC2-Classic", [{"Ref": "DBSecurityGroup"}], {"Ref": "AWS::NoValue"}]} + }, + "DeletionPolicy": "Snapshot" + }, + + "ReplicaDB": { + "Type": "AWS::RDS::DBInstance", + "Properties": { + "SourceDBInstanceIdentifier": {"Ref": "MasterDB"}, + "DBInstanceClass": {"Ref": "DBInstanceClass"}, + "Tags": [{"Key": "Name", "Value": "Read Replica Database"}] + } + } }, - "my_vpc": { - "Type" : "AWS::EC2::VPC", - "Properties" : { - "CidrBlock" : "10.0.0.0/16", - } - }, + "Outputs": { + "EC2Platform": { + "Description": "Platform in which this stack is deployed", + "Value": {"Fn::If": ["Is-EC2-VPC", "EC2-VPC", "EC2-Classic"]} + }, - "EC2Subnet": { - "Type" : "AWS::EC2::Subnet", - "Condition" : "Is-EC2-VPC", - "Properties" : { - "AvailabilityZone" : "eu-central-1a", - "CidrBlock" : "10.0.1.0/24", - "VpcId" : { "Ref" : "my_vpc" } - } - }, - - "DBSubnet": { - "Type": "AWS::RDS::DBSubnetGroup", - "Condition" : "Is-EC2-VPC", - "Properties": { - "DBSubnetGroupDescription": "my db subnet group", - "SubnetIds" : [ { "Ref": "EC2Subnet" } ], - } - }, - - "MasterDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "DBInstanceIdentifier": { "Ref": "DBInstanceIdentifier" }, - "DBName" : { "Ref" : "DBName" }, - "AllocatedStorage" : { "Ref" : "DBAllocatedStorage" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Engine" : "MySQL", - "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", { "Ref": "DBSubnet" }, { "Ref": "AWS::NoValue" }]}, - "MasterUsername" : { "Ref" : "DBUser" }, - "MasterUserPassword" : { "Ref" : "DBPassword" }, - "MultiAZ" : { "Ref" : "MultiAZ" }, - "Tags" : [{ "Key" : "Name", "Value" : "Master Database" }], - "VPCSecurityGroups": { "Fn::If" : [ "Is-EC2-VPC", [ { "Fn::GetAtt": [ "DBEC2SecurityGroup", "GroupId" ] } ], { "Ref" : "AWS::NoValue"}]}, - "DBSecurityGroups": { "Fn::If" : [ "Is-EC2-Classic", [ { "Ref": "DBSecurityGroup" } ], { "Ref" : "AWS::NoValue"}]} - }, - "DeletionPolicy" : "Snapshot" - }, - - "ReplicaDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "SourceDBInstanceIdentifier" : { "Ref" : "MasterDB" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Tags" : [{ "Key" : "Name", "Value" : "Read Replica Database" }] - } + "MasterJDBCConnectionString": { + "Description": "JDBC connection string for the master database", + "Value": {"Fn::Join": ["", ["jdbc:mysql://", + {"Fn::GetAtt": [ + "MasterDB", "Endpoint.Address"]}, + ":", + {"Fn::GetAtt": [ + "MasterDB", "Endpoint.Port"]}, + "/", + {"Ref": "DBName"}]]} + }, + "ReplicaJDBCConnectionString": { + "Description": "JDBC connection string for the replica database", + "Value": {"Fn::Join": ["", ["jdbc:mysql://", + {"Fn::GetAtt": [ + "ReplicaDB", "Endpoint.Address"]}, + ":", + {"Fn::GetAtt": [ + "ReplicaDB", "Endpoint.Port"]}, + "/", + {"Ref": "DBName"}]]} + } } - }, - - "Outputs" : { - "EC2Platform" : { - "Description" : "Platform in which this stack is deployed", - "Value" : { "Fn::If" : [ "Is-EC2-VPC", "EC2-VPC", "EC2-Classic" ]} - }, - - "MasterJDBCConnectionString": { - "Description" : "JDBC connection string for the master database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": [ "MasterDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "MasterDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - }, - "ReplicaJDBCConnectionString": { - "Description" : "JDBC connection string for the replica 
database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - } - } } diff --git a/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py b/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py index 3e5efa04a..2fbfb4cad 100644 --- a/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py +++ b/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py @@ -1,190 +1,193 @@ from __future__ import unicode_literals template = { - "AWSTemplateFormatVersion" : "2010-09-09", + "AWSTemplateFormatVersion": "2010-09-09", - "Description" : "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", + "Description": "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", - "Parameters": { - "DBName": { - "Default": "MyDatabase", - "Description" : "The database name", - "Type": "String", - "MinLength": "1", - "MaxLength": "64", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." + "Parameters": { + "DBName": { + "Default": "MyDatabase", + "Description": "The database name", + "Type": "String", + "MinLength": "1", + "MaxLength": "64", + "AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription": "must begin with a letter and contain only alphanumeric characters." + }, + + "DBInstanceIdentifier": { + "Type": "String" + }, + + "DBUser": { + "NoEcho": "true", + "Description": "The database admin account username", + "Type": "String", + "MinLength": "1", + "MaxLength": "16", + "AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription": "must begin with a letter and contain only alphanumeric characters." + }, + + "DBPassword": { + "NoEcho": "true", + "Description": "The database admin account password", + "Type": "String", + "MinLength": "1", + "MaxLength": "41", + "AllowedPattern": "[a-zA-Z0-9]+", + "ConstraintDescription": "must contain only alphanumeric characters." + }, + + "DBAllocatedStorage": { + "Default": "5", + "Description": "The size of the database (Gb)", + "Type": "Number", + "MinValue": "5", + "MaxValue": "1024", + "ConstraintDescription": "must be between 5 and 1024Gb." + }, + + "DBInstanceClass": { + "Description": "The database instance type", + "Type": "String", + "Default": "db.m1.small", + "AllowedValues": ["db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"], + "ConstraintDescription": "must select a valid database instance type." 
+ }, + + "EC2SecurityGroup": { + "Description": "The EC2 security group that contains instances that need access to the database", + "Default": "default", + "Type": "String", + "AllowedPattern": "[a-zA-Z0-9\\-]+", + "ConstraintDescription": "must be a valid security group name." + }, + + "MultiAZ": { + "Description": "Multi-AZ master database", + "Type": "String", + "Default": "false", + "AllowedValues": ["true", "false"], + "ConstraintDescription": "must be true or false." + } }, - "DBInstanceIdentifier": { - "Type": "String" + "Conditions": { + "Is-EC2-VPC": {"Fn::Or": [{"Fn::Equals": [{"Ref": "AWS::Region"}, "eu-central-1"]}, + {"Fn::Equals": [{"Ref": "AWS::Region"}, "cn-north-1"]}]}, + "Is-EC2-Classic": {"Fn::Not": [{"Condition": "Is-EC2-VPC"}]} }, - "DBUser": { - "NoEcho": "true", - "Description" : "The database admin account username", - "Type": "String", - "MinLength": "1", - "MaxLength": "16", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." + "Resources": { + "DBEC2SecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Condition": "Is-EC2-VPC", + "Properties": { + "GroupDescription": "Open database for access", + "SecurityGroupIngress": [{ + "IpProtocol": "tcp", + "FromPort": "3306", + "ToPort": "3306", + "SourceSecurityGroupName": {"Ref": "EC2SecurityGroup"} + }] + } + }, + + "DBSecurityGroup": { + "Type": "AWS::RDS::DBSecurityGroup", + "Condition": "Is-EC2-Classic", + "Properties": { + "DBSecurityGroupIngress": [{ + "EC2SecurityGroupName": {"Ref": "EC2SecurityGroup"} + }], + "GroupDescription": "database access" + } + }, + + "my_vpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + } + }, + + "EC2Subnet": { + "Type": "AWS::EC2::Subnet", + "Condition": "Is-EC2-VPC", + "Properties": { + "AvailabilityZone": "eu-central-1a", + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "my_vpc"} + } + }, + + "DBSubnet": { + "Type": "AWS::RDS::DBSubnetGroup", + "Condition": "Is-EC2-VPC", + "Properties": { + "DBSubnetGroupDescription": "my db subnet group", + "SubnetIds": [{"Ref": "EC2Subnet"}], + } + }, + + "MasterDB": { + "Type": "AWS::RDS::DBInstance", + "Properties": { + "DBInstanceIdentifier": {"Ref": "DBInstanceIdentifier"}, + "DBName": {"Ref": "DBName"}, + "AllocatedStorage": {"Ref": "DBAllocatedStorage"}, + "DBInstanceClass": {"Ref": "DBInstanceClass"}, + "Engine": "MySQL", + "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", {"Ref": "DBSubnet"}, {"Ref": "AWS::NoValue"}]}, + "MasterUsername": {"Ref": "DBUser"}, + "MasterUserPassword": {"Ref": "DBPassword"}, + "MultiAZ": {"Ref": "MultiAZ"}, + "Tags": [{"Key": "Name", "Value": "Master Database"}], + "VPCSecurityGroups": {"Fn::If": ["Is-EC2-VPC", [{"Fn::GetAtt": ["DBEC2SecurityGroup", "GroupId"]}], {"Ref": "AWS::NoValue"}]}, + "DBSecurityGroups": {"Fn::If": ["Is-EC2-Classic", [{"Ref": "DBSecurityGroup"}], {"Ref": "AWS::NoValue"}]} + }, + "DeletionPolicy": "Snapshot" + }, + + "ReplicaDB": { + "Type": "AWS::RDS::DBInstance", + "Properties": { + "SourceDBInstanceIdentifier": {"Ref": "MasterDB"}, + "DBInstanceClass": {"Ref": "DBInstanceClass"}, + "Tags": [{"Key": "Name", "Value": "Read Replica Database"}] + } + } }, - "DBPassword": { - "NoEcho": "true", - "Description" : "The database admin account password", - "Type": "String", - "MinLength": "1", - "MaxLength": "41", - "AllowedPattern" : "[a-zA-Z0-9]+", - "ConstraintDescription" : "must contain only alphanumeric characters." 
- }, + "Outputs": { + "EC2Platform": { + "Description": "Platform in which this stack is deployed", + "Value": {"Fn::If": ["Is-EC2-VPC", "EC2-VPC", "EC2-Classic"]} + }, - "DBAllocatedStorage": { - "Default": "5", - "Description" : "The size of the database (Gb)", - "Type": "Number", - "MinValue": "5", - "MaxValue": "1024", - "ConstraintDescription" : "must be between 5 and 1024Gb." - }, - - "DBInstanceClass": { - "Description" : "The database instance type", - "Type": "String", - "Default": "db.m1.small", - "AllowedValues" : [ "db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"] -, - "ConstraintDescription" : "must select a valid database instance type." - }, - - "EC2SecurityGroup": { - "Description" : "The EC2 security group that contains instances that need access to the database", - "Default": "default", - "Type": "String", - "AllowedPattern" : "[a-zA-Z0-9\\-]+", - "ConstraintDescription" : "must be a valid security group name." - }, - - "MultiAZ" : { - "Description" : "Multi-AZ master database", - "Type" : "String", - "Default" : "false", - "AllowedValues" : [ "true", "false" ], - "ConstraintDescription" : "must be true or false." + "MasterJDBCConnectionString": { + "Description": "JDBC connection string for the master database", + "Value": {"Fn::Join": ["", ["jdbc:mysql://", + {"Fn::GetAtt": [ + "MasterDB", "Endpoint.Address"]}, + ":", + {"Fn::GetAtt": [ + "MasterDB", "Endpoint.Port"]}, + "/", + {"Ref": "DBName"}]]} + }, + "ReplicaJDBCConnectionString": { + "Description": "JDBC connection string for the replica database", + "Value": {"Fn::Join": ["", ["jdbc:mysql://", + {"Fn::GetAtt": [ + "ReplicaDB", "Endpoint.Address"]}, + ":", + {"Fn::GetAtt": [ + "ReplicaDB", "Endpoint.Port"]}, + "/", + {"Ref": "DBName"}]]} + } } - }, - - "Conditions" : { - "Is-EC2-VPC" : { "Fn::Or" : [ {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "eu-central-1" ]}, - {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "cn-north-1" ]}]}, - "Is-EC2-Classic" : { "Fn::Not" : [{ "Condition" : "Is-EC2-VPC"}]} - }, - - "Resources" : { - "DBEC2SecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Condition" : "Is-EC2-VPC", - "Properties" : { - "GroupDescription": "Open database for access", - "SecurityGroupIngress" : [{ - "IpProtocol" : "tcp", - "FromPort" : "3306", - "ToPort" : "3306", - "SourceSecurityGroupName" : { "Ref" : "EC2SecurityGroup" } - }] - } - }, - - "DBSecurityGroup": { - "Type": "AWS::RDS::DBSecurityGroup", - "Condition" : "Is-EC2-Classic", - "Properties": { - "DBSecurityGroupIngress": [{ - "EC2SecurityGroupName": { "Ref": "EC2SecurityGroup" } - }], - "GroupDescription": "database access" - } - }, - - "my_vpc": { - "Type" : "AWS::EC2::VPC", - "Properties" : { - "CidrBlock" : "10.0.0.0/16", - } - }, - - "EC2Subnet": { - "Type" : "AWS::EC2::Subnet", - "Condition" : "Is-EC2-VPC", - "Properties" : { - "AvailabilityZone" : "eu-central-1a", - "CidrBlock" : "10.0.1.0/24", - "VpcId" : { "Ref" : "my_vpc" } - } - }, - - "DBSubnet": { - "Type": "AWS::RDS::DBSubnetGroup", - "Condition" : "Is-EC2-VPC", - "Properties": { - "DBSubnetGroupDescription": "my db subnet group", - "SubnetIds" : [ { "Ref": "EC2Subnet" } ], - } - }, - - "MasterDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "DBInstanceIdentifier": { "Ref": 
"DBInstanceIdentifier" }, - "DBName" : { "Ref" : "DBName" }, - "AllocatedStorage" : { "Ref" : "DBAllocatedStorage" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Engine" : "MySQL", - "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", { "Ref": "DBSubnet" }, { "Ref": "AWS::NoValue" }]}, - "MasterUsername" : { "Ref" : "DBUser" }, - "MasterUserPassword" : { "Ref" : "DBPassword" }, - "MultiAZ" : { "Ref" : "MultiAZ" }, - "Tags" : [{ "Key" : "Name", "Value" : "Master Database" }], - "VPCSecurityGroups": { "Fn::If" : [ "Is-EC2-VPC", [ { "Fn::GetAtt": [ "DBEC2SecurityGroup", "GroupId" ] } ], { "Ref" : "AWS::NoValue"}]}, - "DBSecurityGroups": { "Fn::If" : [ "Is-EC2-Classic", [ { "Ref": "DBSecurityGroup" } ], { "Ref" : "AWS::NoValue"}]} - }, - "DeletionPolicy" : "Snapshot" - }, - - "ReplicaDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "SourceDBInstanceIdentifier" : { "Ref" : "MasterDB" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Tags" : [{ "Key" : "Name", "Value" : "Read Replica Database" }] - } - } - }, - - "Outputs" : { - "EC2Platform" : { - "Description" : "Platform in which this stack is deployed", - "Value" : { "Fn::If" : [ "Is-EC2-VPC", "EC2-VPC", "EC2-Classic" ]} - }, - - "MasterJDBCConnectionString": { - "Description" : "JDBC connection string for the master database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": [ "MasterDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "MasterDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - }, - "ReplicaJDBCConnectionString": { - "Description" : "JDBC connection string for the replica database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - } - } } diff --git a/tests/test_cloudformation/fixtures/redshift.py b/tests/test_cloudformation/fixtures/redshift.py index 90e171659..317e213bc 100644 --- a/tests/test_cloudformation/fixtures/redshift.py +++ b/tests/test_cloudformation/fixtures/redshift.py @@ -1,187 +1,187 @@ from __future__ import unicode_literals template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Parameters" : { - "DatabaseName" : { - "Description" : "The name of the first database to be created when the cluster is created", - "Type" : "String", - "Default" : "dev", - "AllowedPattern" : "([a-z]|[0-9])+" - }, - "ClusterType" : { - "Description" : "The type of cluster", - "Type" : "String", - "Default" : "single-node", - "AllowedValues" : [ "single-node", "multi-node" ] - }, - "NumberOfNodes" : { - "Description" : "The number of compute nodes in the cluster. 
For multi-node clusters, the NumberOfNodes parameter must be greater than 1", - "Type" : "Number", - "Default" : "1" - }, - "NodeType" : { - "Description" : "The type of node to be provisioned", - "Type" : "String", - "Default" : "dw1.xlarge", - "AllowedValues" : [ "dw1.xlarge", "dw1.8xlarge", "dw2.large", "dw2.8xlarge" ] - }, - "MasterUsername" : { - "Description" : "The user name that is associated with the master user account for the cluster that is being created", - "Type" : "String", - "Default" : "defaultuser", - "AllowedPattern" : "([a-z])([a-z]|[0-9])*" - }, - "MasterUserPassword" : { - "Description" : "The password that is associated with the master user account for the cluster that is being created.", - "Type" : "String", - "NoEcho" : "true" - }, - "InboundTraffic" : { - "Description" : "Allow inbound traffic to the cluster from this CIDR range.", - "Type" : "String", - "MinLength": "9", - "MaxLength": "18", - "Default" : "0.0.0.0/0", - "AllowedPattern" : "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription" : "must be a valid CIDR range of the form x.x.x.x/x." - }, - "PortNumber" : { - "Description" : "The port number on which the cluster accepts incoming connections.", - "Type" : "Number", - "Default" : "5439" - } - }, - "Conditions" : { - "IsMultiNodeCluster" : { - "Fn::Equals" : [{ "Ref" : "ClusterType" }, "multi-node" ] - } - }, - "Resources" : { - "RedshiftCluster" : { - "Type" : "AWS::Redshift::Cluster", - "DependsOn" : "AttachGateway", - "Properties" : { - "ClusterType" : { "Ref" : "ClusterType" }, - "NumberOfNodes" : { "Fn::If" : [ "IsMultiNodeCluster", { "Ref" : "NumberOfNodes" }, { "Ref" : "AWS::NoValue" }]}, - "NodeType" : { "Ref" : "NodeType" }, - "DBName" : { "Ref" : "DatabaseName" }, - "MasterUsername" : { "Ref" : "MasterUsername" }, - "MasterUserPassword" : { "Ref" : "MasterUserPassword" }, - "ClusterParameterGroupName" : { "Ref" : "RedshiftClusterParameterGroup" }, - "VpcSecurityGroupIds" : [ { "Ref" : "SecurityGroup" } ], - "ClusterSubnetGroupName" : { "Ref" : "RedshiftClusterSubnetGroup" }, - "PubliclyAccessible" : "true", - "Port" : { "Ref" : "PortNumber" } - } - }, - "RedshiftClusterParameterGroup" : { - "Type" : "AWS::Redshift::ClusterParameterGroup", - "Properties" : { - "Description" : "Cluster parameter group", - "ParameterGroupFamily" : "redshift-1.0", - "Parameters" : [{ - "ParameterName" : "enable_user_activity_logging", - "ParameterValue" : "true" - }] - } - }, - "RedshiftClusterSubnetGroup" : { - "Type" : "AWS::Redshift::ClusterSubnetGroup", - "Properties" : { - "Description" : "Cluster subnet group", - "SubnetIds" : [ { "Ref" : "PublicSubnet" } ] - } - }, - "VPC" : { - "Type" : "AWS::EC2::VPC", - "Properties" : { - "CidrBlock" : "10.0.0.0/16" - } - }, - "PublicSubnet" : { - "Type" : "AWS::EC2::Subnet", - "Properties" : { - "CidrBlock" : "10.0.0.0/24", - "VpcId" : { "Ref" : "VPC" } - } - }, - "SecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Security group", - "SecurityGroupIngress" : [ { - "CidrIp" : { "Ref": "InboundTraffic" }, - "FromPort" : { "Ref" : "PortNumber" }, - "ToPort" : { "Ref" : "PortNumber" }, - "IpProtocol" : "tcp" - } ], - "VpcId" : { "Ref" : "VPC" } - } - }, - "myInternetGateway" : { - "Type" : "AWS::EC2::InternetGateway" - }, - "AttachGateway" : { - "Type" : "AWS::EC2::VPCGatewayAttachment", - "Properties" : { - "VpcId" : { "Ref" : "VPC" }, - "InternetGatewayId" : { "Ref" : "myInternetGateway" } - } - }, - "PublicRouteTable" : { - "Type" : 
"AWS::EC2::RouteTable", - "Properties" : { - "VpcId" : { - "Ref" : "VPC" - } - } - }, - "PublicRoute" : { - "Type" : "AWS::EC2::Route", - "DependsOn" : "AttachGateway", - "Properties" : { - "RouteTableId" : { - "Ref" : "PublicRouteTable" + "AWSTemplateFormatVersion": "2010-09-09", + "Parameters": { + "DatabaseName": { + "Description": "The name of the first database to be created when the cluster is created", + "Type": "String", + "Default": "dev", + "AllowedPattern": "([a-z]|[0-9])+" }, - "DestinationCidrBlock" : "0.0.0.0/0", - "GatewayId" : { - "Ref" : "myInternetGateway" - } - } - }, - "PublicSubnetRouteTableAssociation" : { - "Type" : "AWS::EC2::SubnetRouteTableAssociation", - "Properties" : { - "SubnetId" : { - "Ref" : "PublicSubnet" + "ClusterType": { + "Description": "The type of cluster", + "Type": "String", + "Default": "single-node", + "AllowedValues": ["single-node", "multi-node"] }, - "RouteTableId" : { - "Ref" : "PublicRouteTable" + "NumberOfNodes": { + "Description": "The number of compute nodes in the cluster. For multi-node clusters, the NumberOfNodes parameter must be greater than 1", + "Type": "Number", + "Default": "1" + }, + "NodeType": { + "Description": "The type of node to be provisioned", + "Type": "String", + "Default": "dw1.xlarge", + "AllowedValues": ["dw1.xlarge", "dw1.8xlarge", "dw2.large", "dw2.8xlarge"] + }, + "MasterUsername": { + "Description": "The user name that is associated with the master user account for the cluster that is being created", + "Type": "String", + "Default": "defaultuser", + "AllowedPattern": "([a-z])([a-z]|[0-9])*" + }, + "MasterUserPassword": { + "Description": "The password that is associated with the master user account for the cluster that is being created.", + "Type": "String", + "NoEcho": "true" + }, + "InboundTraffic": { + "Description": "Allow inbound traffic to the cluster from this CIDR range.", + "Type": "String", + "MinLength": "9", + "MaxLength": "18", + "Default": "0.0.0.0/0", + "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", + "ConstraintDescription": "must be a valid CIDR range of the form x.x.x.x/x." 
+ }, + "PortNumber": { + "Description": "The port number on which the cluster accepts incoming connections.", + "Type": "Number", + "Default": "5439" + } + }, + "Conditions": { + "IsMultiNodeCluster": { + "Fn::Equals": [{"Ref": "ClusterType"}, "multi-node"] + } + }, + "Resources": { + "RedshiftCluster": { + "Type": "AWS::Redshift::Cluster", + "DependsOn": "AttachGateway", + "Properties": { + "ClusterType": {"Ref": "ClusterType"}, + "NumberOfNodes": {"Fn::If": ["IsMultiNodeCluster", {"Ref": "NumberOfNodes"}, {"Ref": "AWS::NoValue"}]}, + "NodeType": {"Ref": "NodeType"}, + "DBName": {"Ref": "DatabaseName"}, + "MasterUsername": {"Ref": "MasterUsername"}, + "MasterUserPassword": {"Ref": "MasterUserPassword"}, + "ClusterParameterGroupName": {"Ref": "RedshiftClusterParameterGroup"}, + "VpcSecurityGroupIds": [{"Ref": "SecurityGroup"}], + "ClusterSubnetGroupName": {"Ref": "RedshiftClusterSubnetGroup"}, + "PubliclyAccessible": "true", + "Port": {"Ref": "PortNumber"} + } + }, + "RedshiftClusterParameterGroup": { + "Type": "AWS::Redshift::ClusterParameterGroup", + "Properties": { + "Description": "Cluster parameter group", + "ParameterGroupFamily": "redshift-1.0", + "Parameters": [{ + "ParameterName": "enable_user_activity_logging", + "ParameterValue": "true" + }] + } + }, + "RedshiftClusterSubnetGroup": { + "Type": "AWS::Redshift::ClusterSubnetGroup", + "Properties": { + "Description": "Cluster subnet group", + "SubnetIds": [{"Ref": "PublicSubnet"}] + } + }, + "VPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16" + } + }, + "PublicSubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "VPC"} + } + }, + "SecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "Security group", + "SecurityGroupIngress": [{ + "CidrIp": {"Ref": "InboundTraffic"}, + "FromPort": {"Ref": "PortNumber"}, + "ToPort": {"Ref": "PortNumber"}, + "IpProtocol": "tcp" + }], + "VpcId": {"Ref": "VPC"} + } + }, + "myInternetGateway": { + "Type": "AWS::EC2::InternetGateway" + }, + "AttachGateway": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": {"Ref": "VPC"}, + "InternetGatewayId": {"Ref": "myInternetGateway"} + } + }, + "PublicRouteTable": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPC" + } + } + }, + "PublicRoute": { + "Type": "AWS::EC2::Route", + "DependsOn": "AttachGateway", + "Properties": { + "RouteTableId": { + "Ref": "PublicRouteTable" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "myInternetGateway" + } + } + }, + "PublicSubnetRouteTableAssociation": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "SubnetId": { + "Ref": "PublicSubnet" + }, + "RouteTableId": { + "Ref": "PublicRouteTable" + } + } + } + }, + "Outputs": { + "ClusterEndpoint": { + "Description": "Cluster endpoint", + "Value": {"Fn::Join": [":", [{"Fn::GetAtt": ["RedshiftCluster", "Endpoint.Address"]}, {"Fn::GetAtt": ["RedshiftCluster", "Endpoint.Port"]}]]} + }, + "ClusterName": { + "Description": "Name of cluster", + "Value": {"Ref": "RedshiftCluster"} + }, + "ParameterGroupName": { + "Description": "Name of parameter group", + "Value": {"Ref": "RedshiftClusterParameterGroup"} + }, + "RedshiftClusterSubnetGroupName": { + "Description": "Name of cluster subnet group", + "Value": {"Ref": "RedshiftClusterSubnetGroup"} + }, + "RedshiftClusterSecurityGroupName": { + "Description": "Name of cluster security group", + "Value": {"Ref": 
"SecurityGroup"} } - } } - }, - "Outputs" : { - "ClusterEndpoint" : { - "Description" : "Cluster endpoint", - "Value" : { "Fn::Join" : [ ":", [ { "Fn::GetAtt" : [ "RedshiftCluster", "Endpoint.Address" ] }, { "Fn::GetAtt" : [ "RedshiftCluster", "Endpoint.Port" ] } ] ] } - }, - "ClusterName" : { - "Description" : "Name of cluster", - "Value" : { "Ref" : "RedshiftCluster" } - }, - "ParameterGroupName" : { - "Description" : "Name of parameter group", - "Value" : { "Ref" : "RedshiftClusterParameterGroup" } - }, - "RedshiftClusterSubnetGroupName" : { - "Description" : "Name of cluster subnet group", - "Value" : { "Ref" : "RedshiftClusterSubnetGroup" } - }, - "RedshiftClusterSecurityGroupName" : { - "Description" : "Name of cluster security group", - "Value" : { "Ref" : "SecurityGroup" } - } - } -} \ No newline at end of file +} diff --git a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py index 02fa57b8f..5e66bbd86 100644 --- a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py +++ b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py @@ -1,40 +1,40 @@ from __future__ import unicode_literals template = { - "Resources" : { - "Ec2Instance" : { - "Type" : "AWS::EC2::Instance", - "Properties" : { - "ImageId" : "ami-1234abcd", + "Resources": { + "Ec2Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", "PrivateIpAddress": "10.0.0.25", } }, "HostedZone": { - "Type" : "AWS::Route53::HostedZone", - "Properties" : { - "Name" : "my_zone" + "Type": "AWS::Route53::HostedZone", + "Properties": { + "Name": "my_zone" } }, - "myDNSRecord" : { - "Type" : "AWS::Route53::RecordSet", - "Properties" : { - "HostedZoneName" : { "Ref" : "HostedZone" }, - "Comment" : "DNS name for my instance.", - "Name" : { - "Fn::Join" : [ "", [ - {"Ref" : "Ec2Instance"}, ".", - {"Ref" : "AWS::Region"}, ".", - {"Ref" : "HostedZone"} ,"." - ] ] - }, - "Type" : "A", - "TTL" : "900", - "ResourceRecords" : [ - { "Fn::GetAtt" : [ "Ec2Instance", "PrivateIp" ] } - ] - } + "myDNSRecord": { + "Type": "AWS::Route53::RecordSet", + "Properties": { + "HostedZoneName": {"Ref": "HostedZone"}, + "Comment": "DNS name for my instance.", + "Name": { + "Fn::Join": ["", [ + {"Ref": "Ec2Instance"}, ".", + {"Ref": "AWS::Region"}, ".", + {"Ref": "HostedZone"}, "." 
+ ]] + }, + "Type": "A", + "TTL": "900", + "ResourceRecords": [ + {"Fn::GetAtt": ["Ec2Instance", "PrivateIp"]} + ] + } } }, -} \ No newline at end of file +} diff --git a/tests/test_cloudformation/fixtures/route53_health_check.py b/tests/test_cloudformation/fixtures/route53_health_check.py index 6c6159fde..f6a2c9b8e 100644 --- a/tests/test_cloudformation/fixtures/route53_health_check.py +++ b/tests/test_cloudformation/fixtures/route53_health_check.py @@ -1,39 +1,39 @@ from __future__ import unicode_literals template = { - "Resources" : { + "Resources": { "HostedZone": { - "Type" : "AWS::Route53::HostedZone", - "Properties" : { - "Name" : "my_zone" + "Type": "AWS::Route53::HostedZone", + "Properties": { + "Name": "my_zone" } }, "my_health_check": { "Type": "AWS::Route53::HealthCheck", - "Properties" : { - "HealthCheckConfig" : { - "FailureThreshold" : 3, - "IPAddress" : "10.0.0.4", - "Port" : 80, - "RequestInterval" : 10, - "ResourcePath" : "/", - "Type" : "HTTP", + "Properties": { + "HealthCheckConfig": { + "FailureThreshold": 3, + "IPAddress": "10.0.0.4", + "Port": 80, + "RequestInterval": 10, + "ResourcePath": "/", + "Type": "HTTP", } } }, - "myDNSRecord" : { - "Type" : "AWS::Route53::RecordSet", - "Properties" : { - "HostedZoneName" : { "Ref" : "HostedZone" }, - "Comment" : "DNS name for my instance.", - "Name" : "my_record_set", - "Type" : "A", - "TTL" : "900", - "ResourceRecords" : ["my.example.com"], - "HealthCheckId": {"Ref": "my_health_check"}, - } + "myDNSRecord": { + "Type": "AWS::Route53::RecordSet", + "Properties": { + "HostedZoneName": {"Ref": "HostedZone"}, + "Comment": "DNS name for my instance.", + "Name": "my_record_set", + "Type": "A", + "TTL": "900", + "ResourceRecords": ["my.example.com"], + "HealthCheckId": {"Ref": "my_health_check"}, + } } }, -} \ No newline at end of file +} diff --git a/tests/test_cloudformation/fixtures/route53_roundrobin.py b/tests/test_cloudformation/fixtures/route53_roundrobin.py index d985623bb..da4fecd4d 100644 --- a/tests/test_cloudformation/fixtures/route53_roundrobin.py +++ b/tests/test_cloudformation/fixtures/route53_roundrobin.py @@ -1,47 +1,47 @@ from __future__ import unicode_literals template = { - "AWSTemplateFormatVersion" : "2010-09-09", + "AWSTemplateFormatVersion": "2010-09-09", - "Description" : "AWS CloudFormation Sample Template Route53_RoundRobin: Sample template showing how to use weighted round robin (WRR) DNS entried via Amazon Route 53. This contrived sample uses weighted CNAME records to illustrate that the weighting influences the return records. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates one or more AWS resources. You will be billed for the AWS resources used if you create a stack from this template.", + "Description": "AWS CloudFormation Sample Template Route53_RoundRobin: Sample template showing how to use weighted round robin (WRR) DNS entried via Amazon Route 53. This contrived sample uses weighted CNAME records to illustrate that the weighting influences the return records. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates one or more AWS resources. 
You will be billed for the AWS resources used if you create a stack from this template.", - "Resources" : { + "Resources": { - "MyZone": { - "Type" : "AWS::Route53::HostedZone", - "Properties" : { - "Name" : "my_zone" - } + "MyZone": { + "Type": "AWS::Route53::HostedZone", + "Properties": { + "Name": "my_zone" + } + }, + + "MyDNSRecord": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneName": {"Ref": "MyZone"}, + "Comment": "Contrived example to redirect to aws.amazon.com 75% of the time and www.amazon.com 25% of the time.", + "RecordSets": [{ + "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "AWS"]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Type": "CNAME", + "TTL": "900", + "ResourceRecords": ["aws.amazon.com"], + "Weight": "3" + }, { + "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "Amazon"]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Type": "CNAME", + "TTL": "900", + "ResourceRecords": ["www.amazon.com"], + "Weight": "1" + }] + } + } }, - "MyDNSRecord" : { - "Type" : "AWS::Route53::RecordSetGroup", - "Properties" : { - "HostedZoneName" : {"Ref": "MyZone"}, - "Comment" : "Contrived example to redirect to aws.amazon.com 75% of the time and www.amazon.com 25% of the time.", - "RecordSets" : [{ - "SetIdentifier" : { "Fn::Join" : [ " ", [{"Ref" : "AWS::StackName"}, "AWS" ]]}, - "Name" : { "Fn::Join" : [ "", [{"Ref" : "AWS::StackName"}, ".", {"Ref" : "AWS::Region"}, ".", {"Ref" : "MyZone"}, "."]]}, - "Type" : "CNAME", - "TTL" : "900", - "ResourceRecords" : ["aws.amazon.com"], - "Weight" : "3" - },{ - "SetIdentifier" : { "Fn::Join" : [ " ", [{"Ref" : "AWS::StackName"}, "Amazon" ]]}, - "Name" : { "Fn::Join" : [ "", [{"Ref" : "AWS::StackName"}, ".", {"Ref" : "AWS::Region"}, ".", {"Ref" : "MyZone"}, "."]]}, - "Type" : "CNAME", - "TTL" : "900", - "ResourceRecords" : ["www.amazon.com"], - "Weight" : "1" - }] - } + "Outputs": { + "DomainName": { + "Description": "Fully qualified domain name", + "Value": {"Ref": "MyDNSRecord"} + } } - }, - - "Outputs" : { - "DomainName" : { - "Description" : "Fully qualified domain name", - "Value" : { "Ref" : "MyDNSRecord" } - } - } -} \ No newline at end of file +} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 3d41c9d91..619d8c3da 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -35,8 +35,8 @@ dummy_template3 = { "VPC": { "Properties": { "CidrBlock": "192.168.0.0/16", - }, - "Type": "AWS::EC2::VPC" + }, + "Type": "AWS::EC2::VPC" } }, } @@ -91,7 +91,8 @@ def test_create_stack_with_notification_arn(): ) stack = conn.describe_stacks()[0] - [n.value for n in stack.notification_arns].should.contain('arn:aws:sns:us-east-1:123456789012:fake-queue') + [n.value for n in stack.notification_arns].should.contain( + 'arn:aws:sns:us-east-1:123456789012:fake-queue') @mock_cloudformation_deprecated @@ -111,16 +112,16 @@ def test_create_stack_from_s3_url(): stack.stack_name.should.equal('new-stack') stack.get_template().should.equal( { - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': 
dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } } } - } - }) + }) @mock_cloudformation_deprecated @@ -271,7 +272,8 @@ def test_cloudformation_params(): } dummy_template_json = json.dumps(dummy_template) cfn = boto.connect_cloudformation() - cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[('APPNAME', 'testing123')]) + cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[ + ('APPNAME', 'testing123')]) stack = cfn.describe_stacks('test_stack1')[0] stack.parameters.should.have.length_of(1) param = stack.parameters[0] @@ -342,23 +344,28 @@ def test_update_stack(): @mock_cloudformation_deprecated def test_update_stack_when_rolled_back(): conn = boto.connect_cloudformation() - stack_id = conn.create_stack("test_stack", template_body=dummy_template_json) + stack_id = conn.create_stack( + "test_stack", template_body=dummy_template_json) - cloudformation_backends[conn.region.name].stacks[stack_id].status = 'ROLLBACK_COMPLETE' + cloudformation_backends[conn.region.name].stacks[ + stack_id].status = 'ROLLBACK_COMPLETE' with assert_raises(BotoServerError) as err: conn.update_stack("test_stack", dummy_template_json) ex = err.exception - ex.body.should.match(r'is in ROLLBACK_COMPLETE state and can not be updated') + ex.body.should.match( + r'is in ROLLBACK_COMPLETE state and can not be updated') ex.error_code.should.equal('ValidationError') ex.reason.should.equal('Bad Request') ex.status.should.equal(400) + @mock_cloudformation_deprecated def test_describe_stack_events_shows_create_update_and_delete(): conn = boto.connect_cloudformation() - stack_id = conn.create_stack("test_stack", template_body=dummy_template_json) + stack_id = conn.create_stack( + "test_stack", template_body=dummy_template_json) conn.update_stack(stack_id, template_body=dummy_template_json2) conn.delete_stack(stack_id) @@ -367,7 +374,8 @@ def test_describe_stack_events_shows_create_update_and_delete(): events[0].resource_type.should.equal("AWS::CloudFormation::Stack") events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") - # testing ordering of stack events without assuming resource events will not exist + # testing ordering of stack events without assuming resource events will + # not exist stack_events_to_look_for = iter([ ("CREATE_IN_PROGRESS", "User Initiated"), ("CREATE_COMPLETE", None), ("UPDATE_IN_PROGRESS", "User Initiated"), ("UPDATE_COMPLETE", None), @@ -381,12 +389,13 @@ def test_describe_stack_events_shows_create_update_and_delete(): event.logical_resource_id.should.equal("test_stack") event.physical_resource_id.should.equal(stack_id) - status_to_look_for, reason_to_look_for = next(stack_events_to_look_for) + status_to_look_for, reason_to_look_for = next( + stack_events_to_look_for) event.resource_status.should.equal(status_to_look_for) if reason_to_look_for is not None: - event.resource_status_reason.should.equal(reason_to_look_for) + event.resource_status_reason.should.equal( + reason_to_look_for) except StopIteration: assert False, "Too many stack events" list(stack_events_to_look_for).should.be.empty - diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 2ee74f886..29e2dfa10 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -124,7 +124,8 @@ def test_create_stack_from_s3_url(): s3_conn = 
boto3.resource('s3') bucket = s3_conn.create_bucket(Bucket="foobar") - key = s3_conn.Object('foobar', 'template-key').put(Body=dummy_template_json) + key = s3_conn.Object( + 'foobar', 'template-key').put(Body=dummy_template_json) key_url = s3.generate_presigned_url( ClientMethod='get_object', Params={ @@ -160,6 +161,7 @@ def test_describe_stack_resources(): resource['ResourceType'].should.equal('AWS::EC2::Instance') resource['StackId'].should.equal(stack['StackId']) + @mock_cloudformation def test_describe_stack_by_name(): cf_conn = boto3.client('cloudformation', region_name='us-east-1') @@ -249,6 +251,7 @@ def test_describe_deleted_stack(): stack_by_id['StackName'].should.equal("test_stack") stack_by_id['StackStatus'].should.equal("DELETE_COMPLETE") + @mock_cloudformation def test_describe_updated_stack(): cf_conn = boto3.client('cloudformation', region_name='us-east-1') @@ -299,9 +302,9 @@ def test_cloudformation_params(): StackName='test_stack', TemplateBody=dummy_template_with_params_json, Parameters=[{ - "ParameterKey": "APPNAME", - "ParameterValue": "testing123", - }], + "ParameterKey": "APPNAME", + "ParameterValue": "testing123", + }], ) stack.parameters.should.have.length_of(1) @@ -334,6 +337,7 @@ def test_stack_tags(): item for items in [tag.items() for tag in tags] for item in items) observed_tag_items.should.equal(expected_tag_items) + @mock_cloudformation def test_stack_events(): cf = boto3.resource('cloudformation', region_name='us-east-1') @@ -350,7 +354,8 @@ def test_stack_events(): events[0].resource_type.should.equal("AWS::CloudFormation::Stack") events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") - # testing ordering of stack events without assuming resource events will not exist + # testing ordering of stack events without assuming resource events will + # not exist stack_events_to_look_for = iter([ ("CREATE_IN_PROGRESS", "User Initiated"), ("CREATE_COMPLETE", None), ("UPDATE_IN_PROGRESS", "User Initiated"), ("UPDATE_COMPLETE", None), @@ -364,10 +369,12 @@ def test_stack_events(): event.logical_resource_id.should.equal("test_stack") event.physical_resource_id.should.equal(stack.stack_id) - status_to_look_for, reason_to_look_for = next(stack_events_to_look_for) + status_to_look_for, reason_to_look_for = next( + stack_events_to_look_for) event.resource_status.should.equal(status_to_look_for) if reason_to_look_for is not None: - event.resource_status_reason.should.equal(reason_to_look_for) + event.resource_status_reason.should.equal( + reason_to_look_for) except StopIteration: assert False, "Too many stack events" diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 609a0b46d..e2304f840 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -143,15 +143,18 @@ def test_update_stack(): sqs_conn = boto.sqs.connect_to_region("us-west-1") queues = sqs_conn.get_all_queues() queues.should.have.length_of(1) - queues[0].get_attributes('VisibilityTimeout')['VisibilityTimeout'].should.equal('60') + queues[0].get_attributes('VisibilityTimeout')[ + 'VisibilityTimeout'].should.equal('60') - sqs_template['Resources']['QueueGroup']['Properties']['VisibilityTimeout'] = 100 + sqs_template['Resources']['QueueGroup'][ + 'Properties']['VisibilityTimeout'] = 100 sqs_template_json = json.dumps(sqs_template) conn.update_stack("test_stack", sqs_template_json) queues = 
sqs_conn.get_all_queues() queues.should.have.length_of(1) - queues[0].get_attributes('VisibilityTimeout')['VisibilityTimeout'].should.equal('100') + queues[0].get_attributes('VisibilityTimeout')[ + 'VisibilityTimeout'].should.equal('100') @mock_cloudformation_deprecated() @@ -395,7 +398,8 @@ def test_stack_elb_integration_with_update(): load_balancer = elb_conn.get_all_load_balancers()[0] load_balancer.availability_zones[0].should.equal('us-west-1a') - elb_template['Resources']['MyELB']['Properties']['AvailabilityZones'] = ['us-west-1b'] + elb_template['Resources']['MyELB']['Properties'][ + 'AvailabilityZones'] = ['us-west-1b'] elb_template_json = json.dumps(elb_template) conn.update_stack( "elb_stack", @@ -431,7 +435,8 @@ def test_redshift_stack(): redshift_conn = boto.redshift.connect_to_region("us-west-2") cluster_res = redshift_conn.describe_clusters() - clusters = cluster_res['DescribeClustersResponse']['DescribeClustersResult']['Clusters'] + clusters = cluster_res['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] clusters.should.have.length_of(1) cluster = clusters[0] cluster['DBName'].should.equal("mydb") @@ -499,12 +504,14 @@ def test_stack_security_groups(): conn.create_stack( "security_group_stack", template_body=security_group_template_json, - tags={"foo":"bar"} + tags={"foo": "bar"} ) ec2_conn = boto.ec2.connect_to_region("us-west-1") - instance_group = ec2_conn.get_all_security_groups(filters={'description': ['My security group']})[0] - other_group = ec2_conn.get_all_security_groups(filters={'description': ['My other group']})[0] + instance_group = ec2_conn.get_all_security_groups( + filters={'description': ['My security group']})[0] + other_group = ec2_conn.get_all_security_groups( + filters={'description': ['My other group']})[0] reservation = ec2_conn.get_all_instances()[0] ec2_instance = reservation.instances[0] @@ -597,13 +604,17 @@ def test_autoscaling_group_with_elb(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() - as_group_resource = [resource for resource in resources if resource.resource_type == 'AWS::AutoScaling::AutoScalingGroup'][0] + as_group_resource = [resource for resource in resources if resource.resource_type == + 'AWS::AutoScaling::AutoScalingGroup'][0] as_group_resource.physical_resource_id.should.contain("my-as-group") - launch_config_resource = [resource for resource in resources if resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] - launch_config_resource.physical_resource_id.should.contain("my-launch-config") + launch_config_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] + launch_config_resource.physical_resource_id.should.contain( + "my-launch-config") - elb_resource = [resource for resource in resources if resource.resource_type == 'AWS::ElasticLoadBalancing::LoadBalancer'][0] + elb_resource = [resource for resource in resources if resource.resource_type == + 'AWS::ElasticLoadBalancing::LoadBalancer'][0] elb_resource.physical_resource_id.should.contain("my-elb") @@ -687,26 +698,32 @@ def test_vpc_single_instance_in_subnet(): eip.domain.should.equal('vpc') eip.instance_id.should.equal(instance.id) - security_group = ec2_conn.get_all_security_groups(filters={'vpc_id': [vpc.id]})[0] + security_group = ec2_conn.get_all_security_groups( + filters={'vpc_id': [vpc.id]})[0] security_group.vpc_id.should.equal(vpc.id) stack = conn.describe_stacks()[0] resources = stack.describe_resources() - vpc_resource = [resource for 
resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] + vpc_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] vpc_resource.physical_resource_id.should.equal(vpc.id) - subnet_resource = [resource for resource in resources if resource.resource_type == 'AWS::EC2::Subnet'][0] + subnet_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::Subnet'][0] subnet_resource.physical_resource_id.should.equal(subnet.id) - eip_resource = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + eip_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] eip_resource.physical_resource_id.should.equal(eip.allocation_id) + @mock_cloudformation() @mock_ec2() @mock_rds2() def test_rds_db_parameter_groups(): ec2_conn = boto3.client("ec2", region_name="us-west-1") - ec2_conn.create_security_group(GroupName='application', Description='Our Application Group') + ec2_conn.create_security_group( + GroupName='application', Description='Our Application Group') template_json = json.dumps(rds_mysql_with_db_parameter_group.template) cf_conn = boto3.client('cloudformation', 'us-west-1') @@ -714,16 +731,16 @@ def test_rds_db_parameter_groups(): StackName="test_stack", TemplateBody=template_json, Parameters=[{'ParameterKey': key, 'ParameterValue': value} for - key, value in [ - ("DBInstanceIdentifier", "master_db"), - ("DBName", "my_db"), - ("DBUser", "my_user"), - ("DBPassword", "my_password"), - ("DBAllocatedStorage", "20"), - ("DBInstanceClass", "db.m1.medium"), - ("EC2SecurityGroup", "application"), - ("MultiAZ", "true"), - ] + key, value in [ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", "db.m1.medium"), + ("EC2SecurityGroup", "application"), + ("MultiAZ", "true"), + ] ], ) @@ -731,7 +748,8 @@ def test_rds_db_parameter_groups(): db_parameter_groups = rds_conn.describe_db_parameter_groups() len(db_parameter_groups['DBParameterGroups']).should.equal(1) - db_parameter_group_name = db_parameter_groups['DBParameterGroups'][0]['DBParameterGroupName'] + db_parameter_group_name = db_parameter_groups[ + 'DBParameterGroups'][0]['DBParameterGroupName'] found_cloudformation_set_parameter = False for db_parameter in rds_conn.describe_db_parameters(DBParameterGroupName=db_parameter_group_name)['Parameters']: @@ -741,7 +759,6 @@ def test_rds_db_parameter_groups(): found_cloudformation_set_parameter.should.equal(True) - @mock_cloudformation_deprecated() @mock_ec2_deprecated() @mock_rds_deprecated() @@ -906,15 +923,20 @@ def test_iam_roles(): iam_conn = boto.iam.connect_to_region("us-west-1") - role_result = iam_conn.list_roles()['list_roles_response']['list_roles_result']['roles'][0] + role_result = iam_conn.list_roles()['list_roles_response'][ + 'list_roles_result']['roles'][0] role = iam_conn.get_role(role_result.role_name) role.role_name.should.contain("my-role") role.path.should.equal("my-path") - instance_profile_response = iam_conn.list_instance_profiles()['list_instance_profiles_response'] - cfn_instance_profile = instance_profile_response['list_instance_profiles_result']['instance_profiles'][0] - instance_profile = iam_conn.get_instance_profile(cfn_instance_profile.instance_profile_name) - instance_profile.instance_profile_name.should.contain("my-instance-profile") + instance_profile_response = iam_conn.list_instance_profiles()[ 
+ 'list_instance_profiles_response'] + cfn_instance_profile = instance_profile_response[ + 'list_instance_profiles_result']['instance_profiles'][0] + instance_profile = iam_conn.get_instance_profile( + cfn_instance_profile.instance_profile_name) + instance_profile.instance_profile_name.should.contain( + "my-instance-profile") instance_profile.path.should.equal("my-path") instance_profile.role_id.should.equal(role.role_id) @@ -924,10 +946,13 @@ def test_iam_roles(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() - instance_profile_resource = [resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'][0] - instance_profile_resource.physical_resource_id.should.equal(instance_profile.instance_profile_name) + instance_profile_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'][0] + instance_profile_resource.physical_resource_id.should.equal( + instance_profile.instance_profile_name) - role_resource = [resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'][0] + role_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'][0] role_resource.physical_resource_id.should.equal(role.role_id) @@ -949,13 +974,15 @@ def test_single_instance_with_ebs_volume(): volumes = ec2_conn.get_all_volumes() # Grab the mounted drive - volume = [volume for volume in volumes if volume.attach_data.device == '/dev/sdh'][0] + volume = [ + volume for volume in volumes if volume.attach_data.device == '/dev/sdh'][0] volume.volume_state().should.equal('in-use') volume.attach_data.instance_id.should.equal(ec2_instance.id) stack = conn.describe_stacks()[0] resources = stack.describe_resources() - ebs_volumes = [resource for resource in resources if resource.resource_type == 'AWS::EC2::Volume'] + ebs_volumes = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::Volume'] ebs_volumes[0].physical_resource_id.should.equal(volume.id) @@ -981,7 +1008,8 @@ def test_classic_eip(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() - cfn_eip = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + cfn_eip = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] cfn_eip.physical_resource_id.should.equal(eip.public_ip) @@ -997,7 +1025,8 @@ def test_vpc_eip(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() - cfn_eip = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + cfn_eip = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] cfn_eip.physical_resource_id.should.equal(eip.allocation_id) @@ -1111,7 +1140,8 @@ def test_conditional_if_handling(): ec2_instance.terminate() conn = boto.cloudformation.connect_to_region("us-west-2") - conn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[("ENV", "prd")]) + conn.create_stack( + 'test_stack1', template_body=dummy_template_json, parameters=[("ENV", "prd")]) ec2_conn = boto.ec2.connect_to_region("us-west-2") reservation = ec2_conn.get_all_instances()[0] ec2_instance = reservation.instances[0] @@ -1175,7 +1205,8 @@ def test_route53_roundrobin(): template_body=template_json, ) - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) 
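# For reference, the zone-id handling just below peels the hosted-zone id out
# of the '/hostedzone/Zxxxx'-style Id value that boto's Route53 responses
# carry; a minimal, self-contained sketch of the same parsing (the id here is
# made up, not from the tests):
sample_zone = {'Id': '/hostedzone/Z3DG6IL3SJCGPX'}
sample_zone_id = sample_zone['Id'].split('/')[2]
assert sample_zone_id == 'Z3DG6IL3SJCGPX'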
zone_id = zones[0]['Id'] zone_id = zone_id.split('/') @@ -1203,7 +1234,8 @@ def test_route53_roundrobin(): stack = conn.describe_stacks()[0] output = stack.outputs[0] output.key.should.equal('DomainName') - output.value.should.equal('arn:aws:route53:::hostedzone/{0}'.format(zone_id)) + output.value.should.equal( + 'arn:aws:route53:::hostedzone/{0}'.format(zone_id)) @mock_cloudformation_deprecated() @@ -1222,13 +1254,13 @@ def test_route53_ec2_instance_with_public_ip(): instance_id = ec2_conn.get_all_reservations()[0].instances[0].id - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) zone_id = zones[0]['Id'] zone_id = zone_id.split('/') zone_id = zone_id[2] - rrsets = route53_conn.get_all_rrsets(zone_id) rrsets.should.have.length_of(1) @@ -1253,7 +1285,8 @@ def test_route53_associate_health_check(): template_body=template_json, ) - checks = route53_conn.get_list_health_checks()['ListHealthChecksResponse']['HealthChecks'] + checks = route53_conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] list(checks).should.have.length_of(1) check = checks[0] health_check_id = check['Id'] @@ -1265,7 +1298,8 @@ def test_route53_associate_health_check(): config["ResourcePath"].should.equal("/") config["Type"].should.equal("HTTP") - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) zone_id = zones[0]['Id'] zone_id = zone_id.split('/') @@ -1290,7 +1324,8 @@ def test_route53_with_update(): template_body=template_json, ) - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) zone_id = zones[0]['Id'] zone_id = zone_id.split('/') @@ -1302,14 +1337,16 @@ def test_route53_with_update(): record_set = rrsets[0] record_set.resource_records.should.equal(["my.example.com"]) - route53_health_check.template['Resources']['myDNSRecord']['Properties']['ResourceRecords'] = ["my_other.example.com"] + route53_health_check.template['Resources']['myDNSRecord'][ + 'Properties']['ResourceRecords'] = ["my_other.example.com"] template_json = json.dumps(route53_health_check.template) cf_conn.update_stack( "test_stack", template_body=template_json, ) - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) zone_id = zones[0]['Id'] zone_id = zone_id.split('/') @@ -1355,12 +1392,14 @@ def test_sns_topic(): ) sns_conn = boto.sns.connect_to_region("us-west-1") - topics = sns_conn.get_all_topics()["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topics = sns_conn.get_all_topics()["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] topics.should.have.length_of(1) topic_arn = topics[0]['TopicArn'] topic_arn.should.contain("my_topics") - subscriptions = sns_conn.get_all_subscriptions()["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"] + subscriptions = sns_conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] subscriptions.should.have.length_of(1) subscription = subscriptions[0] 
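# For reference, the `.should` assertions used throughout these tests come
# from the `sure` library; a self-contained sketch of the idiom with made-up
# values (importing sure is what attaches the .should property to objects):
import sure  # noqa
'arn:aws:sns:us-west-1:123456789012:my_topics'.should.contain('my_topics')
['one', 'two'].should.have.length_of(2)
(1 + 1).should.equal(2)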
subscription["TopicArn"].should.equal(topic_arn) @@ -1504,12 +1543,15 @@ def test_multiple_security_group_ingress_separate_from_security_group_by_id(): ) ec2_conn = boto.ec2.connect_to_region("us-west-1") - security_group1 = ec2_conn.get_all_security_groups(filters={"tag:sg-name": "sg1"})[0] - security_group2 = ec2_conn.get_all_security_groups(filters={"tag:sg-name": "sg2"})[0] + security_group1 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg1"})[0] + security_group2 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] security_group1.rules.should.have.length_of(1) security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[0].group_id.should.equal(security_group2.id) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) security_group1.rules[0].ip_protocol.should.equal('tcp') security_group1.rules[0].from_port.should.equal('80') security_group1.rules[0].to_port.should.equal('8080') @@ -1519,7 +1561,8 @@ def test_multiple_security_group_ingress_separate_from_security_group_by_id(): @mock_ec2_deprecated def test_security_group_ingress_separate_from_security_group_by_id(): ec2_conn = boto.ec2.connect_to_region("us-west-1") - ec2_conn.create_security_group("test-security-group1", "test security group") + ec2_conn.create_security_group( + "test-security-group1", "test security group") template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -1555,12 +1598,15 @@ def test_security_group_ingress_separate_from_security_group_by_id(): "test_stack", template_body=template_json, ) - security_group1 = ec2_conn.get_all_security_groups(groupnames=["test-security-group1"])[0] - security_group2 = ec2_conn.get_all_security_groups(filters={"tag:sg-name": "sg2"})[0] + security_group1 = ec2_conn.get_all_security_groups( + groupnames=["test-security-group1"])[0] + security_group2 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] security_group1.rules.should.have.length_of(1) security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[0].group_id.should.equal(security_group2.id) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) security_group1.rules[0].ip_protocol.should.equal('tcp') security_group1.rules[0].from_port.should.equal('80') security_group1.rules[0].to_port.should.equal('8080') @@ -1621,12 +1667,15 @@ def test_security_group_ingress_separate_from_security_group_by_id_using_vpc(): "test_stack", template_body=template_json, ) - security_group1 = vpc_conn.get_all_security_groups(filters={"tag:sg-name": "sg1"})[0] - security_group2 = vpc_conn.get_all_security_groups(filters={"tag:sg-name": "sg2"})[0] + security_group1 = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg1"})[0] + security_group2 = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] security_group1.rules.should.have.length_of(1) security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[0].group_id.should.equal(security_group2.id) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) security_group1.rules[0].ip_protocol.should.equal('tcp') security_group1.rules[0].from_port.should.equal('80') security_group1.rules[0].to_port.should.equal('8080') @@ -1663,17 +1712,20 @@ def test_security_group_with_update(): "test_stack", template_body=template_json, ) - security_group = vpc_conn.get_all_security_groups(filters={"tag:sg-name": "sg"})[0] + security_group = 
vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg"})[0] security_group.vpc_id.should.equal(vpc1.id) vpc2 = vpc_conn.create_vpc("10.1.0.0/16") - template['Resources']['test-security-group']['Properties']['VpcId'] = vpc2.id + template['Resources'][ + 'test-security-group']['Properties']['VpcId'] = vpc2.id template_json = json.dumps(template) cf_conn.update_stack( "test_stack", template_body=template_json, ) - security_group = vpc_conn.get_all_security_groups(filters={"tag:sg-name": "sg"})[0] + security_group = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg"})[0] security_group.vpc_id.should.equal(vpc2.id) @@ -1779,11 +1831,14 @@ def test_datapipeline(): data_pipelines = dp_conn.list_pipelines() data_pipelines['pipelineIdList'].should.have.length_of(1) - data_pipelines['pipelineIdList'][0]['name'].should.equal('testDataPipeline') + data_pipelines['pipelineIdList'][0][ + 'name'].should.equal('testDataPipeline') stack_resources = cf_conn.list_stack_resources(stack_id) stack_resources.should.have.length_of(1) - stack_resources[0].physical_resource_id.should.equal(data_pipelines['pipelineIdList'][0]['id']) + stack_resources[0].physical_resource_id.should.equal( + data_pipelines['pipelineIdList'][0]['id']) + def _process_lamda(pfunc): import io @@ -1849,33 +1904,35 @@ def test_lambda_function(): def test_nat_gateway(): ec2_conn = boto3.client('ec2', 'us-east-1') vpc_id = ec2_conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']['VpcId'] - subnet_id = ec2_conn.create_subnet(CidrBlock='10.0.1.0/24', VpcId=vpc_id)['Subnet']['SubnetId'] - route_table_id = ec2_conn.create_route_table(VpcId=vpc_id)['RouteTable']['RouteTableId'] + subnet_id = ec2_conn.create_subnet( + CidrBlock='10.0.1.0/24', VpcId=vpc_id)['Subnet']['SubnetId'] + route_table_id = ec2_conn.create_route_table( + VpcId=vpc_id)['RouteTable']['RouteTableId'] template = { "AWSTemplateFormatVersion": "2010-09-09", "Resources": { - "NAT" : { - "DependsOn" : "vpcgatewayattachment", - "Type" : "AWS::EC2::NatGateway", - "Properties" : { - "AllocationId" : { "Fn::GetAtt" : ["EIP", "AllocationId"]}, - "SubnetId" : subnet_id - } - }, - "EIP" : { - "Type" : "AWS::EC2::EIP", - "Properties" : { - "Domain" : "vpc" + "NAT": { + "DependsOn": "vpcgatewayattachment", + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": {"Fn::GetAtt": ["EIP", "AllocationId"]}, + "SubnetId": subnet_id } }, - "Route" : { - "Type" : "AWS::EC2::Route", - "Properties" : { - "RouteTableId" : route_table_id, - "DestinationCidrBlock" : "0.0.0.0/0", - "NatGatewayId" : { "Ref" : "NAT" } - } + "EIP": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + }, + "Route": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": route_table_id, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": {"Ref": "NAT"} + } }, "internetgateway": { "Type": "AWS::EC2::InternetGateway" @@ -1905,6 +1962,7 @@ def test_nat_gateway(): result['NatGateways'][0]['SubnetId'].should.equal(subnet_id) result['NatGateways'][0]['State'].should.equal('available') + @mock_cloudformation() @mock_kms() def test_stack_kms(): @@ -1944,42 +2002,43 @@ def test_stack_spot_fleet(): conn = boto3.client('ec2', 'us-east-1') vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] spot_fleet_template = 
{ 'Resources': { "SpotFleet": { - "Type": "AWS::EC2::SpotFleet", - "Properties": { - "SpotFleetRequestConfigData": { - "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", - "SpotPrice": "0.12", - "TargetCapacity": 6, - "AllocationStrategy": "diversified", - "LaunchSpecifications": [ - { - "EbsOptimized": "false", - "InstanceType": 't2.small', - "ImageId": "ami-1234", - "SubnetId": subnet_id, - "WeightedCapacity": "2", - "SpotPrice": "0.13", - }, - { - "EbsOptimized": "true", - "InstanceType": 't2.large', - "ImageId": "ami-1234", - "Monitoring": { "Enabled": "true" }, - "SecurityGroups": [{"GroupId": "sg-123"}], - "SubnetId": subnet_id, - "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, - "WeightedCapacity": "4", - "SpotPrice": "10.00", - } - ] + "Type": "AWS::EC2::SpotFleet", + "Properties": { + "SpotFleetRequestConfigData": { + "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", + "SpotPrice": "0.12", + "TargetCapacity": 6, + "AllocationStrategy": "diversified", + "LaunchSpecifications": [ + { + "EbsOptimized": "false", + "InstanceType": 't2.small', + "ImageId": "ami-1234", + "SubnetId": subnet_id, + "WeightedCapacity": "2", + "SpotPrice": "0.13", + }, + { + "EbsOptimized": "true", + "InstanceType": 't2.large', + "ImageId": "ami-1234", + "Monitoring": {"Enabled": "true"}, + "SecurityGroups": [{"GroupId": "sg-123"}], + "SubnetId": subnet_id, + "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, + "WeightedCapacity": "4", + "SpotPrice": "10.00", + } + ] + } } - } } } } @@ -1993,9 +2052,11 @@ def test_stack_spot_fleet(): stack_resources = cf_conn.list_stack_resources(StackName=stack_id) stack_resources['StackResourceSummaries'].should.have.length_of(1) - spot_fleet_id = stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'] + spot_fleet_id = stack_resources[ + 'StackResourceSummaries'][0]['PhysicalResourceId'] - spot_fleet_requests = conn.describe_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] len(spot_fleet_requests).should.equal(1) spot_fleet_request = spot_fleet_requests[0] spot_fleet_request['SpotFleetRequestState'].should.equal("active") @@ -2003,7 +2064,8 @@ def test_stack_spot_fleet(): spot_fleet_config['SpotPrice'].should.equal('0.12') spot_fleet_config['TargetCapacity'].should.equal(6) - spot_fleet_config['IamFleetRole'].should.equal('arn:aws:iam::123456789012:role/fleet') + spot_fleet_config['IamFleetRole'].should.equal( + 'arn:aws:iam::123456789012:role/fleet') spot_fleet_config['AllocationStrategy'].should.equal('diversified') spot_fleet_config['FulfilledCapacity'].should.equal(6.0) diff --git a/tests/test_cloudformation/test_server.py b/tests/test_cloudformation/test_server.py index b4f50024b..de3ab77b5 100644 --- a/tests/test_cloudformation/test_server.py +++ b/tests/test_cloudformation/test_server.py @@ -20,11 +20,14 @@ def test_cloudformation_server_get(): "Resources": {}, } create_stack_resp = test_client.action_data("CreateStack", StackName=stack_name, - TemplateBody=json.dumps(template_body)) - create_stack_resp.should.match(r"<CreateStackResponse>.*<CreateStackResult>.*<StackId>.*</StackId>.*</CreateStackResult>.*</CreateStackResponse>", re.DOTALL) - stack_id_from_create_response = re.search("<StackId>(.*)</StackId>", create_stack_resp).groups()[0] + TemplateBody=json.dumps(template_body)) + create_stack_resp.should.match( + r"<CreateStackResponse>.*<CreateStackResult>.*<StackId>.*</StackId>.*</CreateStackResult>.*</CreateStackResponse>", re.DOTALL) + stack_id_from_create_response = re.search( + "<StackId>(.*)</StackId>", create_stack_resp).groups()[0] list_stacks_resp =
test_client.action_data("ListStacks") - stack_id_from_list_response = re.search("<StackId>(.*)</StackId>", list_stacks_resp).groups()[0] + stack_id_from_list_response = re.search( + "<StackId>(.*)</StackId>", list_stacks_resp).groups()[0] stack_id_from_create_response.should.equal(stack_id_from_list_response) diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index 42208810f..be459eff1 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -25,8 +25,8 @@ dummy_template = { } }, "S3Bucket": { - "Type": "AWS::S3::Bucket", - "DeletionPolicy": "Retain" + "Type": "AWS::S3::Bucket", + "DeletionPolicy": "Retain" }, }, } @@ -71,15 +71,19 @@ get_attribute_output = { } } -outputs_template = dict(list(dummy_template.items()) + list(output_dict.items())) -bad_outputs_template = dict(list(dummy_template.items()) + list(bad_output.items())) -get_attribute_outputs_template = dict(list(dummy_template.items()) + list(get_attribute_output.items())) +outputs_template = dict(list(dummy_template.items()) + + list(output_dict.items())) +bad_outputs_template = dict( + list(dummy_template.items()) + list(bad_output.items())) +get_attribute_outputs_template = dict( + list(dummy_template.items()) + list(get_attribute_output.items())) dummy_template_json = json.dumps(dummy_template) name_type_template_json = json.dumps(name_type_template) output_type_template_json = json.dumps(outputs_template) bad_output_template_json = json.dumps(bad_outputs_template) -get_attribute_outputs_template_json = json.dumps(get_attribute_outputs_template) +get_attribute_outputs_template_json = json.dumps( + get_attribute_outputs_template) def test_parse_stack_resources(): @@ -104,7 +108,8 @@ def test_parse_stack_resources(): @patch("moto.cloudformation.parsing.logger") def test_missing_resource_logs(logger): resource_class_from_type("foobar") - logger.warning.assert_called_with('No Moto CloudFormation support for %s', 'foobar') + logger.warning.assert_called_with( + 'No Moto CloudFormation support for %s', 'foobar') def test_parse_stack_with_name_type_resource(): diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index 88a3190c6..9b3f76c36 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -4,6 +4,7 @@ import sure # noqa from moto import mock_cloudwatch_deprecated + def alarm_fixture(name="tester", action=None): action = action or ['arn:alarm'] return MetricAlarm( @@ -23,6 +24,7 @@ def alarm_fixture(name="tester", action=None): unit='Seconds', ) + @mock_cloudwatch_deprecated def test_create_alarm(): conn = boto.connect_cloudwatch() @@ -42,7 +44,8 @@ def test_create_alarm(): alarm.evaluation_periods.should.equal(5) alarm.statistic.should.equal('Average') alarm.description.should.equal('A test') - dict(alarm.dimensions).should.equal({'InstanceId': ['i-0123456,i-0123457']}) + dict(alarm.dimensions).should.equal( + {'InstanceId': ['i-0123456,i-0123457']}) list(alarm.alarm_actions).should.equal(['arn:alarm']) list(alarm.ok_actions).should.equal(['arn:ok']) list(alarm.insufficient_data_actions).should.equal(['arn:insufficient']) @@ -84,7 +87,8 @@ def test_put_metric_data(): metric = metrics[0] metric.namespace.should.equal('tester') metric.name.should.equal('metric') - dict(metric.dimensions).should.equal({'InstanceId': ['i-0123456,i-0123457']}) + dict(metric.dimensions).should.equal( + {'InstanceId': ['i-0123456,i-0123457']}) @mock_cloudwatch_deprecated @@
-103,7 +107,8 @@ def test_describe_alarms(): alarms.should.have.length_of(4) alarms = conn.describe_alarms(alarm_name_prefix="nfoo") alarms.should.have.length_of(2) - alarms = conn.describe_alarms(alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) + alarms = conn.describe_alarms( + alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) alarms.should.have.length_of(3) alarms = conn.describe_alarms(action_prefix="afoo") alarms.should.have.length_of(2) @@ -114,10 +119,11 @@ def test_describe_alarms(): alarms = conn.describe_alarms() alarms.should.have.length_of(0) + @mock_cloudwatch_deprecated def test_describe_state_value_unimplemented(): conn = boto.connect_cloudwatch() conn.describe_alarms() - conn.describe_alarms.when.called_with(state_value="foo").should.throw(NotImplementedError) - + conn.describe_alarms.when.called_with( + state_value="foo").should.throw(NotImplementedError) diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 81dc0639a..9e3638cc2 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -59,11 +59,13 @@ def test_decorater_wrapped_gets_set(): """ Moto decorator's __wrapped__ should get set to the tests function """ - test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal('test_decorater_wrapped_gets_set') + test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal( + 'test_decorater_wrapped_gets_set') @mock_ec2_deprecated class Tester(object): + def test_the_class(self): conn = boto.connect_ec2() list(conn.get_all_instances()).should.have.length_of(0) @@ -75,6 +77,7 @@ class Tester(object): @mock_s3_deprecated class TesterWithSetup(unittest.TestCase): + def setUp(self): self.conn = boto.connect_s3() self.conn.create_bucket('mybucket') diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py index 80dd501e7..69b9052e9 100644 --- a/tests/test_core/test_instance_metadata.py +++ b/tests/test_core/test_instance_metadata.py @@ -30,13 +30,15 @@ def test_meta_data_iam(): @mock_ec2 def test_meta_data_security_credentials(): - res = requests.get("{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL)) + res = requests.get( + "{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL)) res.content.should.equal(b"default-role") @mock_ec2 def test_meta_data_default_role(): - res = requests.get("{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL)) + res = requests.get( + "{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL)) json_response = res.json() json_response.should.contain('AccessKeyId') json_response.should.contain('SecretAccessKey') diff --git a/tests/test_core/test_responses.py b/tests/test_core/test_responses.py index aa89ac840..c3cc27aef 100644 --- a/tests/test_core/test_responses.py +++ b/tests/test_core/test_responses.py @@ -7,7 +7,8 @@ from moto.core.responses import flatten_json_request_body def test_flatten_json_request_body(): - spec = AWSServiceSpec('data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') + spec = AWSServiceSpec( + 'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') body = { 'Name': 'cluster', @@ -42,25 +43,32 @@ def test_flatten_json_request_body(): flat['Name'].should.equal(body['Name']) flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName']) for idx in range(2): - flat['Instances.InstanceGroups.member.' 
+ str(idx + 1) + '.InstanceRole'].should.equal(body['Instances']['InstanceGroups'][idx]['InstanceRole']) - flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal(body['Instances']['InstanceGroups'][idx]['InstanceType']) - flat['Instances.Placement.AvailabilityZone'].should.equal(body['Instances']['Placement']['AvailabilityZone']) + flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceRole']) + flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceType']) + flat['Instances.Placement.AvailabilityZone'].should.equal( + body['Instances']['Placement']['AvailabilityZone']) for idx in range(1): prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep' step = body['Steps'][idx]['HadoopJarStep'] i = 0 while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat: - flat[prefix + '.Properties.member.' + str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) - flat[prefix + '.Properties.member.' + str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) i += 1 i = 0 while prefix + '.Args.member.' + str(i + 1) in flat: - flat[prefix + '.Args.member.' + str(i + 1)].should.equal(step['Args'][i]) + flat[prefix + '.Args.member.' + + str(i + 1)].should.equal(step['Args'][i]) i += 1 for idx in range(2): - flat['Configurations.member.' + str(idx + 1) + '.Classification'].should.equal(body['Configurations'][idx]['Classification']) + flat['Configurations.member.' + str(idx + 1) + '.Classification'].should.equal( + body['Configurations'][idx]['Classification']) props = {} i = 1 diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index a0fb328cf..b7290e351 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -32,19 +32,22 @@ def test_port_argument(run_simple): def test_domain_dispatched(): dispatcher = DomainDispatcherApplication(create_backend_app) - backend_app = dispatcher.get_application({"HTTP_HOST": "email.us-east1.amazonaws.com"}) + backend_app = dispatcher.get_application( + {"HTTP_HOST": "email.us-east1.amazonaws.com"}) keys = list(backend_app.view_functions.keys()) keys[0].should.equal('EmailResponse.dispatch') def test_domain_without_matches(): dispatcher = DomainDispatcherApplication(create_backend_app) - dispatcher.get_application.when.called_with({"HTTP_HOST": "not-matching-anything.com"}).should.throw(RuntimeError) + dispatcher.get_application.when.called_with( + {"HTTP_HOST": "not-matching-anything.com"}).should.throw(RuntimeError) def test_domain_dispatched_with_service(): # If we pass a particular service, always return that. 
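# A rough, hypothetical sketch of the host-based dispatch these server tests
# exercise (simplified names, not moto's actual implementation): the
# dispatcher maps the Host header to a backend, raises RuntimeError for
# unknown hosts, and short-circuits when a service is forced, as the
# assertions around here check.
def _pick_backend(host, forced_service=None):
    if forced_service is not None:
        return forced_service
    known = {'email': 'ses', 's3': 's3'}
    subdomain = host.split('.')[0]
    if subdomain not in known:
        raise RuntimeError("No backend for host: %s" % host)
    return known[subdomain]

assert _pick_backend("s3.us-east1.amazonaws.com", forced_service="s3") == "s3"
assert _pick_backend("email.us-east1.amazonaws.com") == "ses"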
dispatcher = DomainDispatcherApplication(create_backend_app, service="s3") - backend_app = dispatcher.get_application({"HTTP_HOST": "s3.us-east1.amazonaws.com"}) + backend_app = dispatcher.get_application( + {"HTTP_HOST": "s3.us-east1.amazonaws.com"}) keys = set(backend_app.view_functions.keys()) keys.should.contain('ResponseObject.key_response') diff --git a/tests/test_core/test_url_mapping.py b/tests/test_core/test_url_mapping.py index 4e4e19a3a..8f7921a5a 100644 --- a/tests/test_core/test_url_mapping.py +++ b/tests/test_core/test_url_mapping.py @@ -14,7 +14,8 @@ def test_flask_path_converting_simple(): def test_flask_path_converting_regex(): - convert_regex_to_flask_path("/(?P<key_name>[a-zA-Z0-9\-_]+)").should.equal('/<regex("[a-zA-Z0-9\-_]+"):key_name>') + convert_regex_to_flask_path( + "/(?P<key_name>[a-zA-Z0-9\-_]+)").should.equal('/<regex("[a-zA-Z0-9\-_]+"):key_name>') convert_regex_to_flask_path("(?P<account_id>\d+)/(?P<queue_name>.*)$").should.equal( '<regex("\d+"):account_id>/<regex(".*"):queue_name>' ) diff --git a/tests/test_datapipeline/test_datapipeline.py b/tests/test_datapipeline/test_datapipeline.py index aaa9f7f77..520142c2e 100644 --- a/tests/test_datapipeline/test_datapipeline.py +++ b/tests/test_datapipeline/test_datapipeline.py @@ -20,7 +20,8 @@ def test_create_pipeline(): res = conn.create_pipeline("mypipeline", "some-unique-id") pipeline_id = res["pipelineId"] - pipeline_descriptions = conn.describe_pipelines([pipeline_id])["pipelineDescriptionList"] + pipeline_descriptions = conn.describe_pipelines( + [pipeline_id])["pipelineDescriptionList"] pipeline_descriptions.should.have.length_of(1) pipeline_description = pipeline_descriptions[0] @@ -105,7 +106,8 @@ def test_describing_pipeline_objects(): conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) - objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)['pipelineObjects'] + objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)[ + 'pipelineObjects'] objects.should.have.length_of(2) default_object = [x for x in objects if x['id'] == 'Default'][0] @@ -125,7 +127,8 @@ def test_activate_pipeline(): pipeline_id = res["pipelineId"] conn.activate_pipeline(pipeline_id) - pipeline_descriptions = conn.describe_pipelines([pipeline_id])["pipelineDescriptionList"] + pipeline_descriptions = conn.describe_pipelines( + [pipeline_id])["pipelineDescriptionList"] pipeline_descriptions.should.have.length_of(1) pipeline_description = pipeline_descriptions[0] fields = pipeline_description['fields'] diff --git a/tests/test_datapipeline/test_server.py b/tests/test_datapipeline/test_server.py index 012c5ad55..03c77b034 100644 --- a/tests/test_datapipeline/test_server.py +++ b/tests/test_datapipeline/test_server.py @@ -17,9 +17,10 @@ def test_list_streams(): test_client = backend.test_client() res = test_client.post('/', - data={"pipelineIds": ["ASdf"]}, - headers={"X-Amz-Target": "DataPipeline.DescribePipelines"}, - ) + data={"pipelineIds": ["ASdf"]}, + headers={ + "X-Amz-Target": "DataPipeline.DescribePipelines"}, + ) json_data = json.loads(res.data.decode("utf-8")) json_data.should.equal({ diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index f2df39a22..d48519755 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -16,15 +16,18 @@ from boto.exception import DynamoDBResponseError @mock_dynamodb_deprecated def test_list_tables(): name = 'TestTable' - dynamodb_backend.create_table(name, hash_key_attr="name", hash_key_type="S") + dynamodb_backend.create_table( + name, hash_key_attr="name", hash_key_type="S") conn = boto.connect_dynamodb('the_key', 'the_secret') assert
conn.list_tables() == ['TestTable'] @mock_dynamodb_deprecated def test_list_tables_layer_1(): - dynamodb_backend.create_table("test_1", hash_key_attr="name", hash_key_type="S") - dynamodb_backend.create_table("test_2", hash_key_attr="name", hash_key_type="S") + dynamodb_backend.create_table( + "test_1", hash_key_attr="name", hash_key_type="S") + dynamodb_backend.create_table( + "test_2", hash_key_attr="name", hash_key_type="S") conn = boto.connect_dynamodb('the_key', 'the_secret') res = conn.layer1.list_tables(limit=1) expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index c7832b08f..2a482b31e 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -69,7 +69,8 @@ def test_delete_table(): conn.layer1.delete_table('messages') conn.list_tables().should.have.length_of(0) - conn.layer1.delete_table.when.called_with('messages').should.throw(DynamoDBResponseError) + conn.layer1.delete_table.when.called_with( + 'messages').should.throw(DynamoDBResponseError) @mock_dynamodb_deprecated @@ -192,7 +193,8 @@ def test_get_item_without_range_key(): new_item = table.new_item(hash_key=hash_key, range_key=range_key) new_item.put() - table.get_item.when.called_with(hash_key=hash_key).should.throw(DynamoDBValidationError) + table.get_item.when.called_with( + hash_key=hash_key).should.throw(DynamoDBValidationError) @mock_dynamodb_deprecated @@ -304,22 +306,28 @@ def test_query(): ) item.put() - results = table.query(hash_key='the-key', range_key_condition=condition.GT('1')) + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('1')) results.response['Items'].should.have.length_of(3) - results = table.query(hash_key='the-key', range_key_condition=condition.GT('234')) + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('234')) results.response['Items'].should.have.length_of(2) - results = table.query(hash_key='the-key', range_key_condition=condition.GT('9999')) + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('9999')) results.response['Items'].should.have.length_of(0) - results = table.query(hash_key='the-key', range_key_condition=condition.CONTAINS('12')) + results = table.query(hash_key='the-key', + range_key_condition=condition.CONTAINS('12')) results.response['Items'].should.have.length_of(1) - results = table.query(hash_key='the-key', range_key_condition=condition.BEGINS_WITH('7')) + results = table.query(hash_key='the-key', + range_key_condition=condition.BEGINS_WITH('7')) results.response['Items'].should.have.length_of(1) - results = table.query(hash_key='the-key', range_key_condition=condition.BETWEEN('567', '890')) + results = table.query(hash_key='the-key', + range_key_condition=condition.BETWEEN('567', '890')) results.response['Items'].should.have.length_of(1) diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index 18d353928..ebd0c2051 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -63,7 +63,8 @@ def test_delete_table(): conn.layer1.delete_table('messages') conn.list_tables().should.have.length_of(0) - conn.layer1.delete_table.when.called_with('messages').should.throw(DynamoDBResponseError) + 
conn.layer1.delete_table.when.called_with( + 'messages').should.throw(DynamoDBResponseError) @mock_dynamodb_deprecated diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 9e92e7985..860333e50 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -15,17 +15,18 @@ try: except ImportError: print("This boto version is not supported") + @requires_boto_gte("2.9") @mock_dynamodb2_deprecated def test_list_tables(): name = 'TestTable' #{'schema': } - dynamodb_backend2.create_table(name,schema=[ + dynamodb_backend2.create_table(name, schema=[ {u'KeyType': u'HASH', u'AttributeName': u'forum_name'}, {u'KeyType': u'RANGE', u'AttributeName': u'subject'} ]) - conn = boto.dynamodb2.connect_to_region( - 'us-west-2', + conn = boto.dynamodb2.connect_to_region( + 'us-west-2', aws_access_key_id="ak", aws_secret_access_key="sk") assert conn.list_tables()["TableNames"] == [name] @@ -34,13 +35,13 @@ def test_list_tables(): @requires_boto_gte("2.9") @mock_dynamodb2_deprecated def test_list_tables_layer_1(): - dynamodb_backend2.create_table("test_1",schema=[ + dynamodb_backend2.create_table("test_1", schema=[ {u'KeyType': u'HASH', u'AttributeName': u'name'} ]) - dynamodb_backend2.create_table("test_2",schema=[ + dynamodb_backend2.create_table("test_2", schema=[ {u'KeyType': u'HASH', u'AttributeName': u'name'} ]) - conn = boto.dynamodb2.connect_to_region( + conn = boto.dynamodb2.connect_to_region( 'us-west-2', aws_access_key_id="ak", aws_secret_access_key="sk") @@ -57,7 +58,7 @@ def test_list_tables_layer_1(): @requires_boto_gte("2.9") @mock_dynamodb2_deprecated def test_describe_missing_table(): - conn = boto.dynamodb2.connect_to_region( + conn = boto.dynamodb2.connect_to_region( 'us-west-2', aws_access_key_id="ak", aws_secret_access_key="sk") diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 029506378..58e0d66d1 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -140,7 +140,8 @@ def test_delete_table(): table.delete() conn.list_tables()["TableNames"].should.have.length_of(0) - conn.delete_table.when.called_with('messages').should.throw(JSONResponseError) + conn.delete_table.when.called_with( + 'messages').should.throw(JSONResponseError) @requires_boto_gte("2.9") @@ -181,7 +182,8 @@ def test_item_add_and_describe_and_update(): }) ok.should.equal(True) - table.get_item(forum_name="LOLCat Forum", subject='Check this out!').should_not.be.none + table.get_item(forum_name="LOLCat Forum", + subject='Check this out!').should_not.be.none returned_item = table.get_item( forum_name='LOLCat Forum', @@ -224,7 +226,8 @@ def test_item_partial_save(): } table.put_item(data=data) - returned_item = table.get_item(forum_name="LOLCat Forum", subject='The LOLz') + returned_item = table.get_item( + forum_name="LOLCat Forum", subject='The LOLz') returned_item['SentBy'] = 'User B' returned_item.partial_save() @@ -270,7 +273,8 @@ def test_get_missing_item(): @mock_dynamodb2_deprecated def test_get_item_with_undeclared_table(): table = Table('undeclared-table') - table.get_item.when.called_with(test_hash=3241526475).should.throw(JSONResponseError) + table.get_item.when.called_with( + test_hash=3241526475).should.throw(JSONResponseError) @requires_boto_gte("2.9") @@ -287,7 +291,8 @@ def test_get_item_without_range_key(): hash_key = 3241526475 range_key = 1234567890987 
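# For reference, the `.when.called_with(...).should.throw(...)` pattern used
# below is sure's way of asserting that a call raises; a self-contained
# sketch with a made-up function (importing sure attaches .when/.should):
import sure  # noqa

def _get_item(test_hash, test_range=None):
    if test_range is None:
        raise ValueError("range key is required")

_get_item.when.called_with(test_hash=3241526475).should.throw(ValueError)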
table.put_item(data={'test_hash': hash_key, 'test_range': range_key}) - table.get_item.when.called_with(test_hash=hash_key).should.throw(ValidationException) + table.get_item.when.called_with( + test_hash=hash_key).should.throw(ValidationException) @requires_boto_gte("2.30.0") @@ -355,19 +360,23 @@ def test_query(): table.count().should.equal(4) - results = table.query_2(forum_name__eq='the-key', subject__gt='1', consistent=True) + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) expected = ["123", "456", "789"] for index, item in enumerate(results): item["subject"].should.equal(expected[index]) - results = table.query_2(forum_name__eq="the-key", subject__gt='1', reverse=True) + results = table.query_2(forum_name__eq="the-key", + subject__gt='1', reverse=True) for index, item in enumerate(results): item["subject"].should.equal(expected[len(expected) - 1 - index]) - results = table.query_2(forum_name__eq='the-key', subject__gt='1', consistent=True) + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) sum(1 for _ in results).should.equal(3) - results = table.query_2(forum_name__eq='the-key', subject__gt='234', consistent=True) + results = table.query_2(forum_name__eq='the-key', + subject__gt='234', consistent=True) sum(1 for _ in results).should.equal(2) results = table.query_2(forum_name__eq='the-key', subject__gt='9999') @@ -379,7 +388,8 @@ def test_query(): results = table.query_2(forum_name__eq='the-key', subject__beginswith='7') sum(1 for _ in results).should.equal(1) - results = table.query_2(forum_name__eq='the-key', subject__between=['567', '890']) + results = table.query_2(forum_name__eq='the-key', + subject__between=['567', '890']) sum(1 for _ in results).should.equal(1) @@ -558,15 +568,15 @@ def test_create_with_global_indexes(): RangeKey('version'), ], global_indexes=[ GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), ]) table_description = conn.describe_table("messages") @@ -601,25 +611,25 @@ def test_query_with_global_indexes(): RangeKey('version'), ], global_indexes=[ GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), GlobalAllIndex('status-created_at-index', - parts=[ - HashKey('status'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 2, - 'write': 1 - } - ) + parts=[ + HashKey('status'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 2, + 'write': 1 + } + ) ]) item_data = { @@ -653,7 +663,8 @@ def test_query_with_local_indexes(): item['version'] = '2' item.save(overwrite=True) - results = table.query(forum_name__eq='Cool Forum', index='threads_index', threads__eq=1) + results = table.query(forum_name__eq='Cool Forum', + index='threads_index', threads__eq=1) list(results).should.have.length_of(1) @@ -888,7 +899,8 @@ def test_failed_overwrite(): table.put_item(data=data2, overwrite=True) data3 = {'id': '123', 'range': 'abc', 'data': '812'} - table.put_item.when.called_with(data=data3).should.throw(ConditionalCheckFailedException) + table.put_item.when.called_with(data=data3).should.throw( 
+ ConditionalCheckFailedException) returned_item = table.lookup('123', 'abc') dict(returned_item).should.equal(data2) @@ -972,7 +984,8 @@ def test_boto3_conditions(): # Test a query returning all items results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('1'), + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), ScanIndexForward=True, ) expected = ["123", "456", "789"] @@ -981,7 +994,8 @@ def test_boto3_conditions(): # Return all items again, but in reverse results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('1'), + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), ScanIndexForward=False, ) for index, item in enumerate(reversed(results['Items'])): @@ -989,29 +1003,34 @@ def test_boto3_conditions(): # Filter the subjects to only return some of the results results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('234'), + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('234'), ConsistentRead=True, ) results['Count'].should.equal(2) # Filter to return no results results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('9999') + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('9999') ) results['Count'].should.equal(0) results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").begins_with('12') + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").begins_with('12') ) results['Count'].should.equal(1) results = table.query( - KeyConditionExpression=Key("subject").begins_with('7') & Key('forum_name').eq('the-key') + KeyConditionExpression=Key("subject").begins_with( + '7') & Key('forum_name').eq('the-key') ) results['Count'].should.equal(1) results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").between('567', '890') + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").between('567', '890') ) results['Count'].should.equal(1) @@ -1337,7 +1356,8 @@ def test_boto3_query_gsi_range_comparison(): # Test a query returning all johndoe items results = table.query( - KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt(0), + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), ScanIndexForward=True, IndexName='TestGSI', ) @@ -1347,7 +1367,8 @@ def test_boto3_query_gsi_range_comparison(): # Return all johndoe items again, but in reverse results = table.query( - KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt(0), + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), ScanIndexForward=False, IndexName='TestGSI', ) @@ -1357,7 +1378,8 @@ def test_boto3_query_gsi_range_comparison(): # Filter the creation to only return some of the results # And reverse order of hash + range key results = table.query( - KeyConditionExpression=Key("created").gt(1) & Key('username').eq('johndoe'), + KeyConditionExpression=Key("created").gt( + 1) & Key('username').eq('johndoe'), ConsistentRead=True, IndexName='TestGSI', ) @@ -1365,20 +1387,23 @@ def test_boto3_query_gsi_range_comparison(): # Filter to return no results results = table.query( - KeyConditionExpression=Key('username').eq('janedoe') & Key("created").gt(9), + KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").gt(9), 
IndexName='TestGSI', ) results['Count'].should.equal(0) results = table.query( - KeyConditionExpression=Key('username').eq('janedoe') & Key("created").eq(5), + KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").eq(5), IndexName='TestGSI', ) results['Count'].should.equal(1) # Test range key sorting results = table.query( - KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt(0), + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), IndexName='TestGSI', ) expected = [Decimal('1'), Decimal('2'), Decimal('3')] @@ -1516,7 +1541,6 @@ def test_boto3_update_table_gsi_throughput(): gsi_throughput['WriteCapacityUnits'].should.equal(11) - @mock_dynamodb2 def test_update_table_gsi_create(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 83eff6519..36e1b6c61 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -71,7 +71,8 @@ def test_delete_table(): conn.delete_table('messages') conn.list_tables()["TableNames"].should.have.length_of(0) - conn.delete_table.when.called_with('messages').should.throw(JSONResponseError) + conn.delete_table.when.called_with( + 'messages').should.throw(JSONResponseError) @requires_boto_gte("2.9") @@ -239,7 +240,8 @@ def test_query_with_undeclared_table(): conn.query.when.called_with( table_name='undeclared-table', - key_conditions={"forum_name": {"ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}} + key_conditions={"forum_name": { + "ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}} ).should.throw(JSONResponseError) @@ -396,7 +398,8 @@ def test_get_key_fields(): @mock_dynamodb2_deprecated def test_get_missing_item(): table = create_table() - table.get_item.when.called_with(forum_name='missing').should.throw(ItemNotFound) + table.get_item.when.called_with( + forum_name='missing').should.throw(ItemNotFound) @requires_boto_gte("2.9") @@ -436,7 +439,8 @@ def test_update_item_remove(): } # Then remove the SentBy field - conn.update_item("messages", key_map, update_expression="REMOVE SentBy, SentTo") + conn.update_item("messages", key_map, + update_expression="REMOVE SentBy, SentTo") returned_item = table.get_item(username="steve") dict(returned_item).should.equal({ @@ -460,7 +464,8 @@ def test_update_item_set(): 'username': {"S": "steve"} } - conn.update_item("messages", key_map, update_expression="SET foo=bar, blah=baz REMOVE SentBy") + conn.update_item("messages", key_map, + update_expression="SET foo=bar, blah=baz REMOVE SentBy") returned_item = table.get_item(username="steve") dict(returned_item).should.equal({ @@ -470,7 +475,6 @@ def test_update_item_set(): }) - @mock_dynamodb2_deprecated def test_failed_overwrite(): table = Table.create('messages', schema=[ @@ -487,7 +491,8 @@ def test_failed_overwrite(): table.put_item(data=data2, overwrite=True) data3 = {'id': '123', 'data': '812'} - table.put_item.when.called_with(data=data3).should.throw(ConditionalCheckFailedException) + table.put_item.when.called_with(data=data3).should.throw( + ConditionalCheckFailedException) returned_item = table.lookup('123') dict(returned_item).should.equal(data2) @@ -521,6 +526,7 @@ def test_conflicting_writes(): boto3 """ + @mock_dynamodb2 def test_boto3_create_table(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ 
-617,7 +623,6 @@ def test_boto3_put_item_conditions_pass(): assert dict(returned_item)['Item']['foo'].should.equal("baz") - @mock_dynamodb2 def test_scan_pagination(): table = _create_user_table() diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 4c154ae84..40cc5fe24 100755 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -20,10 +20,12 @@ def test_ami_create_and_delete(): instance = reservation.instances[0] with assert_raises(EC2ResponseError) as ex: - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami", dry_run=True) + image_id = conn.create_image( + instance.id, "test-ami", "this is a test ami", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set') image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") @@ -47,8 +49,10 @@ def test_ami_create_and_delete(): snapshots.should.have.length_of(1) snapshot = snapshots[0] - image.block_device_mapping.current_value.snapshot_id.should.equal(snapshot.id) - snapshot.description.should.equal("Auto-created snapshot for AMI {0}".format(image.id)) + image.block_device_mapping.current_value.snapshot_id.should.equal( + snapshot.id) + snapshot.description.should.equal( + "Auto-created snapshot for AMI {0}".format(image.id)) snapshot.volume_id.should.equal(volume.id) # Deregister @@ -56,7 +60,8 @@ def test_ami_create_and_delete(): success = conn.deregister_image(image_id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set') success = conn.deregister_image(image_id) success.should.be.true @@ -75,23 +80,29 @@ def test_ami_copy(): reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] - source_image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + source_image_id = conn.create_image( + instance.id, "test-ami", "this is a test ami") instance.terminate() source_image = conn.get_all_images(image_ids=[source_image_id])[0] - # Boto returns a 'CopyImage' object with an image_id attribute here. Use the image_id to fetch the full info. + # Boto returns a 'CopyImage' object with an image_id attribute here. Use + # the image_id to fetch the full info. 
     with assert_raises(EC2ResponseError) as ex:
-        copy_image_ref = conn.copy_image(source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", dry_run=True)
+        copy_image_ref = conn.copy_image(
+            source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set')

-    copy_image_ref = conn.copy_image(source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami")
+    copy_image_ref = conn.copy_image(
+        source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami")
     copy_image_id = copy_image_ref.image_id
     copy_image = conn.get_all_images(image_ids=[copy_image_id])[0]

     copy_image.id.should.equal(copy_image_id)
-    copy_image.virtualization_type.should.equal(source_image.virtualization_type)
+    copy_image.virtualization_type.should.equal(
+        source_image.virtualization_type)
     copy_image.architecture.should.equal(source_image.architecture)
     copy_image.kernel_id.should.equal(source_image.kernel_id)
     copy_image.platform.should.equal(source_image.platform)
@@ -105,15 +116,18 @@ def test_ami_copy():

     # Copy from non-existent source ID.
     with assert_raises(EC2ResponseError) as cm:
-        conn.copy_image(source_image.region.name, 'ami-abcd1234', "test-copy-ami", "this is a test copy ami")
+        conn.copy_image(source_image.region.name, 'ami-abcd1234',
+                        "test-copy-ami", "this is a test copy ami")
     cm.exception.code.should.equal('InvalidAMIID.NotFound')
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none

     # Copy from non-existent source region.
with assert_raises(EC2ResponseError) as cm: - invalid_region = 'us-east-1' if (source_image.region.name != 'us-east-1') else 'us-west-1' - conn.copy_image(invalid_region, source_image.id, "test-copy-ami", "this is a test copy ami") + invalid_region = 'us-east-1' if (source_image.region.name != + 'us-east-1') else 'us-west-1' + conn.copy_image(invalid_region, source_image.id, + "test-copy-ami", "this is a test copy ami") cm.exception.code.should.equal('InvalidAMIID.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @@ -131,7 +145,8 @@ def test_ami_tagging(): image.add_tag("a key", "some value", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') image.add_tag("a key", "some value") @@ -179,7 +194,8 @@ def test_ami_filters(): instanceA.modify_attribute("kernel", "k-1234abcd") instanceA.modify_attribute("platform", "windows") instanceA.modify_attribute("virtualization_type", "hvm") - imageA_id = conn.create_image(instanceA.id, "test-ami-A", "this is a test ami") + imageA_id = conn.create_image( + instanceA.id, "test-ami-A", "this is a test ami") imageA = conn.get_image(imageA_id) reservationB = conn.run_instances('ami-abcd1234') @@ -188,18 +204,22 @@ def test_ami_filters(): instanceB.modify_attribute("kernel", "k-abcd1234") instanceB.modify_attribute("platform", "linux") instanceB.modify_attribute("virtualization_type", "paravirtual") - imageB_id = conn.create_image(instanceB.id, "test-ami-B", "this is a test ami") + imageB_id = conn.create_image( + instanceB.id, "test-ami-B", "this is a test ami") imageB = conn.get_image(imageB_id) imageB.set_launch_permissions(group_names=("all")) - amis_by_architecture = conn.get_all_images(filters={'architecture': 'x86_64'}) + amis_by_architecture = conn.get_all_images( + filters={'architecture': 'x86_64'}) set([ami.id for ami in amis_by_architecture]).should.equal(set([imageB.id])) amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'}) set([ami.id for ami in amis_by_kernel]).should.equal(set([imageB.id])) - amis_by_virtualization = conn.get_all_images(filters={'virtualization-type': 'paravirtual'}) - set([ami.id for ami in amis_by_virtualization]).should.equal(set([imageB.id])) + amis_by_virtualization = conn.get_all_images( + filters={'virtualization-type': 'paravirtual'}) + set([ami.id for ami in amis_by_virtualization] + ).should.equal(set([imageB.id])) amis_by_platform = conn.get_all_images(filters={'platform': 'windows'}) set([ami.id for ami in amis_by_platform]).should.equal(set([imageA.id])) @@ -208,7 +228,8 @@ def test_ami_filters(): set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id])) amis_by_state = conn.get_all_images(filters={'state': 'available'}) - set([ami.id for ami in amis_by_state]).should.equal(set([imageA.id, imageB.id])) + set([ami.id for ami in amis_by_state]).should.equal( + set([imageA.id, imageB.id])) amis_by_name = conn.get_all_images(filters={'name': imageA.name}) set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id])) @@ -226,20 +247,23 @@ def test_ami_filtering_via_tag(): reservationA = conn.run_instances('ami-1234abcd') instanceA = 
reservationA.instances[0] - imageA_id = conn.create_image(instanceA.id, "test-ami-A", "this is a test ami") + imageA_id = conn.create_image( + instanceA.id, "test-ami-A", "this is a test ami") imageA = conn.get_image(imageA_id) imageA.add_tag("a key", "some value") reservationB = conn.run_instances('ami-abcd1234') instanceB = reservationB.instances[0] - imageB_id = conn.create_image(instanceB.id, "test-ami-B", "this is a test ami") + imageB_id = conn.create_image( + instanceB.id, "test-ami-B", "this is a test ami") imageB = conn.get_image(imageB_id) imageB.add_tag("another key", "some other value") amis_by_tagA = conn.get_all_images(filters={'tag:a key': 'some value'}) set([ami.id for ami in amis_by_tagA]).should.equal(set([imageA.id])) - amis_by_tagB = conn.get_all_images(filters={'tag:another key': 'some other value'}) + amis_by_tagB = conn.get_all_images( + filters={'tag:another key': 'some other value'}) set([ami.id for ami in amis_by_tagB]).should.equal(set([imageB.id])) @@ -274,7 +298,8 @@ def test_ami_attribute_group_permissions(): image = conn.get_image(image_id) # Baseline - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.name.should.equal('launch_permission') attributes.attrs.should.have.length_of(0) @@ -290,32 +315,38 @@ def test_ami_attribute_group_permissions(): # Add 'all' group and confirm with assert_raises(EC2ResponseError) as ex: - conn.modify_image_attribute(**dict(ADD_GROUP_ARGS, **{'dry_run': True})) + conn.modify_image_attribute( + **dict(ADD_GROUP_ARGS, **{'dry_run': True})) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set') conn.modify_image_attribute(**ADD_GROUP_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs['groups'].should.have.length_of(1) attributes.attrs['groups'].should.equal(['all']) image = conn.get_image(image_id) image.is_public.should.equal(True) # Add is idempotent - conn.modify_image_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) + conn.modify_image_attribute.when.called_with( + **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) # Remove 'all' group and confirm conn.modify_image_attribute(**REMOVE_GROUP_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs.should.have.length_of(0) image = conn.get_image(image_id) image.is_public.should.equal(False) # Remove is idempotent - conn.modify_image_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) + conn.modify_image_attribute.when.called_with( + **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) @mock_emr_deprecated @@ -327,7 +358,8 @@ def test_ami_attribute_user_permissions(): image = conn.get_image(image_id) # Baseline - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, 
attribute='launchPermission') attributes.name.should.equal('launch_permission') attributes.attrs.should.have.length_of(0) @@ -353,19 +385,23 @@ def test_ami_attribute_user_permissions(): # Add multiple users and confirm conn.modify_image_attribute(**ADD_USERS_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs['user_ids'].should.have.length_of(2) - set(attributes.attrs['user_ids']).should.equal(set([str(USER1), str(USER2)])) + set(attributes.attrs['user_ids']).should.equal( + set([str(USER1), str(USER2)])) image = conn.get_image(image_id) image.is_public.should.equal(False) # Add is idempotent - conn.modify_image_attribute.when.called_with(**ADD_USERS_ARGS).should_not.throw(EC2ResponseError) + conn.modify_image_attribute.when.called_with( + **ADD_USERS_ARGS).should_not.throw(EC2ResponseError) # Remove single user and confirm conn.modify_image_attribute(**REMOVE_SINGLE_USER_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs['user_ids'].should.have.length_of(1) set(attributes.attrs['user_ids']).should.equal(set([str(USER2)])) image = conn.get_image(image_id) @@ -374,13 +410,15 @@ def test_ami_attribute_user_permissions(): # Remove multiple users and confirm conn.modify_image_attribute(**REMOVE_USERS_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs.should.have.length_of(0) image = conn.get_image(image_id) image.is_public.should.equal(False) # Remove is idempotent - conn.modify_image_attribute.when.called_with(**REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) + conn.modify_image_attribute.when.called_with( + **REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) @mock_emr_deprecated @@ -397,7 +435,8 @@ def test_ami_attribute_user_and_group_permissions(): image = conn.get_image(image_id) # Baseline - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.name.should.equal('launch_permission') attributes.attrs.should.have.length_of(0) @@ -419,7 +458,8 @@ def test_ami_attribute_user_and_group_permissions(): # Add and confirm conn.modify_image_attribute(**ADD_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs['user_ids'].should.have.length_of(2) set(attributes.attrs['user_ids']).should.equal(set([USER1, USER2])) set(attributes.attrs['groups']).should.equal(set(['all'])) @@ -429,7 +469,8 @@ def test_ami_attribute_user_and_group_permissions(): # Remove and confirm conn.modify_image_attribute(**REMOVE_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs.should.have.length_of(0) image = conn.get_image(image_id) image.is_public.should.equal(False) @@ -483,7 +524,8 @@ def test_ami_attribute_error_cases(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - # Error: Add with one invalid user ID among other valid IDs, ensure no partial changes. 
+ # Error: Add with one invalid user ID among other valid IDs, ensure no + # partial changes. with assert_raises(EC2ResponseError) as cm: conn.modify_image_attribute(image.id, attribute='launchPermission', @@ -493,7 +535,8 @@ def test_ami_attribute_error_cases(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs.should.have.length_of(0) # Error: Add with invalid image ID diff --git a/tests/test_ec2/test_customer_gateways.py b/tests/test_ec2/test_customer_gateways.py index 93e35dc6a..589f887f6 100644 --- a/tests/test_ec2/test_customer_gateways.py +++ b/tests/test_ec2/test_customer_gateways.py @@ -12,26 +12,31 @@ from moto import mock_ec2_deprecated def test_create_customer_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') - customer_gateway = conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534) + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) customer_gateway.should_not.be.none customer_gateway.id.should.match(r'cgw-\w+') customer_gateway.type.should.equal('ipsec.1') customer_gateway.bgp_asn.should.equal(65534) customer_gateway.ip_address.should.equal('205.251.242.54') + @mock_ec2_deprecated def test_describe_customer_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') - customer_gateway = conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534) + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) cgws = conn.get_all_customer_gateways() cgws.should.have.length_of(1) cgws[0].id.should.match(customer_gateway.id) + @mock_ec2_deprecated def test_delete_customer_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') - customer_gateway = conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534) + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) customer_gateway.should_not.be.none cgws = conn.get_all_customer_gateways() cgws[0].id.should.match(customer_gateway.id) @@ -39,6 +44,7 @@ def test_delete_customer_gateways(): cgws = conn.get_all_customer_gateways() cgws.should.have.length_of(0) + @mock_ec2_deprecated def test_delete_customer_gateways_bad_id(): conn = boto.connect_vpc('the_key', 'the_secret') diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py index 0279a3d54..4e2520241 100644 --- a/tests/test_ec2/test_dhcp_options.py +++ b/tests/test_ec2/test_dhcp_options.py @@ -19,7 +19,8 @@ SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] def test_dhcp_options_associate(): """ associate dhcp option """ conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) vpc = conn.create_vpc("10.0.0.0/16") rval = conn.associate_dhcp_options(dhcp_options.id, vpc.id) @@ -43,7 +44,8 @@ def test_dhcp_options_associate_invalid_dhcp_id(): def test_dhcp_options_associate_invalid_vpc_id(): """ associate dhcp option invalid vpc id """ conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) with assert_raises(EC2ResponseError) as cm: conn.associate_dhcp_options(dhcp_options.id, "foo") @@ -56,7 +58,8 @@ 
def test_dhcp_options_associate_invalid_vpc_id(): def test_dhcp_options_delete_with_vpc(): """Test deletion of dhcp options with vpc""" conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) dhcp_options_id = dhcp_options.id vpc = conn.create_vpc("10.0.0.0/16") @@ -83,10 +86,13 @@ def test_create_dhcp_options(): """Create most basic dhcp option""" conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_option = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_option = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) dhcp_option.options[u'domain-name'][0].should.be.equal(SAMPLE_DOMAIN_NAME) - dhcp_option.options[u'domain-name-servers'][0].should.be.equal(SAMPLE_NAME_SERVERS[0]) - dhcp_option.options[u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1]) + dhcp_option.options[ + u'domain-name-servers'][0].should.be.equal(SAMPLE_NAME_SERVERS[0]) + dhcp_option.options[ + u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1]) @mock_ec2_deprecated @@ -210,8 +216,10 @@ def test_dhcp_options_get_by_tag(): dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options['domain-name'][0].should.be.equal('example.com') - dhcp_options_sets[0].options['domain-name-servers'][0].should.be.equal('10.0.10.2') + dhcp_options_sets[0].options[ + 'domain-name'][0].should.be.equal('example.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.10.2') dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions1') dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') @@ -219,8 +227,10 @@ def test_dhcp_options_get_by_tag(): dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options['domain-name'][0].should.be.equal('example.com') - dhcp_options_sets[0].options['domain-name-servers'][0].should.be.equal('10.0.20.2') + dhcp_options_sets[0].options[ + 'domain-name'][0].should.be.equal('example.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.20.2') dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions2') dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') @@ -247,17 +257,21 @@ def test_dhcp_options_get_by_id(): dhcp_options_sets = conn.get_all_dhcp_options() dhcp_options_sets.should.have.length_of(2) - dhcp_options_sets = conn.get_all_dhcp_options(filters={'dhcp-options-id': dhcp1_id}) + dhcp_options_sets = conn.get_all_dhcp_options( + filters={'dhcp-options-id': dhcp1_id}) dhcp_options_sets.should.have.length_of(1) dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test1.com') - dhcp_options_sets[0].options['domain-name-servers'][0].should.be.equal('10.0.10.2') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.10.2') - dhcp_options_sets = conn.get_all_dhcp_options(filters={'dhcp-options-id': dhcp2_id}) + dhcp_options_sets = conn.get_all_dhcp_options( + filters={'dhcp-options-id': dhcp2_id}) dhcp_options_sets.should.have.length_of(1) dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test2.com') - dhcp_options_sets[0].options['domain-name-servers'][0].should.be.equal('10.0.20.2') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.20.2') @mock_ec2 
@@ -315,4 +329,5 @@ def test_dhcp_options_get_by_invalid_filter(): conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) filters = {'invalid-filter': 'invalid-value'} - conn.get_all_dhcp_options.when.called_with(filters=filters).should.throw(NotImplementedError) + conn.get_all_dhcp_options.when.called_with( + filters=filters).should.throw(NotImplementedError) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 6491412e3..83c89d129 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -28,7 +28,8 @@ def test_create_and_delete_volume(): volume.delete(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteVolume operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteVolume operation: Request would have succeeded, but DryRun flag is set') volume.delete() @@ -42,7 +43,6 @@ def test_create_and_delete_volume(): cm.exception.request_id.should_not.be.none - @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -50,7 +50,8 @@ def test_create_encrypted_volume_dryrun(): conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') @mock_ec2_deprecated @@ -62,7 +63,8 @@ def test_create_encrypted_volume(): conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') all_volumes = conn.get_all_volumes() all_volumes[0].encrypted.should.be(True) @@ -108,29 +110,42 @@ def test_volume_filters(): block_mapping = instance.block_device_mapping['/dev/sda1'] - volumes_by_attach_time = conn.get_all_volumes(filters={'attachment.attach-time': block_mapping.attach_time}) - set([vol.id for vol in volumes_by_attach_time]).should.equal(set([block_mapping.volume_id])) + volumes_by_attach_time = conn.get_all_volumes( + filters={'attachment.attach-time': block_mapping.attach_time}) + set([vol.id for vol in volumes_by_attach_time] + ).should.equal(set([block_mapping.volume_id])) - volumes_by_attach_device = conn.get_all_volumes(filters={'attachment.device': '/dev/sda1'}) - set([vol.id for vol in volumes_by_attach_device]).should.equal(set([block_mapping.volume_id])) + volumes_by_attach_device = conn.get_all_volumes( + filters={'attachment.device': '/dev/sda1'}) + set([vol.id for vol in volumes_by_attach_device] + ).should.equal(set([block_mapping.volume_id])) - volumes_by_attach_instance_id = 
conn.get_all_volumes(filters={'attachment.instance-id': instance.id}) - set([vol.id for vol in volumes_by_attach_instance_id]).should.equal(set([block_mapping.volume_id])) + volumes_by_attach_instance_id = conn.get_all_volumes( + filters={'attachment.instance-id': instance.id}) + set([vol.id for vol in volumes_by_attach_instance_id] + ).should.equal(set([block_mapping.volume_id])) - volumes_by_attach_status = conn.get_all_volumes(filters={'attachment.status': 'attached'}) - set([vol.id for vol in volumes_by_attach_status]).should.equal(set([block_mapping.volume_id])) + volumes_by_attach_status = conn.get_all_volumes( + filters={'attachment.status': 'attached'}) + set([vol.id for vol in volumes_by_attach_status] + ).should.equal(set([block_mapping.volume_id])) - volumes_by_create_time = conn.get_all_volumes(filters={'create-time': volume4.create_time}) - set([vol.create_time for vol in volumes_by_create_time]).should.equal(set([volume4.create_time])) + volumes_by_create_time = conn.get_all_volumes( + filters={'create-time': volume4.create_time}) + set([vol.create_time for vol in volumes_by_create_time] + ).should.equal(set([volume4.create_time])) volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size}) set([vol.id for vol in volumes_by_size]).should.equal(set([volume2.id])) - volumes_by_snapshot_id = conn.get_all_volumes(filters={'snapshot-id': snapshot.id}) - set([vol.id for vol in volumes_by_snapshot_id]).should.equal(set([volume4.id])) + volumes_by_snapshot_id = conn.get_all_volumes( + filters={'snapshot-id': snapshot.id}) + set([vol.id for vol in volumes_by_snapshot_id] + ).should.equal(set([volume4.id])) volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'}) - set([vol.id for vol in volumes_by_status]).should.equal(set([block_mapping.volume_id])) + set([vol.id for vol in volumes_by_status]).should.equal( + set([block_mapping.volume_id])) volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id}) set([vol.id for vol in volumes_by_id]).should.equal(set([volume1.id])) @@ -138,13 +153,17 @@ def test_volume_filters(): volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'}) set([vol.id for vol in volumes_by_tag_key]).should.equal(set([volume1.id])) - volumes_by_tag_value = conn.get_all_volumes(filters={'tag-value': 'testvalue1'}) - set([vol.id for vol in volumes_by_tag_value]).should.equal(set([volume1.id])) + volumes_by_tag_value = conn.get_all_volumes( + filters={'tag-value': 'testvalue1'}) + set([vol.id for vol in volumes_by_tag_value] + ).should.equal(set([volume1.id])) - volumes_by_tag = conn.get_all_volumes(filters={'tag:testkey1': 'testvalue1'}) + volumes_by_tag = conn.get_all_volumes( + filters={'tag:testkey1': 'testvalue1'}) set([vol.id for vol in volumes_by_tag]).should.equal(set([volume1.id])) - volumes_by_unencrypted = conn.get_all_volumes(filters={'encrypted': 'false'}) + volumes_by_unencrypted = conn.get_all_volumes( + filters={'encrypted': 'false'}) set([vol.id for vol in volumes_by_unencrypted]).should.equal( set([block_mapping.volume_id, volume2.id]) ) @@ -169,7 +188,8 @@ def test_volume_attach_and_detach(): volume.attach(instance.id, "/dev/sdh", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the 
AttachVolume operation: Request would have succeeded, but DryRun flag is set') volume.attach(instance.id, "/dev/sdh") @@ -183,7 +203,8 @@ def test_volume_attach_and_detach(): volume.detach(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set') volume.detach() @@ -218,7 +239,8 @@ def test_create_snapshot(): snapshot = volume.create_snapshot('a dryrun snapshot', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') snapshot = volume.create_snapshot('a test snapshot') snapshot.update() @@ -294,32 +316,50 @@ def test_snapshot_filters(): conn.create_tags([snapshot1.id], {'testkey1': 'testvalue1'}) conn.create_tags([snapshot2.id], {'testkey2': 'testvalue2'}) - snapshots_by_description = conn.get_all_snapshots(filters={'description': 'testsnapshot1'}) - set([snap.id for snap in snapshots_by_description]).should.equal(set([snapshot1.id])) + snapshots_by_description = conn.get_all_snapshots( + filters={'description': 'testsnapshot1'}) + set([snap.id for snap in snapshots_by_description] + ).should.equal(set([snapshot1.id])) - snapshots_by_id = conn.get_all_snapshots(filters={'snapshot-id': snapshot1.id}) - set([snap.id for snap in snapshots_by_id]).should.equal(set([snapshot1.id])) + snapshots_by_id = conn.get_all_snapshots( + filters={'snapshot-id': snapshot1.id}) + set([snap.id for snap in snapshots_by_id] + ).should.equal(set([snapshot1.id])) - snapshots_by_start_time = conn.get_all_snapshots(filters={'start-time': snapshot1.start_time}) - set([snap.start_time for snap in snapshots_by_start_time]).should.equal(set([snapshot1.start_time])) + snapshots_by_start_time = conn.get_all_snapshots( + filters={'start-time': snapshot1.start_time}) + set([snap.start_time for snap in snapshots_by_start_time] + ).should.equal(set([snapshot1.start_time])) - snapshots_by_volume_id = conn.get_all_snapshots(filters={'volume-id': volume1.id}) - set([snap.id for snap in snapshots_by_volume_id]).should.equal(set([snapshot1.id, snapshot2.id])) + snapshots_by_volume_id = conn.get_all_snapshots( + filters={'volume-id': volume1.id}) + set([snap.id for snap in snapshots_by_volume_id] + ).should.equal(set([snapshot1.id, snapshot2.id])) - snapshots_by_volume_size = conn.get_all_snapshots(filters={'volume-size': volume1.size}) - set([snap.id for snap in snapshots_by_volume_size]).should.equal(set([snapshot1.id, snapshot2.id])) + snapshots_by_volume_size = conn.get_all_snapshots( + filters={'volume-size': volume1.size}) + set([snap.id for snap in snapshots_by_volume_size] + ).should.equal(set([snapshot1.id, snapshot2.id])) - snapshots_by_tag_key = conn.get_all_snapshots(filters={'tag-key': 'testkey1'}) - set([snap.id for snap in snapshots_by_tag_key]).should.equal(set([snapshot1.id])) + snapshots_by_tag_key = conn.get_all_snapshots( + 
filters={'tag-key': 'testkey1'}) + set([snap.id for snap in snapshots_by_tag_key] + ).should.equal(set([snapshot1.id])) - snapshots_by_tag_value = conn.get_all_snapshots(filters={'tag-value': 'testvalue1'}) - set([snap.id for snap in snapshots_by_tag_value]).should.equal(set([snapshot1.id])) + snapshots_by_tag_value = conn.get_all_snapshots( + filters={'tag-value': 'testvalue1'}) + set([snap.id for snap in snapshots_by_tag_value] + ).should.equal(set([snapshot1.id])) - snapshots_by_tag = conn.get_all_snapshots(filters={'tag:testkey1': 'testvalue1'}) - set([snap.id for snap in snapshots_by_tag]).should.equal(set([snapshot1.id])) + snapshots_by_tag = conn.get_all_snapshots( + filters={'tag:testkey1': 'testvalue1'}) + set([snap.id for snap in snapshots_by_tag] + ).should.equal(set([snapshot1.id])) - snapshots_by_encrypted = conn.get_all_snapshots(filters={'encrypted': 'true'}) - set([snap.id for snap in snapshots_by_encrypted]).should.equal(set([snapshot3.id])) + snapshots_by_encrypted = conn.get_all_snapshots( + filters={'encrypted': 'true'}) + set([snap.id for snap in snapshots_by_encrypted] + ).should.equal(set([snapshot3.id])) @mock_ec2_deprecated @@ -331,7 +371,8 @@ def test_snapshot_attribute(): snapshot = volume.create_snapshot() # Baseline - attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission') + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') attributes.name.should.equal('create_volume_permission') attributes.attrs.should.have.length_of(0) @@ -348,34 +389,42 @@ def test_snapshot_attribute(): # Add 'all' group and confirm with assert_raises(EC2ResponseError) as ex: - conn.modify_snapshot_attribute(**dict(ADD_GROUP_ARGS, **{'dry_run': True})) + conn.modify_snapshot_attribute( + **dict(ADD_GROUP_ARGS, **{'dry_run': True})) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') conn.modify_snapshot_attribute(**ADD_GROUP_ARGS) - attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission') + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') attributes.attrs['groups'].should.have.length_of(1) attributes.attrs['groups'].should.equal(['all']) # Add is idempotent - conn.modify_snapshot_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) + conn.modify_snapshot_attribute.when.called_with( + **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) # Remove 'all' group and confirm with assert_raises(EC2ResponseError) as ex: - conn.modify_snapshot_attribute(**dict(REMOVE_GROUP_ARGS, **{'dry_run': True})) + conn.modify_snapshot_attribute( + **dict(REMOVE_GROUP_ARGS, **{'dry_run': True})) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is 
set') conn.modify_snapshot_attribute(**REMOVE_GROUP_ARGS) - attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission') + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') attributes.attrs.should.have.length_of(0) # Remove is idempotent - conn.modify_snapshot_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) + conn.modify_snapshot_attribute.when.called_with( + **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) # Error: Add with group != 'all' with assert_raises(EC2ResponseError) as cm: @@ -428,7 +477,8 @@ def test_create_volume_from_snapshot(): snapshot = volume.create_snapshot('a test snapshot', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') snapshot = volume.create_snapshot('a test snapshot') snapshot.update() @@ -469,16 +519,19 @@ def test_modify_attribute_blockDeviceMapping(): instance = reservation.instances[0] with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}, dry_run=True) + instance.modify_attribute('blockDeviceMapping', { + '/dev/sda1': True}, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set') instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}) instance = ec2_backends[conn.region.name].get_instance(instance.id) instance.block_device_mapping.should.have.key('/dev/sda1') - instance.block_device_mapping['/dev/sda1'].delete_on_termination.should.be(True) + instance.block_device_mapping[ + '/dev/sda1'].delete_on_termination.should.be(True) @mock_ec2_deprecated @@ -491,8 +544,10 @@ def test_volume_tag_escaping(): snapshot.add_tags({'key': ''}, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - dict(conn.get_all_snapshots()[0].tags).should_not.be.equal({'key': ''}) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + dict(conn.get_all_snapshots()[0].tags).should_not.be.equal( + {'key': ''}) snapshot.add_tags({'key': ''}) diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index f92c4df8b..2e1ae189a 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -24,7 +24,8 @@ def test_eip_allocate_classic(): standard = conn.allocate_address(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - 
ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') standard = conn.allocate_address() standard.should.be.a(boto.ec2.address.Address) @@ -36,7 +37,8 @@ def test_eip_allocate_classic(): standard.release(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') standard.release() standard.should_not.be.within(conn.get_all_addresses()) @@ -51,7 +53,8 @@ def test_eip_allocate_vpc(): vpc = conn.allocate_address(domain="vpc", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') vpc = conn.allocate_address(domain="vpc") vpc.should.be.a(boto.ec2.address.Address) @@ -90,23 +93,28 @@ def test_eip_associate_classic(): cm.exception.request_id.should_not.be.none with assert_raises(EC2ResponseError) as ex: - conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip, dry_run=True) + conn.associate_address(instance_id=instance.id, + public_ip=eip.public_ip, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set') conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(instance.id) with assert_raises(EC2ResponseError) as ex: conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set') conn.disassociate_address(public_ip=eip.public_ip) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(u'') 
eip.release() eip.should_not.be.within(conn.get_all_addresses()) @@ -114,6 +122,7 @@ def test_eip_associate_classic(): instance.terminate() + @mock_ec2_deprecated def test_eip_associate_vpc(): """Associate/Disassociate EIP to VPC instance""" @@ -131,11 +140,14 @@ def test_eip_associate_vpc(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - conn.associate_address(instance_id=instance.id, allocation_id=eip.allocation_id) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + conn.associate_address(instance_id=instance.id, + allocation_id=eip.allocation_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(instance.id) conn.disassociate_address(association_id=eip.association_id) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(u'') eip.association_id.should.be.none @@ -143,13 +155,15 @@ def test_eip_associate_vpc(): eip.release(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') eip.release() eip = None instance.terminate() + @mock_ec2 def test_eip_boto3_vpc_association(): """Associate EIP to VPC instance in a new subnet with boto3""" @@ -157,7 +171,7 @@ def test_eip_boto3_vpc_association(): client = boto3.client('ec2', region_name='us-west-1') vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') subnet_res = client.create_subnet( - VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') + VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') instance = service.create_instances(**{ 'InstanceType': 't2.micro', 'ImageId': 'ami-test', @@ -192,17 +206,21 @@ def test_eip_associate_network_interface(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - conn.associate_address(network_interface_id=eni.id, allocation_id=eip.allocation_id) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + conn.associate_address(network_interface_id=eni.id, + allocation_id=eip.allocation_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.network_interface_id.should.be.equal(eni.id) conn.disassociate_address(association_id=eip.association_id) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.network_interface_id.should.be.equal(u'') eip.association_id.should.be.none eip.release() eip = None + @mock_ec2_deprecated def test_eip_reassociate(): """reassociate EIP""" @@ -219,12 +237,14 @@ def test_eip_reassociate(): # Different ID detects resource association with assert_raises(EC2ResponseError) as cm: - conn.associate_address(instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False) + conn.associate_address( + instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False) cm.exception.code.should.equal('Resource.AlreadyAssociated') 
cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - conn.associate_address.when.called_with(instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) + conn.associate_address.when.called_with( + instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) eip.release() eip = None @@ -232,6 +252,7 @@ def test_eip_reassociate(): instance1.terminate() instance2.terminate() + @mock_ec2_deprecated def test_eip_reassociate_nic(): """reassociate EIP""" @@ -243,23 +264,28 @@ def test_eip_reassociate_nic(): eni2 = conn.create_network_interface(subnet.id) eip = conn.allocate_address() - conn.associate_address(network_interface_id=eni1.id, public_ip=eip.public_ip) + conn.associate_address(network_interface_id=eni1.id, + public_ip=eip.public_ip) # Same ID is idempotent - conn.associate_address(network_interface_id=eni1.id, public_ip=eip.public_ip) + conn.associate_address(network_interface_id=eni1.id, + public_ip=eip.public_ip) # Different ID detects resource association with assert_raises(EC2ResponseError) as cm: - conn.associate_address(network_interface_id=eni2.id, public_ip=eip.public_ip) + conn.associate_address( + network_interface_id=eni2.id, public_ip=eip.public_ip) cm.exception.code.should.equal('Resource.AlreadyAssociated') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - conn.associate_address.when.called_with(network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) + conn.associate_address.when.called_with( + network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) eip.release() eip = None + @mock_ec2_deprecated def test_eip_associate_invalid_args(): """Associate EIP, invalid args """ @@ -290,6 +316,7 @@ def test_eip_disassociate_bogus_association(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_eip_release_bogus_eip(): """Release bogus EIP""" @@ -334,7 +361,7 @@ def test_eip_describe(): number_of_classic_ips = 2 number_of_vpc_ips = 2 - #allocate some IPs + # allocate some IPs for _ in range(number_of_classic_ips): eips.append(conn.allocate_address()) for _ in range(number_of_vpc_ips): @@ -344,19 +371,22 @@ def test_eip_describe(): # Can we find each one individually? for eip in eips: if eip.allocation_id: - lookup_addresses = conn.get_all_addresses(allocation_ids=[eip.allocation_id]) + lookup_addresses = conn.get_all_addresses( + allocation_ids=[eip.allocation_id]) else: - lookup_addresses = conn.get_all_addresses(addresses=[eip.public_ip]) + lookup_addresses = conn.get_all_addresses( + addresses=[eip.public_ip]) len(lookup_addresses).should.be.equal(1) lookup_addresses[0].public_ip.should.be.equal(eip.public_ip) # Can we find first two when we search for them? 
-    lookup_addresses = conn.get_all_addresses(addresses=[eips[0].public_ip, eips[1].public_ip])
+    lookup_addresses = conn.get_all_addresses(
+        addresses=[eips[0].public_ip, eips[1].public_ip])
     len(lookup_addresses).should.be.equal(2)
     lookup_addresses[0].public_ip.should.be.equal(eips[0].public_ip)
     lookup_addresses[1].public_ip.should.be.equal(eips[1].public_ip)

-    #Release all IPs
+    # Release all IPs
     for eip in eips:
         eip.release()
     len(conn.get_all_addresses()).should.be.equal(0)
@@ -372,4 +402,3 @@ def test_eip_describe_none():
     cm.exception.code.should.equal('InvalidAddress.NotFound')
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none
-
diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py
index 9027e0448..4ec23b919 100644
--- a/tests/test_ec2/test_elastic_network_interfaces.py
+++ b/tests/test_ec2/test_elastic_network_interfaces.py
@@ -27,7 +27,8 @@ def test_elastic_network_interfaces():
         eni = conn.create_network_interface(subnet.id, dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set')

     eni = conn.create_network_interface(subnet.id)

@@ -41,7 +42,8 @@ def test_elastic_network_interfaces():
         conn.delete_network_interface(eni.id, dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set')

     conn.delete_network_interface(eni.id)

@@ -89,16 +91,20 @@ def test_elastic_network_interfaces_with_groups():
     conn = boto.connect_vpc('the_key', 'the_secret')
     vpc = conn.create_vpc("10.0.0.0/16")
     subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
-    security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
-    security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
-    conn.create_network_interface(subnet.id, groups=[security_group1.id, security_group2.id])
+    security_group1 = conn.create_security_group(
+        'test security group #1', 'this is a test security group')
+    security_group2 = conn.create_security_group(
+        'test security group #2', 'this is a test security group')
+    conn.create_network_interface(
+        subnet.id, groups=[security_group1.id, security_group2.id])

     all_enis = conn.get_all_network_interfaces()
     all_enis.should.have.length_of(1)

     eni = all_enis[0]
     eni.groups.should.have.length_of(2)
-    set([group.id for group in eni.groups]).should.equal(set([security_group1.id, security_group2.id]))
+    set([group.id for group in eni.groups]).should.equal(
+        set([security_group1.id, security_group2.id]))


 @requires_boto_gte("2.12.0")
@@ -107,8 +113,10 @@ def test_elastic_network_interfaces_modify_attribute():
     conn = boto.connect_vpc('the_key', 'the_secret')
     vpc = conn.create_vpc("10.0.0.0/16")
     subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
-    security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
-    security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
+    security_group1 = conn.create_security_group(
+        'test security group #1', 'this is a test security group')
+    security_group2 = conn.create_security_group(
+        'test security group #2', 'this is a test security group')
     conn.create_network_interface(subnet.id, groups=[security_group1.id])

     all_enis = conn.get_all_network_interfaces()
@@ -119,12 +127,15 @@ def test_elastic_network_interfaces_modify_attribute():
     eni.groups[0].id.should.equal(security_group1.id)

     with assert_raises(EC2ResponseError) as ex:
-        conn.modify_network_interface_attribute(eni.id, 'groupset', [security_group2.id], dry_run=True)
+        conn.modify_network_interface_attribute(
+            eni.id, 'groupset', [security_group2.id], dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set')

-    conn.modify_network_interface_attribute(eni.id, 'groupset', [security_group2.id])
+    conn.modify_network_interface_attribute(
+        eni.id, 'groupset', [security_group2.id])

     all_enis = conn.get_all_network_interfaces()
     all_enis.should.have.length_of(1)
@@ -140,11 +151,15 @@ def test_elastic_network_interfaces_filtering():
     vpc = conn.create_vpc("10.0.0.0/16")
     subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")

-    security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
-    security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
+    security_group1 = conn.create_security_group(
+        'test security group #1', 'this is a test security group')
+    security_group2 = conn.create_security_group(
+        'test security group #2', 'this is a test security group')

-    eni1 = conn.create_network_interface(subnet.id, groups=[security_group1.id, security_group2.id])
-    eni2 = conn.create_network_interface(subnet.id, groups=[security_group1.id])
+    eni1 = conn.create_network_interface(
+        subnet.id, groups=[security_group1.id, security_group2.id])
+    eni2 = conn.create_network_interface(
+        subnet.id, groups=[security_group1.id])
     eni3 = conn.create_network_interface(subnet.id)

     all_enis = conn.get_all_network_interfaces()
@@ -156,22 +171,26 @@ def test_elastic_network_interfaces_filtering():
     set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id]))

     # Filter by ENI ID
-    enis_by_id = conn.get_all_network_interfaces(filters={'network-interface-id': eni1.id})
+    enis_by_id = conn.get_all_network_interfaces(
+        filters={'network-interface-id': eni1.id})
     enis_by_id.should.have.length_of(1)
     set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id]))

     # Filter by Security Group
-    enis_by_group = conn.get_all_network_interfaces(filters={'group-id': security_group1.id})
+    enis_by_group = conn.get_all_network_interfaces(
+        filters={'group-id': security_group1.id})
     enis_by_group.should.have.length_of(2)
     set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id, eni2.id]))

     # Filter by ENI ID and Security Group
-    enis_by_group = conn.get_all_network_interfaces(filters={'network-interface-id': eni1.id, 'group-id': security_group1.id})
+    enis_by_group = conn.get_all_network_interfaces(
+        filters={'network-interface-id': eni1.id, 'group-id': security_group1.id})
     enis_by_group.should.have.length_of(1)
     set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id]))

     # Unsupported filter
-    conn.get_all_network_interfaces.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
+    conn.get_all_network_interfaces.when.called_with(
+        filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)


 @mock_ec2
@@ -180,15 +199,19 @@ def test_elastic_network_interfaces_get_by_tag_name():
     ec2_client = boto3.client('ec2', region_name='us-west-2')

     vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
-    subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')
+    subnet = ec2.create_subnet(
+        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')

-    eni1 = ec2.create_network_interface(SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')
+    eni1 = ec2.create_network_interface(
+        SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')

     with assert_raises(ClientError) as ex:
         eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}], DryRun=True)
     ex.exception.response['Error']['Code'].should.equal('DryRunOperation')
-    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400)
-    ex.exception.response['Error']['Message'].should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.response['ResponseMetadata'][
+        'HTTPStatusCode'].should.equal(400)
+    ex.exception.response['Error']['Message'].should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')

     eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}])

@@ -211,9 +234,11 @@ def test_elastic_network_interfaces_get_by_private_ip():
     ec2_client = boto3.client('ec2', region_name='us-west-2')

     vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
-    subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')
+    subnet = ec2.create_subnet(
+        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')

-    eni1 = ec2.create_network_interface(SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')
+    eni1 = ec2.create_network_interface(
+        SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')

     # The status of the new interface should be 'available'
     waiter = ec2_client.get_waiter('network_interface_available')
@@ -242,9 +267,11 @@ def test_elastic_network_interfaces_get_by_vpc_id():
     ec2_client = boto3.client('ec2', region_name='us-west-2')

     vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
-    subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')
+    subnet = ec2.create_subnet(
+        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')

-    eni1 = ec2.create_network_interface(SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')
+    eni1 = ec2.create_network_interface(
+        SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')

     # The status of the new interface should be 'available'
     waiter = ec2_client.get_waiter('network_interface_available')
@@ -265,9 +292,11 @@ def test_elastic_network_interfaces_get_by_subnet_id():
     ec2_client = boto3.client('ec2', region_name='us-west-2')

     vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
-    subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')
+    subnet = ec2.create_subnet(
+        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')

-    eni1 = ec2.create_network_interface(SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')
+    eni1 = ec2.create_network_interface(
+        SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')

     # The status of the new interface should be 'available'
     waiter = ec2_client.get_waiter('network_interface_available')
@@ -297,5 +326,6 @@ def test_elastic_network_interfaces_cloudformation():
     stack = conn.describe_stacks()[0]
     resources = stack.describe_resources()
-    cfn_eni = [resource for resource in resources if resource.resource_type == 'AWS::EC2::NetworkInterface'][0]
+    cfn_eni = [resource for resource in resources if resource.resource_type ==
+               'AWS::EC2::NetworkInterface'][0]
     cfn_eni.physical_resource_id.should.equal(eni.id)
diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py
index b6601e87f..49020555b 100644
--- a/tests/test_ec2/test_instances.py
+++ b/tests/test_ec2/test_instances.py
@@ -45,7 +45,8 @@ def test_instance_launch_and_terminate():
         reservation = conn.run_instances('ami-1234abcd', dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set')

     reservation = conn.run_instances('ami-1234abcd')
     reservation.should.be.a(Reservation)
@@ -66,7 +67,8 @@ def test_instance_launch_and_terminate():
     instance.placement.should.equal('us-east-1a')

     root_device_name = instance.root_device_name
-    instance.block_device_mapping[root_device_name].status.should.equal('in-use')
+    instance.block_device_mapping[
+        root_device_name].status.should.equal('in-use')
     volume_id = instance.block_device_mapping[root_device_name].volume_id
     volume_id.should.match(r'vol-\w+')
@@ -78,7 +80,8 @@ def test_instance_launch_and_terminate():
         conn.terminate_instances([instance.id], dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set')

     conn.terminate_instances([instance.id])

@@ -90,7 +93,8 @@ def test_instance_launch_and_terminate():
 @mock_ec2_deprecated
 def test_terminate_empty_instances():
     conn = boto.connect_ec2('the_key', 'the_secret')
-    conn.terminate_instances.when.called_with([]).should.throw(EC2ResponseError)
+    conn.terminate_instances.when.called_with(
+        []).should.throw(EC2ResponseError)


 @freeze_time("2014-01-01 05:00:00")
@@ -117,8 +121,10 @@ def test_instance_attach_volume():

     for v in conn.get_all_volumes(volume_ids=[instance.block_device_mapping['/dev/sdc1'].volume_id]):
         v.attach_data.instance_id.should.equal(instance.id)
-        v.attach_data.attach_time.should.equal(instance.launch_time)  # can do due to freeze_time decorator.
-        v.create_time.should.equal(instance.launch_time)  # can do due to freeze_time decorator.
+        # can do due to freeze_time decorator.
+        v.attach_data.attach_time.should.equal(instance.launch_time)
+        # can do due to freeze_time decorator.
+        v.create_time.should.equal(instance.launch_time)
         v.region.name.should.equal(instance.region.name)
         v.status.should.equal('in-use')
@@ -135,7 +141,8 @@ def test_get_instances_by_id():
     reservation.instances.should.have.length_of(1)
     reservation.instances[0].id.should.equal(instance1.id)

-    reservations = conn.get_all_instances(instance_ids=[instance1.id, instance2.id])
+    reservations = conn.get_all_instances(
+        instance_ids=[instance1.id, instance2.id])
     reservations.should.have.length_of(1)
     reservation = reservations[0]
     reservation.instances.should.have.length_of(2)
@@ -158,25 +165,31 @@ def test_get_instances_filtering_by_state():
     conn.terminate_instances([instance1.id])

-    reservations = conn.get_all_instances(filters={'instance-state-name': 'running'})
+    reservations = conn.get_all_instances(
+        filters={'instance-state-name': 'running'})
     reservations.should.have.length_of(1)
-    # Since we terminated instance1, only instance2 and instance3 should be returned
+    # Since we terminated instance1, only instance2 and instance3 should be
+    # returned
     instance_ids = [instance.id for instance in reservations[0].instances]
     set(instance_ids).should.equal(set([instance2.id, instance3.id]))

-    reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'running'})
+    reservations = conn.get_all_instances(
+        [instance2.id], filters={'instance-state-name': 'running'})
     reservations.should.have.length_of(1)
     instance_ids = [instance.id for instance in reservations[0].instances]
     instance_ids.should.equal([instance2.id])

-    reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'terminated'})
+    reservations = conn.get_all_instances(
+        [instance2.id], filters={'instance-state-name': 'terminated'})
     list(reservations).should.equal([])

     # get_all_instances should still return all 3
     reservations = conn.get_all_instances()
     reservations[0].instances.should.have.length_of(3)

-    conn.get_all_instances.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
+    conn.get_all_instances.when.called_with(
+        filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
+

 @mock_ec2_deprecated
 def test_get_instances_filtering_by_instance_id():
@@ -184,16 +197,19 @@ def test_get_instances_filtering_by_instance_id():
     reservation = conn.run_instances('ami-1234abcd', min_count=3)
     instance1, instance2, instance3 = reservation.instances

-    reservations = conn.get_all_instances(filters={'instance-id': instance1.id})
+    reservations = conn.get_all_instances(
+        filters={'instance-id': instance1.id})
     # get_all_instances should return just instance1
     reservations[0].instances.should.have.length_of(1)
     reservations[0].instances[0].id.should.equal(instance1.id)

-    reservations = conn.get_all_instances(filters={'instance-id': [instance1.id, instance2.id]})
+    reservations = conn.get_all_instances(
+        filters={'instance-id': [instance1.id, instance2.id]})
     # get_all_instances should return two
     reservations[0].instances.should.have.length_of(2)

-    reservations = conn.get_all_instances(filters={'instance-id': 'non-existing-id'})
+    reservations = conn.get_all_instances(
+        filters={'instance-id': 'non-existing-id'})
     reservations.should.have.length_of(0)

@@ -207,22 +223,25 @@ def test_get_instances_filtering_by_instance_type():
     reservation3 = conn.run_instances('ami-1234abcd', instance_type='t1.micro')
     instance3 = reservation3.instances[0]

-    reservations = conn.get_all_instances(filters={'instance-type': 'm1.small'})
+    reservations = conn.get_all_instances(
+        filters={'instance-type': 'm1.small'})
     # get_all_instances should return instance1,2
     reservations.should.have.length_of(2)
     reservations[0].instances.should.have.length_of(1)
     reservations[1].instances.should.have.length_of(1)
-    instance_ids = [ reservations[0].instances[0].id,
-                     reservations[1].instances[0].id ]
+    instance_ids = [reservations[0].instances[0].id,
+                    reservations[1].instances[0].id]
     set(instance_ids).should.equal(set([instance1.id, instance2.id]))

-    reservations = conn.get_all_instances(filters={'instance-type': 't1.micro'})
+    reservations = conn.get_all_instances(
+        filters={'instance-type': 't1.micro'})
     # get_all_instances should return one
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(1)
     reservations[0].instances[0].id.should.equal(instance3.id)

-    reservations = conn.get_all_instances(filters={'instance-type': ['t1.micro', 'm1.small']})
+    reservations = conn.get_all_instances(
+        filters={'instance-type': ['t1.micro', 'm1.small']})
     reservations.should.have.length_of(3)
     reservations[0].instances.should.have.length_of(1)
     reservations[1].instances.should.have.length_of(1)
@@ -231,13 +250,15 @@ def test_get_instances_filtering_by_instance_type():
         reservations[0].instances[0].id,
         reservations[1].instances[0].id,
         reservations[2].instances[0].id,
-        ]
-    set(instance_ids).should.equal(set([instance1.id, instance2.id, instance3.id]))
+    ]
+    set(instance_ids).should.equal(
+        set([instance1.id, instance2.id, instance3.id]))

     reservations = conn.get_all_instances(filters={'instance-type': 'bogus'})
-    #bogus instance-type should return none
+    # bogus instance-type should return none
     reservations.should.have.length_of(0)

+
 @mock_ec2_deprecated
 def test_get_instances_filtering_by_reason_code():
     conn = boto.connect_ec2()
@@ -246,10 +267,12 @@ def test_get_instances_filtering_by_reason_code():
     instance1.stop()
     instance2.terminate()

-    reservations = conn.get_all_instances(filters={'state-reason-code': 'Client.UserInitiatedShutdown'})
+    reservations = conn.get_all_instances(
+        filters={'state-reason-code': 'Client.UserInitiatedShutdown'})
     # get_all_instances should return instance1 and instance2
     reservations[0].instances.should.have.length_of(2)
-    set([instance1.id, instance2.id]).should.equal(set([i.id for i in reservations[0].instances]))
+    set([instance1.id, instance2.id]).should.equal(
+        set([i.id for i in reservations[0].instances]))

     reservations = conn.get_all_instances(filters={'state-reason-code': ''})
     # get_all_instances should return instance 3
@@ -262,10 +285,13 @@ def test_get_instances_filtering_by_source_dest_check():
     conn = boto.connect_ec2()
     reservation = conn.run_instances('ami-1234abcd', min_count=2)
     instance1, instance2 = reservation.instances
-    conn.modify_instance_attribute(instance1.id, attribute='sourceDestCheck', value=False)
+    conn.modify_instance_attribute(
+        instance1.id, attribute='sourceDestCheck', value=False)

-    source_dest_check_false = conn.get_all_instances(filters={'source-dest-check': 'false'})
-    source_dest_check_true = conn.get_all_instances(filters={'source-dest-check': 'true'})
+    source_dest_check_false = conn.get_all_instances(
+        filters={'source-dest-check': 'false'})
+    source_dest_check_true = conn.get_all_instances(
+        filters={'source-dest-check': 'true'})

     source_dest_check_false[0].instances.should.have.length_of(1)
     source_dest_check_false[0].instances[0].id.should.equal(instance1.id)
@@ -279,12 +305,14 @@ def test_get_instances_filtering_by_vpc_id():
     conn = boto.connect_vpc('the_key', 'the_secret')
     vpc1 = conn.create_vpc("10.0.0.0/16")
     subnet1 = conn.create_subnet(vpc1.id, "10.0.0.0/27")
-    reservation1 = conn.run_instances('ami-1234abcd', min_count=1, subnet_id=subnet1.id)
+    reservation1 = conn.run_instances(
+        'ami-1234abcd', min_count=1, subnet_id=subnet1.id)
     instance1 = reservation1.instances[0]

     vpc2 = conn.create_vpc("10.1.0.0/16")
     subnet2 = conn.create_subnet(vpc2.id, "10.1.0.0/27")
-    reservation2 = conn.run_instances('ami-1234abcd', min_count=1, subnet_id=subnet2.id)
+    reservation2 = conn.run_instances(
+        'ami-1234abcd', min_count=1, subnet_id=subnet2.id)
     instance2 = reservation2.instances[0]

     reservations1 = conn.get_all_instances(filters={'vpc-id': vpc1.id})
@@ -320,31 +348,35 @@ def test_get_instances_filtering_by_tag():
     instance2.add_tag('tag2', 'wrong value')
     instance3.add_tag('tag2', 'value2')

-    reservations = conn.get_all_instances(filters={'tag:tag0' : 'value0'})
+    reservations = conn.get_all_instances(filters={'tag:tag0': 'value0'})
     # get_all_instances should return no instances
     reservations.should.have.length_of(0)

-    reservations = conn.get_all_instances(filters={'tag:tag1' : 'value1'})
+    reservations = conn.get_all_instances(filters={'tag:tag1': 'value1'})
     # get_all_instances should return both instances with this tag value
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(2)
     reservations[0].instances[0].id.should.equal(instance1.id)
     reservations[0].instances[1].id.should.equal(instance2.id)

-    reservations = conn.get_all_instances(filters={'tag:tag1' : 'value1', 'tag:tag2' : 'value2'})
+    reservations = conn.get_all_instances(
+        filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'})
     # get_all_instances should return the instance with both tag values
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(1)
     reservations[0].instances[0].id.should.equal(instance1.id)

-    reservations = conn.get_all_instances(filters={'tag:tag1' : 'value1', 'tag:tag2' : 'value2'})
+    reservations = conn.get_all_instances(
+        filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'})
     # get_all_instances should return the instance with both tag values
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(1)
     reservations[0].instances[0].id.should.equal(instance1.id)

-    reservations = conn.get_all_instances(filters={'tag:tag2' : ['value2', 'bogus']})
-    # get_all_instances should return both instances with one of the acceptable tag values
+    reservations = conn.get_all_instances(
+        filters={'tag:tag2': ['value2', 'bogus']})
+    # get_all_instances should return both instances with one of the
+    # acceptable tag values
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(2)
     reservations[0].instances[0].id.should.equal(instance1.id)
@@ -362,32 +394,37 @@ def test_get_instances_filtering_by_tag_value():
     instance2.add_tag('tag2', 'wrong value')
     instance3.add_tag('tag2', 'value2')

-    reservations = conn.get_all_instances(filters={'tag-value' : 'value0'})
+    reservations = conn.get_all_instances(filters={'tag-value': 'value0'})
     # get_all_instances should return no instances
     reservations.should.have.length_of(0)

-    reservations = conn.get_all_instances(filters={'tag-value' : 'value1'})
+    reservations = conn.get_all_instances(filters={'tag-value': 'value1'})
     # get_all_instances should return both instances with this tag value
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(2)
     reservations[0].instances[0].id.should.equal(instance1.id)
     reservations[0].instances[1].id.should.equal(instance2.id)

-    reservations = conn.get_all_instances(filters={'tag-value' : ['value2', 'value1']})
-    # get_all_instances should return both instances with one of the acceptable tag values
+    reservations = conn.get_all_instances(
+        filters={'tag-value': ['value2', 'value1']})
+    # get_all_instances should return both instances with one of the
+    # acceptable tag values
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(3)
     reservations[0].instances[0].id.should.equal(instance1.id)
     reservations[0].instances[1].id.should.equal(instance2.id)
     reservations[0].instances[2].id.should.equal(instance3.id)

-    reservations = conn.get_all_instances(filters={'tag-value' : ['value2', 'bogus']})
-    # get_all_instances should return both instances with one of the acceptable tag values
+    reservations = conn.get_all_instances(
+        filters={'tag-value': ['value2', 'bogus']})
+    # get_all_instances should return both instances with one of the
+    # acceptable tag values
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(2)
     reservations[0].instances[0].id.should.equal(instance1.id)
     reservations[0].instances[1].id.should.equal(instance3.id)

+
 @mock_ec2_deprecated
 def test_get_instances_filtering_by_tag_name():
     conn = boto.connect_ec2()
@@ -399,25 +436,28 @@ def test_get_instances_filtering_by_tag_name():
     instance2.add_tag('tag2X')
     instance3.add_tag('tag3')

-    reservations = conn.get_all_instances(filters={'tag-key' : 'tagX'})
+    reservations = conn.get_all_instances(filters={'tag-key': 'tagX'})
     # get_all_instances should return no instances
     reservations.should.have.length_of(0)

-    reservations = conn.get_all_instances(filters={'tag-key' : 'tag1'})
+    reservations = conn.get_all_instances(filters={'tag-key': 'tag1'})
     # get_all_instances should return both instances with this tag value
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(2)
     reservations[0].instances[0].id.should.equal(instance1.id)
     reservations[0].instances[1].id.should.equal(instance2.id)

-    reservations = conn.get_all_instances(filters={'tag-key' : ['tag1', 'tag3']})
-    # get_all_instances should return both instances with one of the acceptable tag values
+    reservations = conn.get_all_instances(
+        filters={'tag-key': ['tag1', 'tag3']})
+    # get_all_instances should return both instances with one of the
+    # acceptable tag values
     reservations.should.have.length_of(1)
     reservations[0].instances.should.have.length_of(3)
     reservations[0].instances[0].id.should.equal(instance1.id)
     reservations[0].instances[1].id.should.equal(instance2.id)
     reservations[0].instances[2].id.should.equal(instance3.id)

+
 @mock_ec2_deprecated
 def test_instance_start_and_stop():
     conn = boto.connect_ec2('the_key', 'the_secret')
@@ -431,7 +471,8 @@ def test_instance_start_and_stop():
         stopped_instances = conn.stop_instances(instance_ids, dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set')
     stopped_instances = conn.stop_instances(instance_ids)
@@ -439,10 +480,12 @@ def test_instance_start_and_stop():
         instance.state.should.equal('stopping')

     with assert_raises(EC2ResponseError) as ex:
-        started_instances = conn.start_instances([instances[0].id], dry_run=True)
+        started_instances = conn.start_instances(
+            [instances[0].id], dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set')

     started_instances = conn.start_instances([instances[0].id])
     started_instances[0].state.should.equal('pending')
@@ -458,7 +501,8 @@ def test_instance_reboot():
         instance.reboot(dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set')

     instance.reboot()
     instance.state.should.equal('pending')
@@ -474,7 +518,8 @@ def test_instance_attribute_instance_type():
         instance.modify_attribute("instanceType", "m1.small", dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set')

     instance.modify_attribute("instanceType", "m1.small")

@@ -482,6 +527,7 @@ def test_instance_attribute_instance_type():
     instance_attribute.should.be.a(InstanceAttribute)
     instance_attribute.get('instanceType').should.equal("m1.small")

+
 @mock_ec2_deprecated
 def test_modify_instance_attribute_security_groups():
     conn = boto.connect_ec2('the_key', 'the_secret')
@@ -495,7 +541,8 @@ def test_modify_instance_attribute_security_groups():
         instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set')

     instance.modify_attribute("groupSet", [sg_id, sg_id2])

@@ -513,10 +560,12 @@ def test_instance_attribute_user_data():
     instance = reservation.instances[0]

     with assert_raises(EC2ResponseError) as ex:
-        instance.modify_attribute("userData", "this is my user data", dry_run=True)
+        instance.modify_attribute(
+            "userData", "this is my user data", dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set')

     instance.modify_attribute("userData", "this is my user data")

@@ -544,7 +593,8 @@ def test_instance_attribute_source_dest_check():
         instance.modify_attribute("sourceDestCheck", False, dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set')

     instance.modify_attribute("sourceDestCheck", False)

@@ -585,10 +635,12 @@ def test_run_instance_with_security_group_name():
     conn = boto.connect_ec2('the_key', 'the_secret')

     with assert_raises(EC2ResponseError) as ex:
-        group = conn.create_security_group('group1', "some description", dry_run=True)
+        group = conn.create_security_group(
+            'group1', "some description", dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set')

     group = conn.create_security_group('group1', "some description")

@@ -658,14 +710,16 @@ def test_run_instance_with_nic_autocreated():
     conn = boto.connect_vpc('the_key', 'the_secret')
     vpc = conn.create_vpc("10.0.0.0/16")
     subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
-    security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
-    security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
+    security_group1 = conn.create_security_group(
+        'test security group #1', 'this is a test security group')
+    security_group2 = conn.create_security_group(
+        'test security group #2', 'this is a test security group')
     private_ip = "54.0.0.1"

     reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id,
-                                      security_groups=[security_group1.name],
-                                      security_group_ids=[security_group2.id],
-                                      private_ip_address=private_ip)
+                                     security_groups=[security_group1.name],
+                                     security_group_ids=[security_group2.id],
+                                     private_ip_address=private_ip)
     instance = reservation.instances[0]

     all_enis = conn.get_all_network_interfaces()
@@ -677,11 +731,13 @@ def test_run_instance_with_nic_autocreated():
     instance.subnet_id.should.equal(subnet.id)
     instance.groups.should.have.length_of(2)
-    set([group.id for group in instance.groups]).should.equal(set([security_group1.id,security_group2.id]))
+    set([group.id for group in instance.groups]).should.equal(
+        set([security_group1.id, security_group2.id]))

     eni.subnet_id.should.equal(subnet.id)
     eni.groups.should.have.length_of(2)
-    set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
+    set([group.id for group in eni.groups]).should.equal(
+        set([security_group1.id, security_group2.id]))
eni.private_ip_addresses.should.have.length_of(1) eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) @@ -691,20 +747,24 @@ def test_run_instance_with_nic_preexisting(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group('test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group('test security group #2', 'this is a test security group') + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') private_ip = "54.0.0.1" - eni = conn.create_network_interface(subnet.id, private_ip, groups=[security_group1.id]) + eni = conn.create_network_interface( + subnet.id, private_ip, groups=[security_group1.id]) # Boto requires NetworkInterfaceCollection of NetworkInterfaceSpecifications... # annoying, but generates the desired querystring. from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection - interface = NetworkInterfaceSpecification(network_interface_id=eni.id, device_index=0) + interface = NetworkInterfaceSpecification( + network_interface_id=eni.id, device_index=0) interfaces = NetworkInterfaceCollection(interface) # end Boto objects reservation = conn.run_instances('ami-1234abcd', network_interfaces=interfaces, - security_group_ids=[security_group2.id]) + security_group_ids=[security_group2.id]) instance = reservation.instances[0] instance.subnet_id.should.equal(subnet.id) @@ -718,9 +778,11 @@ def test_run_instance_with_nic_preexisting(): instance_eni.subnet_id.should.equal(subnet.id) instance_eni.groups.should.have.length_of(2) - set([group.id for group in instance_eni.groups]).should.equal(set([security_group1.id,security_group2.id])) + set([group.id for group in instance_eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) instance_eni.private_ip_addresses.should.have.length_of(1) - instance_eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) + instance_eni.private_ip_addresses[ + 0].private_ip_address.should.equal(private_ip) @requires_boto_gte("2.32.0") @@ -730,10 +792,13 @@ def test_instance_with_nic_attach_detach(): vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group('test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group('test security group #2', 'this is a test security group') + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') - reservation = conn.run_instances('ami-1234abcd', security_group_ids=[security_group1.id]) + reservation = conn.run_instances( + 'ami-1234abcd', security_group_ids=[security_group1.id]) instance = reservation.instances[0] eni = conn.create_network_interface(subnet.id, groups=[security_group2.id]) @@ -742,14 +807,17 @@ def test_instance_with_nic_attach_detach(): instance.interfaces.should.have.length_of(1) eni.groups.should.have.length_of(1) - set([group.id for group in eni.groups]).should.equal(set([security_group2.id])) + set([group.id for group in eni.groups]).should.equal( + set([security_group2.id])) # Attach with 
assert_raises(EC2ResponseError) as ex: - conn.attach_network_interface(eni.id, instance.id, device_index=1, dry_run=True) + conn.attach_network_interface( + eni.id, instance.id, device_index=1, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') conn.attach_network_interface(eni.id, instance.id, device_index=1) @@ -759,18 +827,22 @@ def test_instance_with_nic_attach_detach(): instance_eni = instance.interfaces[1] instance_eni.id.should.equal(eni.id) instance_eni.groups.should.have.length_of(2) - set([group.id for group in instance_eni.groups]).should.equal(set([security_group1.id,security_group2.id])) + set([group.id for group in instance_eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) - eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0] + eni = conn.get_all_network_interfaces( + filters={'network-interface-id': eni.id})[0] eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id])) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) # Detach with assert_raises(EC2ResponseError) as ex: conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') conn.detach_network_interface(instance_eni.attachment.id) @@ -778,9 +850,11 @@ def test_instance_with_nic_attach_detach(): instance.update() instance.interfaces.should.have.length_of(1) - eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0] + eni = conn.get_all_network_interfaces( + filters={'network-interface-id': eni.id})[0] eni.groups.should.have.length_of(1) - set([group.id for group in eni.groups]).should.equal(set([security_group2.id])) + set([group.id for group in eni.groups]).should.equal( + set([security_group2.id])) # Detach with invalid attachment ID with assert_raises(EC2ResponseError) as cm: @@ -851,6 +925,7 @@ def test_describe_instance_status_with_instance_filter(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @requires_boto_gte("2.32.0") @mock_ec2_deprecated def test_describe_instance_status_with_non_running_instances(): @@ -877,6 +952,7 @@ def test_describe_instance_status_with_non_running_instances(): status3 = next((s for s in all_status if s.id == instance3.id), None) status3.state_name.should.equal('running') + @mock_ec2_deprecated def test_get_instance_by_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -887,12 +963,15 @@ def test_get_instance_by_security_group(): security_group = conn.create_security_group('test', 'test') with assert_raises(EC2ResponseError) as ex: - 
conn.modify_instance_attribute(instance.id, "groupSet", [security_group.id], dry_run=True) + conn.modify_instance_attribute(instance.id, "groupSet", [ + security_group.id], dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') - conn.modify_instance_attribute(instance.id, "groupSet", [security_group.id]) + conn.modify_instance_attribute( + instance.id, "groupSet", [security_group.id]) security_group_instances = security_group.instances() diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index fe5e4945d..5842621cd 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -13,9 +13,10 @@ import sure # noqa from moto import mock_ec2_deprecated -VPC_CIDR="10.0.0.0/16" -BAD_VPC="vpc-deadbeef" -BAD_IGW="igw-deadbeef" +VPC_CIDR = "10.0.0.0/16" +BAD_VPC = "vpc-deadbeef" +BAD_IGW = "igw-deadbeef" + @mock_ec2_deprecated def test_igw_create(): @@ -28,7 +29,8 @@ def test_igw_create(): igw = conn.create_internet_gateway(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set') igw = conn.create_internet_gateway() conn.get_all_internet_gateways().should.have.length_of(1) @@ -37,6 +39,7 @@ def test_igw_create(): igw = conn.get_all_internet_gateways()[0] igw.attachments.should.have.length_of(0) + @mock_ec2_deprecated def test_igw_attach(): """ internet gateway attach """ @@ -48,13 +51,15 @@ def test_igw_attach(): conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') conn.attach_internet_gateway(igw.id, vpc.id) igw = conn.get_all_internet_gateways()[0] igw.attachments[0].vpc_id.should.be.equal(vpc.id) + @mock_ec2_deprecated def test_igw_attach_bad_vpc(): """ internet gateway fail to attach w/ bad vpc """ @@ -67,6 +72,7 @@ def test_igw_attach_bad_vpc(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_igw_attach_twice(): """ internet gateway fail to attach twice """ @@ -82,6 +88,7 @@ def test_igw_attach_twice(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_igw_detach(): """ internet gateway detach""" @@ -94,12 +101,14 @@ def test_igw_detach(): conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) 
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set')

     conn.detach_internet_gateway(igw.id, vpc.id)
     igw = conn.get_all_internet_gateways()[0]
     igw.attachments.should.have.length_of(0)

+
 @mock_ec2_deprecated
 def test_igw_detach_wrong_vpc():
     """ internet gateway fail to detach w/ wrong vpc """
@@ -115,6 +124,7 @@ def test_igw_detach_wrong_vpc():
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none

+
 @mock_ec2_deprecated
 def test_igw_detach_invalid_vpc():
     """ internet gateway fail to detach w/ invalid vpc """
@@ -129,6 +139,7 @@ def test_igw_detach_invalid_vpc():
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none

+
 @mock_ec2_deprecated
 def test_igw_detach_unattached():
     """ internet gateway fail to detach unattached """
@@ -142,6 +153,7 @@ def test_igw_detach_unattached():
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none

+
 @mock_ec2_deprecated
 def test_igw_delete():
     """ internet gateway delete"""
@@ -155,11 +167,13 @@ def test_igw_delete():
         conn.delete_internet_gateway(igw.id, dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set')

     conn.delete_internet_gateway(igw.id)
     conn.get_all_internet_gateways().should.have.length_of(0)

+
 @mock_ec2_deprecated
 def test_igw_delete_attached():
     """ internet gateway fail to delete attached """
@@ -174,6 +188,7 @@ def test_igw_delete_attached():
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none

+
 @mock_ec2_deprecated
 def test_igw_desribe():
     """ internet gateway fetch by id """
@@ -182,6 +197,7 @@ def test_igw_desribe():
     igw_by_search = conn.get_all_internet_gateways([igw.id])[0]
     igw.id.should.equal(igw_by_search.id)

+
 @mock_ec2_deprecated
 def test_igw_desribe_bad_id():
     """ internet gateway fail to fetch by bad id """
@@ -203,7 +219,8 @@ def test_igw_filter_by_vpc_id():
     vpc = conn.create_vpc(VPC_CIDR)
     conn.attach_internet_gateway(igw1.id, vpc.id)

-    result = conn.get_all_internet_gateways(filters={"attachment.vpc-id": vpc.id})
+    result = conn.get_all_internet_gateways(
+        filters={"attachment.vpc-id": vpc.id})
     result.should.have.length_of(1)
     result[0].id.should.equal(igw1.id)

@@ -230,7 +247,8 @@ def test_igw_filter_by_internet_gateway_id():
     igw1 = conn.create_internet_gateway()
     igw2 = conn.create_internet_gateway()

-    result = conn.get_all_internet_gateways(filters={"internet-gateway-id": igw1.id})
+    result = conn.get_all_internet_gateways(
+        filters={"internet-gateway-id": igw1.id})
     result.should.have.length_of(1)
     result[0].id.should.equal(igw1.id)

@@ -245,6 +263,7 @@ def test_igw_filter_by_attachment_state():
     vpc = conn.create_vpc(VPC_CIDR)
     conn.attach_internet_gateway(igw1.id, vpc.id)

-    result = conn.get_all_internet_gateways(filters={"attachment.state": "available"})
+    result = conn.get_all_internet_gateways(
+        filters={"attachment.state": "available"})
     result.should.have.length_of(1)
     result[0].id.should.equal(igw1.id)
diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py
index 6c4773200..ec979a871 100644
--- a/tests/test_ec2/test_key_pairs.py
+++ b/tests/test_ec2/test_key_pairs.py
@@ -36,7 +36,8 @@ def test_key_pairs_create():
         kp = conn.create_key_pair('foo', dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set')

     kp = conn.create_key_pair('foo')
     assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
@@ -91,7 +92,8 @@ def test_key_pairs_delete_exist():
         r = conn.delete_key_pair('foo', dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set')

     r = conn.delete_key_pair('foo')
     r.should.be.ok
@@ -106,7 +108,8 @@ def test_key_pairs_import():
         kp = conn.import_key_pair('foo', b'content', dry_run=True)
     ex.exception.error_code.should.equal('DryRunOperation')
     ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set')
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set')

     kp = conn.import_key_pair('foo', b'content')
     assert kp.name == 'foo'
diff --git a/tests/test_ec2/test_nat_gateway.py b/tests/test_ec2/test_nat_gateway.py
index b9c95f7c3..27e8753be 100644
--- a/tests/test_ec2/test_nat_gateway.py
+++ b/tests/test_ec2/test_nat_gateway.py
@@ -56,7 +56,8 @@ def test_delete_nat_gateway():
     nat_gateway_id = nat_gateway['NatGateway']['NatGatewayId']
     response = conn.delete_nat_gateway(NatGatewayId=nat_gateway_id)

-    response['ResponseMetadata'].pop('HTTPHeaders', None)  # this is hard to match against, so remove it
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
     response['ResponseMetadata'].pop('RetryAttempts', None)
     response.should.equal({
         'NatGatewayId': nat_gateway_id,
@@ -89,14 +90,20 @@ def test_create_and_describe_nat_gateway():
     enis = conn.describe_network_interfaces()['NetworkInterfaces']
     eni_id = enis[0]['NetworkInterfaceId']
-    public_ip = conn.describe_addresses(AllocationIds=[allocation_id])['Addresses'][0]['PublicIp']
+    public_ip = conn.describe_addresses(AllocationIds=[allocation_id])[
+        'Addresses'][0]['PublicIp']

     describe_response['NatGateways'].should.have.length_of(1)
-    describe_response['NatGateways'][0]['NatGatewayId'].should.equal(nat_gateway_id)
+    describe_response['NatGateways'][0][
+        'NatGatewayId'].should.equal(nat_gateway_id)
     describe_response['NatGateways'][0]['State'].should.equal('available')
     describe_response['NatGateways'][0]['SubnetId'].should.equal(subnet_id)
     describe_response['NatGateways'][0]['VpcId'].should.equal(vpc_id)
-    describe_response['NatGateways'][0]['NatGatewayAddresses'][0]['AllocationId'].should.equal(allocation_id)
-    describe_response['NatGateways'][0]['NatGatewayAddresses'][0]['NetworkInterfaceId'].should.equal(eni_id)
-    assert describe_response['NatGateways'][0]['NatGatewayAddresses'][0]['PrivateIp'].startswith('10.')
-    describe_response['NatGateways'][0]['NatGatewayAddresses'][0]['PublicIp'].should.equal(public_ip)
+    describe_response['NatGateways'][0]['NatGatewayAddresses'][
+        0]['AllocationId'].should.equal(allocation_id)
+    describe_response['NatGateways'][0]['NatGatewayAddresses'][
+        0]['NetworkInterfaceId'].should.equal(eni_id)
+    assert describe_response['NatGateways'][0][
+        'NatGatewayAddresses'][0]['PrivateIp'].startswith('10.')
+    describe_response['NatGateways'][0]['NatGatewayAddresses'][
+        0]['PublicIp'].should.equal(public_ip)
diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py
index 07e02c526..4beca7c67 100644
--- a/tests/test_ec2/test_regions.py
+++ b/tests/test_ec2/test_regions.py
@@ -50,9 +50,11 @@ def test_add_servers_to_multiple_regions():
 @mock_elb_deprecated
 def test_create_autoscaling_group():
     elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
-    elb_conn.create_load_balancer('us_test_lb', zones=[], listeners=[(80, 8080, 'http')])
+    elb_conn.create_load_balancer(
+        'us_test_lb', zones=[], listeners=[(80, 8080, 'http')])
     elb_conn = boto.ec2.elb.connect_to_region('ap-northeast-1')
-    elb_conn.create_load_balancer('ap_test_lb', zones=[], listeners=[(80, 8080, 'http')])
+    elb_conn.create_load_balancer(
+        'ap_test_lb', zones=[], listeners=[(80, 8080, 'http')])

     us_conn = boto.ec2.autoscale.connect_to_region('us-east-1')
     config = boto.ec2.autoscale.LaunchConfiguration(
@@ -79,7 +81,6 @@ def test_create_autoscaling_group():
     )
     us_conn.create_auto_scaling_group(group)

-
     ap_conn = boto.ec2.autoscale.connect_to_region('ap-northeast-1')
     config = boto.ec2.autoscale.LaunchConfiguration(
         name='ap_tester',
@@ -105,7 +106,6 @@ def test_create_autoscaling_group():
     )
     ap_conn.create_auto_scaling_group(group)

-
     len(us_conn.get_all_groups()).should.equal(1)
     len(ap_conn.get_all_groups()).should.equal(1)

@@ -122,7 +122,8 @@ def test_create_autoscaling_group():
     us_group.health_check_type.should.equal("EC2")
     list(us_group.load_balancers).should.equal(["us_test_lb"])
     us_group.placement_group.should.equal("us_test_placement")
-    list(us_group.termination_policies).should.equal(["OldestInstance", "NewestInstance"])
+    list(us_group.termination_policies).should.equal(
+        ["OldestInstance", "NewestInstance"])

     ap_group = ap_conn.get_all_groups()[0]
     ap_group.name.should.equal('ap_tester_group')
@@ -137,4 +138,5 @@ def test_create_autoscaling_group():
     ap_group.health_check_type.should.equal("EC2")
     list(ap_group.load_balancers).should.equal(["ap_test_lb"])
     ap_group.placement_group.should.equal("ap_test_placement")
-    list(ap_group.termination_policies).should.equal(["OldestInstance", "NewestInstance"])
+    list(ap_group.termination_policies).should.equal(
+        ["OldestInstance", "NewestInstance"])
diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py
index 3aa4b460a..6e6c62741 100644
--- a/tests/test_ec2/test_route_tables.py
+++ b/tests/test_ec2/test_route_tables.py
@@ -91,28 +91,34 @@ def test_route_tables_filters_standard():
     all_route_tables.should.have.length_of(5)

     # Filter by main route table
-    main_route_tables = conn.get_all_route_tables(filters={'association.main':'true'})
+    main_route_tables = conn.get_all_route_tables(
+        filters={'association.main': 'true'})
     main_route_tables.should.have.length_of(3)
-    main_route_table_ids = [route_table.id for route_table in main_route_tables]
+    main_route_table_ids = [
+        route_table.id for route_table in main_route_tables]
     main_route_table_ids.should_not.contain(route_table1.id)
     main_route_table_ids.should_not.contain(route_table2.id)

     # Filter by VPC
-    vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id':vpc1.id})
+    vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc1.id})
     vpc1_route_tables.should.have.length_of(2)
-    vpc1_route_table_ids = [route_table.id for route_table in vpc1_route_tables]
+    vpc1_route_table_ids = [
+        route_table.id for route_table in vpc1_route_tables]
     vpc1_route_table_ids.should.contain(route_table1.id)
     vpc1_route_table_ids.should_not.contain(route_table2.id)

     # Filter by VPC and main route table
-    vpc2_main_route_tables = conn.get_all_route_tables(filters={'association.main':'true', 'vpc-id':vpc2.id})
+    vpc2_main_route_tables = conn.get_all_route_tables(
+        filters={'association.main': 'true', 'vpc-id': vpc2.id})
     vpc2_main_route_tables.should.have.length_of(1)
-    vpc2_main_route_table_ids = [route_table.id for route_table in vpc2_main_route_tables]
+    vpc2_main_route_table_ids = [
+        route_table.id for route_table in vpc2_main_route_tables]
     vpc2_main_route_table_ids.should_not.contain(route_table1.id)
     vpc2_main_route_table_ids.should_not.contain(route_table2.id)

     # Unsupported filter
-    conn.get_all_route_tables.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
+    conn.get_all_route_tables.when.called_with(
+        filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)


 @mock_ec2_deprecated
@@ -134,19 +140,22 @@ def test_route_tables_filters_associations():
     all_route_tables.should.have.length_of(4)

     # Filter by association ID
-    association1_route_tables = conn.get_all_route_tables(filters={'association.route-table-association-id':association_id1})
+    association1_route_tables = conn.get_all_route_tables(
+        filters={'association.route-table-association-id': association_id1})
     association1_route_tables.should.have.length_of(1)
     association1_route_tables[0].id.should.equal(route_table1.id)
     association1_route_tables[0].associations.should.have.length_of(2)

     # Filter by route table ID
-    route_table2_route_tables = conn.get_all_route_tables(filters={'association.route-table-id':route_table2.id})
+    route_table2_route_tables = conn.get_all_route_tables(
+        filters={'association.route-table-id': route_table2.id})
     route_table2_route_tables.should.have.length_of(1)
     route_table2_route_tables[0].id.should.equal(route_table2.id)
     route_table2_route_tables[0].associations.should.have.length_of(1)

     # Filter by subnet ID
-    subnet_route_tables = conn.get_all_route_tables(filters={'association.subnet-id':subnet1.id})
+    subnet_route_tables = conn.get_all_route_tables(
+        filters={'association.subnet-id': subnet1.id})
     subnet_route_tables.should.have.length_of(1)
     subnet_route_tables[0].id.should.equal(route_table1.id)
     association1_route_tables[0].associations.should.have.length_of(2)
@@ -179,7 +188,8 @@ def test_route_table_associations():
     route_table.associations[0].subnet_id.should.equal(subnet.id)

     # Associate is idempotent
-    association_id_idempotent = conn.associate_route_table(route_table.id, subnet.id)
+    association_id_idempotent = conn.associate_route_table(
+        route_table.id, subnet.id)
     association_id_idempotent.should.equal(association_id)

     # Error: Attempt delete associated route table.
@@ -255,7 +265,8 @@ def test_route_table_replace_route_table_association():
     route_table1.associations[0].subnet_id.should.equal(subnet.id)

     # Replace Association
-    association_id2 = conn.replace_route_table_association_with_assoc(association_id1, route_table2.id)
+    association_id2 = conn.replace_route_table_association_with_assoc(
+        association_id1, route_table2.id)

     # Refresh
     route_table1 = conn.get_all_route_tables(route_table1.id)[0]
@@ -271,19 +282,22 @@ def test_route_table_replace_route_table_association():
     route_table2.associations[0].subnet_id.should.equal(subnet.id)

     # Replace Association is idempotent
-    association_id_idempotent = conn.replace_route_table_association_with_assoc(association_id2, route_table2.id)
+    association_id_idempotent = conn.replace_route_table_association_with_assoc(
+        association_id2, route_table2.id)
     association_id_idempotent.should.equal(association_id2)

     # Error: Replace association with invalid association ID
     with assert_raises(EC2ResponseError) as cm:
-        conn.replace_route_table_association_with_assoc("rtbassoc-1234abcd", route_table1.id)
+        conn.replace_route_table_association_with_assoc(
+            "rtbassoc-1234abcd", route_table1.id)
     cm.exception.code.should.equal('InvalidAssociationID.NotFound')
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none

     # Error: Replace association with invalid route table ID
     with assert_raises(EC2ResponseError) as cm:
-        conn.replace_route_table_association_with_assoc(association_id2, "rtb-1234abcd")
+        conn.replace_route_table_association_with_assoc(
+            association_id2, "rtb-1234abcd")
     cm.exception.code.should.equal('InvalidRouteTableID.NotFound')
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none
@@ -298,7 +312,8 @@ def test_route_table_get_by_tag():
     route_table = conn.create_route_table(vpc.id)
     route_table.add_tag('Name', 'TestRouteTable')

-    route_tables = conn.get_all_route_tables(filters={'tag:Name': 'TestRouteTable'})
+    route_tables = conn.get_all_route_tables(
+        filters={'tag:Name': 'TestRouteTable'})
     route_tables.should.have.length_of(1)
     route_tables[0].vpc_id.should.equal(vpc.id)
@@ -323,7 +338,8 @@ def test_route_table_get_by_tag_boto3():
     route_tables[0].vpc_id.should.equal(vpc.id)
     route_tables[0].id.should.equal(route_table.id)
     route_tables[0].tags.should.have.length_of(1)
-    route_tables[0].tags[0].should.equal({'Key': 'Name', 'Value': 'TestRouteTable'})
+    route_tables[0].tags[0].should.equal(
+        {'Key': 'Name', 'Value': 'TestRouteTable'})


 @mock_ec2_deprecated
@@ -337,10 +353,12 @@ def test_routes_additional():

     conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id)

-    main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0]  # Refresh route table
+    main_route_table = conn.get_all_route_tables(
+        filters={'vpc-id': vpc.id})[0]  # Refresh route table
     main_route_table.routes.should.have.length_of(2)
-    new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block]
+    new_routes = [
+        route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block]
     new_routes.should.have.length_of(1)

     new_route = new_routes[0]
@@ -351,10 +369,12 @@ def test_routes_additional():

     conn.delete_route(main_route_table.id, ROUTE_CIDR)

-    main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0]  # Refresh route table
+    main_route_table = conn.get_all_route_tables(
+        filters={'vpc-id': vpc.id})[0]  # Refresh route table
     main_route_table.routes.should.have.length_of(1)
-    new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block]
+    new_routes = [
+        route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block]
     new_routes.should.have.length_of(0)

     with assert_raises(EC2ResponseError) as cm:
@@ -368,7 +388,8 @@ def test_routes_additional():
 def test_routes_replace():
     conn = boto.connect_vpc('the_key', 'the_secret')
     vpc = conn.create_vpc("10.0.0.0/16")
-    main_route_table = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc.id})[0]
+    main_route_table = conn.get_all_route_tables(
+        filters={'association.main': 'true', 'vpc-id': vpc.id})[0]
     local_route = main_route_table.routes[0]
     ROUTE_CIDR = "10.0.0.4/24"

@@ -384,11 +405,13 @@ def test_routes_replace():
     # Replace...
     def get_target_route():
         route_table = conn.get_all_route_tables(main_route_table.id)[0]
-        routes = [route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block]
+        routes = [
+            route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block]
         routes.should.have.length_of(1)
         return routes[0]

-    conn.replace_route(main_route_table.id, ROUTE_CIDR, instance_id=instance.id)
+    conn.replace_route(main_route_table.id, ROUTE_CIDR,
+                       instance_id=instance.id)

     target_route = get_target_route()
     target_route.gateway_id.should.be.none
@@ -422,12 +445,14 @@ def test_routes_not_supported():
     ROUTE_CIDR = "10.0.0.4/24"

     # Create
-    conn.create_route.when.called_with(main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError)
+    conn.create_route.when.called_with(
+        main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError)

     # Replace
     igw = conn.create_internet_gateway()
     conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id)
-    conn.replace_route.when.called_with(main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError)
+    conn.replace_route.when.called_with(
+        main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError)


 @requires_boto_gte("2.34.0")
@@ -435,18 +460,21 @@ def test_routes_not_supported():
 def test_routes_vpc_peering_connection():
     conn = boto.connect_vpc('the_key', 'the_secret')
     vpc = conn.create_vpc("10.0.0.0/16")
-    main_route_table = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc.id})[0]
+    main_route_table = conn.get_all_route_tables(
+        filters={'association.main': 'true', 'vpc-id': vpc.id})[0]
     local_route = main_route_table.routes[0]
     ROUTE_CIDR = "10.0.0.4/24"

     peer_vpc = conn.create_vpc("11.0.0.0/16")
     vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id)

-    conn.create_route(main_route_table.id, ROUTE_CIDR, vpc_peering_connection_id=vpc_pcx.id)
+    conn.create_route(main_route_table.id, ROUTE_CIDR,
+                      vpc_peering_connection_id=vpc_pcx.id)

     # Refresh route table
     main_route_table = conn.get_all_route_tables(main_route_table.id)[0]
-    new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block]
+    new_routes = [
+        route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block]
     new_routes.should.have.length_of(1)

     new_route = new_routes[0]
@@ -463,7 +491,8 @@ def
test_routes_vpn_gateway(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc.id})[0] + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] ROUTE_CIDR = "10.0.0.4/24" vpn_gw = conn.create_vpn_gateway(type="ipsec.1") @@ -471,7 +500,8 @@ def test_routes_vpn_gateway(): conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=vpn_gw.id) main_route_table = conn.get_all_route_tables(main_route_table.id)[0] - new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] new_routes.should.have.length_of(1) new_route = new_routes[0] diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 3056331be..21ecad11e 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -20,25 +20,30 @@ def test_create_and_describe_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(EC2ResponseError) as ex: - security_group = conn.create_security_group('test security group', 'this is a test security group', dry_run=True) + security_group = conn.create_security_group( + 'test security group', 'this is a test security group', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') - security_group = conn.create_security_group('test security group', 'this is a test security group') + security_group = conn.create_security_group( + 'test security group', 'this is a test security group') security_group.name.should.equal('test security group') security_group.description.should.equal('this is a test security group') # Trying to create another group with the same name should throw an error with assert_raises(EC2ResponseError) as cm: - conn.create_security_group('test security group', 'this is a test security group') + conn.create_security_group( + 'test security group', 'this is a test security group') cm.exception.code.should.equal('InvalidGroup.Duplicate') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none all_groups = conn.get_all_security_groups() - all_groups.should.have.length_of(3) # The default group gets created automatically + # The default group gets created automatically + all_groups.should.have.length_of(3) group_names = [group.name for group in all_groups] set(group_names).should.equal(set(["default", "test security group"])) @@ -66,16 +71,19 @@ def test_default_security_group(): def test_create_and_describe_vpc_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group('test security group', 'this is a test security group', vpc_id=vpc_id) + security_group = conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id=vpc_id) security_group.vpc_id.should.equal(vpc_id) security_group.name.should.equal('test security 
group') security_group.description.should.equal('this is a test security group') - # Trying to create another group with the same name in the same VPC should throw an error + # Trying to create another group with the same name in the same VPC should + # throw an error with assert_raises(EC2ResponseError) as cm: - conn.create_security_group('test security group', 'this is a test security group', vpc_id) + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) cm.exception.code.should.equal('InvalidGroup.Duplicate') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @@ -94,8 +102,10 @@ def test_create_two_security_groups_with_same_name_in_different_vpc(): vpc_id = 'vpc-5300000c' vpc_id2 = 'vpc-5300000d' - conn.create_security_group('test security group', 'this is a test security group', vpc_id) - conn.create_security_group('test security group', 'this is a test security group', vpc_id2) + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id2) all_groups = conn.get_all_security_groups() @@ -125,7 +135,8 @@ def test_deleting_security_groups(): conn.delete_security_group('test2', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') conn.delete_security_group('test2') conn.get_all_security_groups().should.have.length_of(3) @@ -151,65 +162,83 @@ def test_authorize_ip_range_and_revoke(): security_group = conn.create_security_group('test', 'test') with assert_raises(EC2ResponseError) as ex: - success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") assert success.should.be.true security_group = conn.get_all_security_groups(groupnames=['test'])[0] int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[0].cidr_ip.should.equal("123.123.123.123/32") + security_group.rules[0].grants[ + 0].cidr_ip.should.equal("123.123.123.123/32") # Wrong Cidr should throw error with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32") + security_group.revoke(ip_protocol="tcp", from_port="22", + 
to_port="2222", cidr_ip="123.123.123.122/32") cm.exception.code.should.equal('InvalidPermission.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none # Actually revoke with assert_raises(EC2ResponseError) as ex: - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32") security_group = conn.get_all_security_groups()[0] security_group.rules.should.have.length_of(0) # Test for egress as well - egress_security_group = conn.create_security_group('testegress', 'testegress', vpc_id='vpc-3432589') + egress_security_group = conn.create_security_group( + 'testegress', 'testegress', vpc_id='vpc-3432589') with assert_raises(EC2ResponseError) as ex: - success = conn.authorize_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') - success = conn.authorize_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") assert success.should.be.true - egress_security_group = conn.get_all_security_groups(groupnames='testegress')[0] + egress_security_group = conn.get_all_security_groups( + groupnames='testegress')[0] # There are two egress rules associated with the security group: # the default outbound rule and the new one int(egress_security_group.rules_egress[1].to_port).should.equal(2222) - egress_security_group.rules_egress[1].grants[0].cidr_ip.should.equal("123.123.123.123/32") + egress_security_group.rules_egress[1].grants[ + 0].cidr_ip.should.equal("123.123.123.123/32") # Wrong Cidr should throw error - egress_security_group.revoke.when.called_with(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) + egress_security_group.revoke.when.called_with( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) # Actually revoke with 
assert_raises(EC2ResponseError) as ex: - conn.revoke_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') - conn.revoke_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") egress_security_group = conn.get_all_security_groups()[0] # There is still the default outbound rule @@ -223,24 +252,30 @@ def test_authorize_other_group_and_revoke(): other_security_group = conn.create_security_group('other', 'other') wrong_group = conn.create_security_group('wrong', 'wrong') - success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) assert success.should.be.true - security_group = [group for group in conn.get_all_security_groups() if group.name == 'test'][0] + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[0].group_id.should.equal(other_security_group.id) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) # Wrong source group should throw error with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", src_group=wrong_group) + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=wrong_group) cm.exception.code.should.equal('InvalidPermission.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none # Actually revoke - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=other_security_group) - security_group = [group for group in conn.get_all_security_groups() if group.name == 'test'][0] + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] security_group.rules.should.have.length_of(0) @@ -250,8 +285,10 @@ def test_authorize_other_group_egress_and_revoke(): vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - sg01 = ec2.create_security_group(GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group(GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) + sg01 = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + sg02 = ec2.create_security_group( + GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) ip_permission = { 'IpProtocol': 'tcp', @@ 
-278,27 +315,33 @@ def test_authorize_group_in_vpc(): security_group = conn.create_security_group('test1', 'test1', vpc_id) other_security_group = conn.create_security_group('test2', 'test2', vpc_id) - success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) success.should.be.true # Check that the rule is accurate - security_group = [group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[0].group_id.should.equal(other_security_group.id) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) # Now remove the rule - success = security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success = security_group.revoke( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) success.should.be.true # And check that it gets revoked - security_group = [group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] security_group.rules.should.have.length_of(0) @mock_ec2_deprecated def test_get_all_security_groups(): conn = boto.connect_ec2() - sg1 = conn.create_security_group(name='test1', description='test1', vpc_id='vpc-mjm05d27') + sg1 = conn.create_security_group( + name='test1', description='test1', vpc_id='vpc-mjm05d27') conn.create_security_group(name='test2', description='test2') resp = conn.get_all_security_groups(groupnames=['test1']) @@ -326,7 +369,8 @@ def test_authorize_bad_cidr_throws_invalid_parameter_value(): conn = boto.connect_ec2('the_key', 'the_secret') security_group = conn.create_security_group('test', 'test') with assert_raises(EC2ResponseError) as cm: - security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123") + security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123") cm.exception.code.should.equal('InvalidParameterValue') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @@ -343,7 +387,8 @@ def test_security_group_tagging(): sg.add_tag("Test", "Tag", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') sg.add_tag("Test", "Tag") @@ -362,7 +407,8 @@ def test_security_group_tag_filtering(): sg = conn.create_security_group("test-sg", "Test SG") sg.add_tag("test-tag", "test-value") - groups = conn.get_all_security_groups(filters={"tag:test-tag": "test-value"}) + groups = conn.get_all_security_groups( + filters={"tag:test-tag": "test-value"}) groups.should.have.length_of(1) @@ -507,18 +553,18 @@ def test_sec_group_rule_limit_vpc(): cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - ''' Boto3 ''' + @mock_ec2 def 
test_add_same_rule_twice_throws_error(): ec2 = boto3.resource('ec2', region_name='us-west-1') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - sg = ec2.create_security_group(GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id) + sg = ec2.create_security_group( + GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id) ip_permissions = [ { @@ -541,13 +587,18 @@ def test_security_group_tagging_boto3(): sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") with assert_raises(ClientError) as ex: - conn.create_tags(Resources=[sg['GroupId']], Tags=[{'Key': 'Test', 'Value': 'Tag'}], DryRun=True) + conn.create_tags(Resources=[sg['GroupId']], Tags=[ + {'Key': 'Test', 'Value': 'Tag'}], DryRun=True) ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - conn.create_tags(Resources=[sg['GroupId']], Tags=[{'Key': 'Test', 'Value': 'Tag'}]) - describe = conn.describe_security_groups(Filters=[{'Name': 'tag-value', 'Values': ['Tag']}]) + conn.create_tags(Resources=[sg['GroupId']], Tags=[ + {'Key': 'Test', 'Value': 'Tag'}]) + describe = conn.describe_security_groups( + Filters=[{'Name': 'tag-value', 'Values': ['Tag']}]) tag = describe["SecurityGroups"][0]['Tags'][0] tag['Value'].should.equal("Tag") tag['Key'].should.equal("Test") @@ -559,9 +610,12 @@ def test_authorize_and_revoke_in_bulk(): vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - sg01 = ec2.create_security_group(GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group(GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) - sg03 = ec2.create_security_group(GroupName='sg03', Description='Test security group sg03') + sg01 = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + sg02 = ec2.create_security_group( + GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) + sg03 = ec2.create_security_group( + GroupName='sg03', Description='Test security group sg03') ip_permissions = [ { @@ -611,15 +665,19 @@ def test_authorize_and_revoke_in_bulk(): for ip_permission in expected_ip_permissions: sg01.ip_permissions_egress.shouldnt.contain(ip_permission) + @mock_ec2_deprecated def test_get_all_security_groups_filter_with_same_vpc_id(): conn = boto.connect_ec2('the_key', 'the_secret') vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group('test1', 'test1', vpc_id=vpc_id) - security_group2 = conn.create_security_group('test2', 'test2', vpc_id=vpc_id) + security_group = conn.create_security_group( + 'test1', 'test1', vpc_id=vpc_id) + security_group2 = conn.create_security_group( + 'test2', 'test2', vpc_id=vpc_id) security_group.vpc_id.should.equal(vpc_id) security_group2.vpc_id.should.equal(vpc_id) - security_groups = conn.get_all_security_groups(group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) + security_groups = conn.get_all_security_groups( + group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) 
security_groups.should.have.length_of(1) diff --git a/tests/test_ec2/test_server.py b/tests/test_ec2/test_server.py index e6e9998ba..00be62593 100644 --- a/tests/test_ec2/test_server.py +++ b/tests/test_ec2/test_server.py @@ -18,7 +18,8 @@ def test_ec2_server_get(): headers={"Host": "ec2.us-east-1.amazonaws.com"} ) - groups = re.search("<instanceId>(.*)</instanceId>", res.data.decode('utf-8')) + groups = re.search("<instanceId>(.*)</instanceId>", + res.data.decode('utf-8')) instance_id = groups.groups()[0] res = test_client.get('/?Action=DescribeInstances') diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index 5b51ae68a..8ac91c57b 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -5,9 +5,11 @@ import sure # noqa from moto import mock_ec2 + def get_subnet_id(conn): vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] return subnet_id @@ -19,60 +21,60 @@ def spot_config(subnet_id, allocation_strategy="lowestPrice"): 'TargetCapacity': 6, 'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet', 'LaunchSpecifications': [{ - 'ImageId': 'ami-123', - 'KeyName': 'my-key', - 'SecurityGroups': [ - { 'GroupId': 'sg-123' - }, - ], - 'UserData': 'some user data', - 'InstanceType': 't2.small', - 'BlockDeviceMappings': [ - { 'VirtualName': 'string', 'DeviceName': 'string', 'Ebs': { 'SnapshotId': 'string', 'VolumeSize': 123, - 'DeleteOnTermination': True|False, 'VolumeType': 'standard', 'Iops': 123, - 'Encrypted': True|False }, - 'NoDevice': 'string' - }, - ], - 'Monitoring': { - 'Enabled': True }, - 'SubnetId': subnet_id, - 'IamInstanceProfile': { - 'Arn': 'arn:aws:iam::123456789012:role/fleet' - }, - 'EbsOptimized': False, - 'WeightedCapacity': 2.0, - 'SpotPrice': '0.13' + 'ImageId': 'ami-123', + 'KeyName': 'my-key', + 'SecurityGroups': [ + { 'GroupId': 'sg-123' + }, + ], + 'UserData': 'some user data', + 'InstanceType': 't2.small', + 'BlockDeviceMappings': [ + { 'VirtualName': 'string', 'DeviceName': 'string', 'Ebs': { 'SnapshotId': 'string', 'VolumeSize': 123, + 'DeleteOnTermination': True | False, 'VolumeType': 'standard', 'Iops': 123, + 'Encrypted': True | False }, + 'NoDevice': 'string' }, + ], + 'Monitoring': { + 'Enabled': True + }, + 'SubnetId': subnet_id, + 'IamInstanceProfile': { + 'Arn': 'arn:aws:iam::123456789012:role/fleet' + }, + 'EbsOptimized': False, + 'WeightedCapacity': 2.0, + 'SpotPrice': '0.13' }, { - 'ImageId': 'ami-123', - 'KeyName': 'my-key', - 'SecurityGroups': [ - { - 'GroupId': 'sg-123' - }, - ], - 'UserData': 'some user data', - 'InstanceType': 't2.large', - 'Monitoring': { - 'Enabled': True + 'ImageId': 'ami-123', + 'KeyName': 'my-key', + 'SecurityGroups': [ + { + 'GroupId': 'sg-123' }, - 'SubnetId': subnet_id, - 'IamInstanceProfile': { - 'Arn': 'arn:aws:iam::123456789012:role/fleet' - }, - 'EbsOptimized': False, - 'WeightedCapacity': 4.0, - 'SpotPrice': '10.00', + ], + 'UserData': 'some user data', + 'InstanceType': 't2.large', + 'Monitoring': { + 'Enabled': True + }, + 'SubnetId': subnet_id, + 'IamInstanceProfile': { + 'Arn': 'arn:aws:iam::123456789012:role/fleet' + }, + 'EbsOptimized': False, + 'WeightedCapacity': 4.0, + 'SpotPrice': '10.00', }], 'AllocationStrategy': allocation_strategy, 'FulfilledCapacity': 6, @@ -89,7 +91,8 @@ def test_create_spot_fleet_with_lowest_price(): ) spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - spot_fleet_requests = conn.describe_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] +
spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] len(spot_fleet_requests).should.equal(1) spot_fleet_request = spot_fleet_requests[0] spot_fleet_request['SpotFleetRequestState'].should.equal("active") @@ -97,7 +100,8 @@ def test_create_spot_fleet_with_lowest_price(): spot_fleet_config['SpotPrice'].should.equal('0.12') spot_fleet_config['TargetCapacity'].should.equal(6) - spot_fleet_config['IamFleetRole'].should.equal('arn:aws:iam::123456789012:role/fleet') + spot_fleet_config['IamFleetRole'].should.equal( + 'arn:aws:iam::123456789012:role/fleet') spot_fleet_config['AllocationStrategy'].should.equal('lowestPrice') spot_fleet_config['FulfilledCapacity'].should.equal(6.0) @@ -106,7 +110,8 @@ def test_create_spot_fleet_with_lowest_price(): launch_spec['EbsOptimized'].should.equal(False) launch_spec['SecurityGroups'].should.equal([{"GroupId": "sg-123"}]) - launch_spec['IamInstanceProfile'].should.equal({"Arn": "arn:aws:iam::123456789012:role/fleet"}) + launch_spec['IamInstanceProfile'].should.equal( + {"Arn": "arn:aws:iam::123456789012:role/fleet"}) launch_spec['ImageId'].should.equal("ami-123") launch_spec['InstanceType'].should.equal("t2.small") launch_spec['KeyName'].should.equal("my-key") @@ -116,7 +121,8 @@ def test_create_spot_fleet_with_lowest_price(): launch_spec['UserData'].should.equal("some user data") launch_spec['WeightedCapacity'].should.equal(2.0) - instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id) + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) instances = instance_res['ActiveInstances'] len(instances).should.equal(3) @@ -125,14 +131,16 @@ def test_create_spot_fleet_with_lowest_price(): def test_create_diversified_spot_fleet(): conn = boto3.client("ec2", region_name='us-west-2') subnet_id = get_subnet_id(conn) - diversified_config = spot_config(subnet_id, allocation_strategy='diversified') + diversified_config = spot_config( + subnet_id, allocation_strategy='diversified') spot_fleet_res = conn.request_spot_fleet( SpotFleetRequestConfig=diversified_config ) spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id) + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) instances = instance_res['ActiveInstances'] len(instances).should.equal(2) instance_types = set([instance['InstanceType'] for instance in instances]) @@ -150,7 +158,9 @@ def test_cancel_spot_fleet_request(): ) spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - conn.cancel_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_id], TerminateInstances=True) + conn.cancel_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id], TerminateInstances=True) - spot_fleet_requests = conn.describe_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] len(spot_fleet_requests).should.equal(0) diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 2d3cb3036..5c3bdff12 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -18,7 +18,8 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds def test_request_spot_instances(): conn = boto3.client('ec2', 'us-east-1') vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - 
subnet = conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] conn.create_security_group(GroupName='group1', Description='description') @@ -53,29 +54,31 @@ def test_request_spot_instances(): DryRun=True, ) ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal('An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') request = conn.request_spot_instances( - SpotPrice="0.5", InstanceCount=1, Type='one-time', - ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", - AvailabilityZoneGroup='my-group', - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - "KeyName": "test", - "SecurityGroups": ['group1', 'group2'], - "UserData": b"some test data", - "InstanceType": 'm1.small', - "Placement": { - "AvailabilityZone": 'us-east-1c', - }, - "KernelId": "test-kernel", - "RamdiskId": "test-ramdisk", - "Monitoring": { - "Enabled": True, - }, - "SubnetId": subnet_id, + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": "test", + "SecurityGroups": ['group1', 'group2'], + "UserData": b"some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, ) requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] @@ -91,7 +94,8 @@ def test_request_spot_instances(): request['AvailabilityZoneGroup'].should.equal('my-group') launch_spec = request['LaunchSpecification'] - security_group_names = [group['GroupName'] for group in launch_spec['SecurityGroups']] + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] set(security_group_names).should.equal(set(['group1', 'group2'])) launch_spec['ImageId'].should.equal('ami-abcd1234') @@ -112,7 +116,7 @@ def test_request_spot_instances_default_arguments(): request = conn.request_spot_instances( SpotPrice="0.5", LaunchSpecification={ - "ImageId": 'ami-abcd1234', + "ImageId": 'ami-abcd1234', } ) @@ -130,7 +134,8 @@ def test_request_spot_instances_default_arguments(): launch_spec = request['LaunchSpecification'] - security_group_names = [group['GroupName'] for group in launch_spec['SecurityGroups']] + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] security_group_names.should.equal(["default"]) launch_spec['ImageId'].should.equal('ami-abcd1234') @@ -152,12 +157,12 @@ def test_cancel_spot_instance_request(): requests = conn.get_all_spot_instance_requests() requests.should.have.length_of(1) - with assert_raises(EC2ResponseError) as ex: conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) 
ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') conn.cancel_spot_instance_requests([requests[0].id]) @@ -239,10 +244,12 @@ def test_get_all_spot_instance_requests_filtering(): requests = conn.get_all_spot_instance_requests(filters={'state': 'open'}) requests.should.have.length_of(3) - requests = conn.get_all_spot_instance_requests(filters={'tag:tag1': 'value1'}) + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1'}) requests.should.have.length_of(2) - requests = conn.get_all_spot_instance_requests(filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) requests.should.have.length_of(1) @@ -259,4 +266,3 @@ def test_request_spot_instances_setting_instance_id(): request = conn.get_all_spot_instance_requests()[0] assert request.state == 'active' assert request.instance_id == 'i-12345678' - diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 0a9b41b8e..38565a28f 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -69,7 +69,8 @@ def test_subnet_tagging(): def test_subnet_should_have_proper_availability_zone_set(): conn = boto.vpc.connect_to_region('us-west-1') vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet(vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b') + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b') subnetA.availability_zone.should.equal('us-west-1b') @@ -82,7 +83,8 @@ def test_default_subnet(): default_vpc.reload() default_vpc.is_default.should.be.ok - subnet = ec2.create_subnet(VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') + subnet = ec2.create_subnet( + VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') subnet.reload() subnet.map_public_ip_on_launch.shouldnt.be.ok @@ -109,7 +111,8 @@ def test_boto3_non_default_subnet(): vpc.reload() vpc.is_default.shouldnt.be.ok - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') subnet.reload() subnet.map_public_ip_on_launch.shouldnt.be.ok @@ -122,7 +125,8 @@ def test_modify_subnet_attribute(): # Get the default VPC vpc = list(ec2.vpcs.all())[0] - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action subnet.reload() @@ -130,11 +134,13 @@ def test_modify_subnet_attribute(): # For non default subnet, attribute value should be 'False' subnet.map_public_ip_on_launch.shouldnt.be.ok - client.modify_subnet_attribute(SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False}) + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False}) subnet.reload() subnet.map_public_ip_on_launch.shouldnt.be.ok - client.modify_subnet_attribute(SubnetId=subnet.id, 
MapPublicIpOnLaunch={'Value': True}) + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True}) subnet.reload() subnet.map_public_ip_on_launch.should.be.ok @@ -144,10 +150,12 @@ def test_modify_subnet_attribute_validation(): ec2 = boto3.resource('ec2', region_name='us-west-1') client = boto3.client('ec2', region_name='us-west-1') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') with assert_raises(ParamValidationError): - client.modify_subnet_attribute(SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) @mock_ec2_deprecated @@ -155,10 +163,13 @@ def test_get_subnets_filtering(): ec2 = boto.ec2.connect_to_region('us-west-1') conn = boto.vpc.connect_to_region('us-west-1') vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet(vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') vpcB = conn.create_vpc("10.0.0.0/16") - subnetB1 = conn.create_subnet(vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') - subnetB2 = conn.create_subnet(vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') + subnetB1 = conn.create_subnet( + vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetB2 = conn.create_subnet( + vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') all_subnets = conn.get_all_subnets() all_subnets.should.have.length_of(3 + len(ec2.get_all_zones())) @@ -166,25 +177,33 @@ def test_get_subnets_filtering(): # Filter by VPC ID subnets_by_vpc = conn.get_all_subnets(filters={'vpc-id': vpcB.id}) subnets_by_vpc.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_vpc]).should.equal(set([subnetB1.id, subnetB2.id])) + set([subnet.id for subnet in subnets_by_vpc]).should.equal( + set([subnetB1.id, subnetB2.id])) # Filter by CIDR variations subnets_by_cidr1 = conn.get_all_subnets(filters={'cidr': "10.0.0.0/24"}) subnets_by_cidr1.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr1]).should.equal(set([subnetA.id, subnetB1.id])) + set([subnet.id for subnet in subnets_by_cidr1] + ).should.equal(set([subnetA.id, subnetB1.id])) - subnets_by_cidr2 = conn.get_all_subnets(filters={'cidr-block': "10.0.0.0/24"}) + subnets_by_cidr2 = conn.get_all_subnets( + filters={'cidr-block': "10.0.0.0/24"}) subnets_by_cidr2.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr2]).should.equal(set([subnetA.id, subnetB1.id])) + set([subnet.id for subnet in subnets_by_cidr2] + ).should.equal(set([subnetA.id, subnetB1.id])) - subnets_by_cidr3 = conn.get_all_subnets(filters={'cidrBlock': "10.0.0.0/24"}) + subnets_by_cidr3 = conn.get_all_subnets( + filters={'cidrBlock': "10.0.0.0/24"}) subnets_by_cidr3.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr3]).should.equal(set([subnetA.id, subnetB1.id])) + set([subnet.id for subnet in subnets_by_cidr3] + ).should.equal(set([subnetA.id, subnetB1.id])) # Filter by VPC ID and CIDR - subnets_by_vpc_and_cidr = conn.get_all_subnets(filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) + subnets_by_vpc_and_cidr = conn.get_all_subnets( + filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) subnets_by_vpc_and_cidr.should.have.length_of(1) - set([subnet.id for subnet in 
subnets_by_vpc_and_cidr]).should.equal(set([subnetB1.id])) + set([subnet.id for subnet in subnets_by_vpc_and_cidr] + ).should.equal(set([subnetB1.id])) # Filter by subnet ID subnets_by_id = conn.get_all_subnets(filters={'subnet-id': subnetA.id}) @@ -192,9 +211,11 @@ def test_get_subnets_filtering(): set([subnet.id for subnet in subnets_by_id]).should.equal(set([subnetA.id])) # Filter by availabilityZone - subnets_by_az = conn.get_all_subnets(filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id}) + subnets_by_az = conn.get_all_subnets( + filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id}) subnets_by_az.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_az]).should.equal(set([subnetB1.id])) + set([subnet.id for subnet in subnets_by_az] + ).should.equal(set([subnetB1.id])) # Filter by defaultForAz @@ -202,7 +223,8 @@ def test_get_subnets_filtering(): subnets_by_az.should.have.length_of(len(conn.get_all_zones())) # Unsupported filter - conn.get_all_subnets.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + conn.get_all_subnets.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) @mock_ec2_deprecated diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 23b7d0bd4..bb3a8d36b 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -22,11 +22,13 @@ def test_add_tag(): instance.add_tag("a key", "some value", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') instance.add_tag("a key", "some value") chain = itertools.chain.from_iterable - existing_instances = list(chain([res.instances for res in conn.get_all_instances()])) + existing_instances = list( + chain([res.instances for res in conn.get_all_instances()])) existing_instances.should.have.length_of(1) existing_instance = existing_instances[0] existing_instance.tags["a key"].should.equal("some value") @@ -49,7 +51,8 @@ def test_remove_tag(): instance.remove_tag("a key", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set') instance.remove_tag("a key") conn.get_all_tags().should.have.length_of(0) @@ -100,12 +103,15 @@ def test_create_tags(): conn.create_tags(instance.id, tag_dict, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') conn.create_tags(instance.id, tag_dict) tags = conn.get_all_tags() - set([key for key in 
tag_dict]).should.equal(set([tag.name for tag in tags])) - set([tag_dict[key] for key in tag_dict]).should.equal(set([tag.value for tag in tags])) + set([key for key in tag_dict]).should.equal( + set([tag.name for tag in tags])) + set([tag_dict[key] for key in tag_dict]).should.equal( + set([tag.value for tag in tags])) @mock_ec2_deprecated @@ -115,7 +121,7 @@ def test_tag_limit_exceeded(): instance = reservation.instances[0] tag_dict = {} for i in range(51): - tag_dict['{0:02d}'.format(i+1)] = '' + tag_dict['{0:02d}'.format(i + 1)] = '' with assert_raises(EC2ResponseError) as cm: conn.create_tags(instance.id, tag_dict) @@ -342,7 +348,8 @@ def test_retrieved_snapshots_must_contain_their_tags(): tag_key = 'Tag name' tag_value = 'Tag value' tags_to_be_set = {tag_key: tag_value} - conn = boto.connect_ec2(aws_access_key_id='the_key', aws_secret_access_key='the_secret') + conn = boto.connect_ec2(aws_access_key_id='the_key', + aws_secret_access_key='the_secret') volume = conn.create_volume(80, "eu-west-1a") snapshot = conn.create_snapshot(volume.id) conn.create_tags([snapshot.id], tags_to_be_set) @@ -361,7 +368,8 @@ def test_retrieved_snapshots_must_contain_their_tags(): @mock_ec2_deprecated def test_filter_instances_by_wildcard_tags(): - conn = boto.connect_ec2(aws_access_key_id='the_key', aws_secret_access_key='the_secret') + conn = boto.connect_ec2(aws_access_key_id='the_key', + aws_secret_access_key='the_secret') reservation = conn.run_instances('ami-1234abcd') instance_a = reservation.instances[0] instance_a.add_tag("Key1", "Value1") diff --git a/tests/test_ec2/test_virtual_private_gateways.py b/tests/test_ec2/test_virtual_private_gateways.py index 0a7e34ea5..d90e97b45 100644 --- a/tests/test_ec2/test_virtual_private_gateways.py +++ b/tests/test_ec2/test_virtual_private_gateways.py @@ -16,6 +16,7 @@ def test_virtual_private_gateways(): vpn_gateway.state.should.equal('available') vpn_gateway.availability_zone.should.equal('us-east-1a') + @mock_ec2_deprecated def test_describe_vpn_gateway(): conn = boto.connect_vpc('the_key', 'the_secret') diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index c6a2feffb..6722eed60 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -93,4 +93,3 @@ def test_vpc_peering_connections_delete(): cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index c4dbf788e..904603f6d 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -42,13 +42,16 @@ def test_vpc_defaults(): conn.get_all_vpcs().should.have.length_of(2) conn.get_all_route_tables().should.have.length_of(2) - conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(1) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(1) vpc.delete() conn.get_all_vpcs().should.have.length_of(1) conn.get_all_route_tables().should.have.length_of(1) - conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(0) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(0) + @mock_ec2_deprecated def test_vpc_isdefault_filter(): @@ -80,6 +83,7 @@ def test_vpc_state_available_filter(): vpc.delete() conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) + @mock_ec2_deprecated def test_vpc_tagging(): conn = boto.connect_vpc() @@ -127,7 
+131,8 @@ def test_vpc_get_by_cidr_block(): @mock_ec2_deprecated def test_vpc_get_by_dhcp_options_id(): conn = boto.connect_vpc() - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") conn.create_vpc("10.0.0.0/24") @@ -284,6 +289,7 @@ def test_non_default_vpc(): attr = response.get('EnableDnsHostnames') attr.get('Value').shouldnt.be.ok + @mock_ec2 def test_vpc_dedicated_tenancy(): ec2 = boto3.resource('ec2', region_name='us-west-1') @@ -298,6 +304,7 @@ def test_vpc_dedicated_tenancy(): vpc.instance_tenancy.should.equal('dedicated') + @mock_ec2 def test_vpc_modify_enable_dns_support(): ec2 = boto3.resource('ec2', region_name='us-west-1') @@ -339,10 +346,12 @@ def test_vpc_modify_enable_dns_hostnames(): attr = response.get('EnableDnsHostnames') attr.get('Value').should.be.ok + @mock_ec2_deprecated def test_vpc_associate_dhcp_options(): conn = boto.connect_vpc() - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) vpc = conn.create_vpc("10.0.0.0/16") conn.associate_dhcp_options(dhcp_options.id, vpc.id) diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py index 864c1c3ee..e95aa76ee 100644 --- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -10,27 +10,32 @@ from moto import mock_ec2_deprecated @mock_ec2_deprecated def test_create_vpn_connections(): conn = boto.connect_vpc('the_key', 'the_secret') - vpn_connection = conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + vpn_connection = conn.create_vpn_connection( + 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') vpn_connection.should_not.be.none vpn_connection.id.should.match(r'vpn-\w+') vpn_connection.type.should.equal('ipsec.1') + @mock_ec2_deprecated def test_delete_vpn_connections(): conn = boto.connect_vpc('the_key', 'the_secret') - vpn_connection = conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + vpn_connection = conn.create_vpn_connection( + 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') list_of_vpn_connections = conn.get_all_vpn_connections() list_of_vpn_connections.should.have.length_of(1) conn.delete_vpn_connection(vpn_connection.id) list_of_vpn_connections = conn.get_all_vpn_connections() list_of_vpn_connections.should.have.length_of(0) + @mock_ec2_deprecated def test_delete_vpn_connections_bad_id(): conn = boto.connect_vpc('the_key', 'the_secret') with assert_raises(EC2ResponseError): conn.delete_vpn_connection('vpn-0123abcd') + @mock_ec2_deprecated def test_describe_vpn_connections(): conn = boto.connect_vpc('the_key', 'the_secret') diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index f073628a9..044d827c9 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -20,7 +20,8 @@ def test_create_cluster(): clusterName='test_ecs_cluster' ) response['cluster']['clusterName'].should.equal('test_ecs_cluster') - response['cluster']['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['cluster']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') response['cluster']['status'].should.equal('ACTIVE') response['cluster']['registeredContainerInstancesCount'].should.equal(0) 
response['cluster']['runningTasksCount'].should.equal(0) @@ -38,8 +39,10 @@ def test_list_clusters(): clusterName='test_cluster1' ) response = client.list_clusters() - response['clusterArns'].should.contain('arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0') - response['clusterArns'].should.contain('arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') + response['clusterArns'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0') + response['clusterArns'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') @mock_ecs @@ -50,7 +53,8 @@ def test_delete_cluster(): ) response = client.delete_cluster(cluster='test_ecs_cluster') response['cluster']['clusterName'].should.equal('test_ecs_cluster') - response['cluster']['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['cluster']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') response['cluster']['status'].should.equal('ACTIVE') response['cluster']['registeredContainerInstancesCount'].should.equal(0) response['cluster']['runningTasksCount'].should.equal(0) @@ -82,15 +86,24 @@ def test_register_task_definition(): ] ) type(response['taskDefinition']).should.be(dict) - response['taskDefinition']['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinition']['containerDefinitions'][0]['name'].should.equal('hello_world') - response['taskDefinition']['containerDefinitions'][0]['image'].should.equal('docker/hello-world:latest') - response['taskDefinition']['containerDefinitions'][0]['cpu'].should.equal(1024) - response['taskDefinition']['containerDefinitions'][0]['memory'].should.equal(400) - response['taskDefinition']['containerDefinitions'][0]['essential'].should.equal(True) - response['taskDefinition']['containerDefinitions'][0]['environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') - response['taskDefinition']['containerDefinitions'][0]['environment'][0]['value'].should.equal('SOME_ACCESS_KEY') - response['taskDefinition']['containerDefinitions'][0]['logConfiguration']['logDriver'].should.equal('json-file') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinition']['containerDefinitions'][ + 0]['name'].should.equal('hello_world') + response['taskDefinition']['containerDefinitions'][0][ + 'image'].should.equal('docker/hello-world:latest') + response['taskDefinition']['containerDefinitions'][ + 0]['cpu'].should.equal(1024) + response['taskDefinition']['containerDefinitions'][ + 0]['memory'].should.equal(400) + response['taskDefinition']['containerDefinitions'][ + 0]['essential'].should.equal(True) + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') + response['taskDefinition']['containerDefinitions'][0][ + 'logConfiguration']['logDriver'].should.equal('json-file') @mock_ecs @@ -132,8 +145,10 @@ def test_list_task_definitions(): ) response = client.list_task_definitions() len(response['taskDefinitionArns']).should.equal(2) - response['taskDefinitionArns'][0].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - 
response['taskDefinitionArns'][1].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') + response['taskDefinitionArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinitionArns'][1].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') @mock_ecs @@ -191,10 +206,13 @@ def test_describe_task_definition(): ] ) response = client.describe_task_definition(taskDefinition='test_ecs_task') - response['taskDefinition']['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:3') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:3') - response = client.describe_task_definition(taskDefinition='test_ecs_task:2') - response['taskDefinition']['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') + response = client.describe_task_definition( + taskDefinition='test_ecs_task:2') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') @mock_ecs @@ -221,15 +239,24 @@ def test_deregister_task_definition(): taskDefinition='test_ecs_task:1' ) type(response['taskDefinition']).should.be(dict) - response['taskDefinition']['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinition']['containerDefinitions'][0]['name'].should.equal('hello_world') - response['taskDefinition']['containerDefinitions'][0]['image'].should.equal('docker/hello-world:latest') - response['taskDefinition']['containerDefinitions'][0]['cpu'].should.equal(1024) - response['taskDefinition']['containerDefinitions'][0]['memory'].should.equal(400) - response['taskDefinition']['containerDefinitions'][0]['essential'].should.equal(True) - response['taskDefinition']['containerDefinitions'][0]['environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') - response['taskDefinition']['containerDefinitions'][0]['environment'][0]['value'].should.equal('SOME_ACCESS_KEY') - response['taskDefinition']['containerDefinitions'][0]['logConfiguration']['logDriver'].should.equal('json-file') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinition']['containerDefinitions'][ + 0]['name'].should.equal('hello_world') + response['taskDefinition']['containerDefinitions'][0][ + 'image'].should.equal('docker/hello-world:latest') + response['taskDefinition']['containerDefinitions'][ + 0]['cpu'].should.equal(1024) + response['taskDefinition']['containerDefinitions'][ + 0]['memory'].should.equal(400) + response['taskDefinition']['containerDefinitions'][ + 0]['essential'].should.equal(True) + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') + response['taskDefinition']['containerDefinitions'][0][ + 'logConfiguration']['logDriver'].should.equal('json-file') @mock_ecs @@ -261,16 +288,19 @@ def test_create_service(): taskDefinition='test_ecs_task', desiredCount=2 ) - response['service']['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['clusterArn'].should.equal( + 
'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') response['service']['desiredCount'].should.equal(2) len(response['service']['events']).should.equal(0) len(response['service']['loadBalancers']).should.equal(0) response['service']['pendingCount'].should.equal(0) response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') response['service']['serviceName'].should.equal('test_ecs_service') response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') @mock_ecs @@ -312,8 +342,10 @@ def test_list_services(): cluster='test_ecs_cluster' ) len(response['serviceArns']).should.equal(2) - response['serviceArns'][0].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['serviceArns'][1].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['serviceArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['serviceArns'][1].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') @mock_ecs @@ -359,12 +391,15 @@ def test_describe_services(): ) response = client.describe_services( cluster='test_ecs_cluster', - services=['test_ecs_service1', 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2'] + services=['test_ecs_service1', + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2'] ) len(response['services']).should.equal(2) - response['services'][0]['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['services'][0]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') response['services'][0]['serviceName'].should.equal('test_ecs_service1') - response['services'][1]['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['services'][1]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') response['services'][1]['serviceName'].should.equal('test_ecs_service2') @@ -446,16 +481,20 @@ def test_delete_service(): cluster='test_ecs_cluster', service='test_ecs_service' ) - response['service']['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') response['service']['desiredCount'].should.equal(0) len(response['service']['events']).should.equal(0) len(response['service']['loadBalancers']).should.equal(0) response['service']['pendingCount'].should.equal(0) response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') response['service']['serviceName'].should.equal('test_ecs_service') response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + 
response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + @mock_ec2 @mock_ecs @@ -484,18 +523,23 @@ def test_register_container_instance(): instanceIdentityDocument=instance_id_document ) - response['containerInstance']['ec2InstanceId'].should.equal(test_instance.id) + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) full_arn = response['containerInstance']['containerInstanceArn'] arn_part = full_arn.split('/') - arn_part[0].should.equal('arn:aws:ecs:us-east-1:012345678910:container-instance') + arn_part[0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:container-instance') arn_part[1].should.equal(str(UUID(arn_part[1]))) response['containerInstance']['status'].should.equal('ACTIVE') len(response['containerInstance']['registeredResources']).should.equal(0) len(response['containerInstance']['remainingResources']).should.equal(0) response['containerInstance']['agentConnected'].should.equal(True) - response['containerInstance']['versionInfo']['agentVersion'].should.equal('1.0.0') - response['containerInstance']['versionInfo']['agentHash'].should.equal('4023248') - response['containerInstance']['versionInfo']['dockerVersion'].should.equal('DockerVersion: 1.5.0') + response['containerInstance']['versionInfo'][ + 'agentVersion'].should.equal('1.0.0') + response['containerInstance']['versionInfo'][ + 'agentHash'].should.equal('4023248') + response['containerInstance']['versionInfo'][ + 'dockerVersion'].should.equal('DockerVersion: 1.5.0') @mock_ec2 @@ -526,7 +570,8 @@ def test_list_container_instances(): cluster=test_cluster_name, instanceIdentityDocument=instance_id_document) - test_instance_arns.append(response['containerInstance']['containerInstanceArn']) + test_instance_arns.append(response['containerInstance'][ + 'containerInstanceArn']) response = ecs_client.list_container_instances(cluster=test_cluster_name) @@ -563,13 +608,17 @@ def test_describe_container_instances(): cluster=test_cluster_name, instanceIdentityDocument=instance_id_document) - test_instance_arns.append(response['containerInstance']['containerInstanceArn']) + test_instance_arns.append(response['containerInstance'][ + 'containerInstanceArn']) - test_instance_ids = list(map((lambda x: x.split('/')[1]), test_instance_arns)) - response = ecs_client.describe_container_instances(cluster=test_cluster_name, containerInstances=test_instance_ids) + test_instance_ids = list( + map((lambda x: x.split('/')[1]), test_instance_arns)) + response = ecs_client.describe_container_instances( + cluster=test_cluster_name, containerInstances=test_instance_ids) len(response['failures']).should.equal(0) len(response['containerInstances']).should.equal(instance_to_create) - response_arns = [ci['containerInstanceArn'] for ci in response['containerInstances']] + response_arns = [ci['containerInstanceArn'] + for ci in response['containerInstances']] for arn in test_instance_arns: response_arns.should.contain(arn) @@ -626,10 +675,14 @@ def test_run_task(): startedBy='moto' ) len(response['tasks']).should.equal(2) - response['tasks'][0]['taskArn'].should.contain('arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - 
response['tasks'][0]['containerInstanceArn'].should.contain('arn:aws:ecs:us-east-1:012345678910:container-instance/') + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/') response['tasks'][0]['overrides'].should.equal({}) response['tasks'][0]['lastStatus'].should.equal("RUNNING") response['tasks'][0]['desiredStatus'].should.equal("RUNNING") @@ -664,8 +717,10 @@ def test_start_task(): instanceIdentityDocument=instance_id_document ) - container_instances = client.list_container_instances(cluster=test_cluster_name) - container_instance_id = container_instances['containerInstanceArns'][0].split('/')[-1] + container_instances = client.list_container_instances( + cluster=test_cluster_name) + container_instance_id = container_instances[ + 'containerInstanceArns'][0].split('/')[-1] _ = client.register_task_definition( family='test_ecs_task', @@ -694,10 +749,14 @@ def test_start_task(): ) len(response['tasks']).should.equal(1) - response['tasks'][0]['taskArn'].should.contain('arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['tasks'][0]['containerInstanceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:container-instance/{0}'.format(container_instance_id)) + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/{0}'.format(container_instance_id)) response['tasks'][0]['overrides'].should.equal({}) response['tasks'][0]['lastStatus'].should.equal("RUNNING") response['tasks'][0]['desiredStatus'].should.equal("RUNNING") @@ -732,8 +791,10 @@ def test_list_tasks(): instanceIdentityDocument=instance_id_document ) - container_instances = client.list_container_instances(cluster=test_cluster_name) - container_instance_id = container_instances['containerInstanceArns'][0].split('/')[-1] + container_instances = client.list_container_instances( + cluster=test_cluster_name) + container_instance_id = container_instances[ + 'containerInstanceArns'][0].split('/')[-1] _ = client.register_task_definition( family='test_ecs_task', @@ -770,7 +831,8 @@ def test_list_tasks(): ) assert len(client.list_tasks()['taskArns']).should.equal(2) - assert len(client.list_tasks(cluster='test_ecs_cluster')['taskArns']).should.equal(2) + assert len(client.list_tasks(cluster='test_ecs_cluster') + ['taskArns']).should.equal(2) assert len(client.list_tasks(startedBy='foo')['taskArns']).should.equal(1) @@ -819,7 +881,7 @@ def test_describe_tasks(): ] ) tasks_arns = [ - task['taskArn'] for task in client.run_task( + task['taskArn'] for task in client.run_task( cluster='test_ecs_cluster', 
overrides={}, taskDefinition='test_ecs_task', @@ -833,7 +895,8 @@ def test_describe_tasks(): ) len(response['tasks']).should.equal(2) - set([response['tasks'][0]['taskArn'], response['tasks'][1]['taskArn']]).should.equal(set(tasks_arns)) + set([response['tasks'][0]['taskArn'], response['tasks'] + [1]['taskArn']]).should.equal(set(tasks_arns)) @mock_ecs @@ -858,9 +921,11 @@ def describe_task_definition(): family = task_definition['family'] task = client.describe_task_definition(taskDefinition=family) task['containerDefinitions'][0].should.equal(container_definition) - task['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task2:1') + task['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task2:1') task['volumes'].should.equal([]) + @mock_ec2 @mock_ecs def test_stop_task(): @@ -918,7 +983,8 @@ def test_stop_task(): reason='moto testing' ) - stop_response['task']['taskArn'].should.equal(run_response['tasks'][0].get('taskArn')) + stop_response['task']['taskArn'].should.equal( + run_response['tasks'][0].get('taskArn')) stop_response['task']['lastStatus'].should.equal('STOPPED') stop_response['task']['desiredStatus'].should.equal('STOPPED') stop_response['task']['stoppedReason'].should.equal('moto testing') @@ -967,7 +1033,8 @@ def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement } } template2 = deepcopy(template1) - template2['Resources']['testCluster']['Properties']['ClusterName'] = 'testcluster2' + template2['Resources']['testCluster'][ + 'Properties']['ClusterName'] = 'testcluster2' template1_json = json.dumps(template1) cfn_conn = boto3.client('cloudformation', region_name='us-west-1') stack_resp = cfn_conn.create_stack( @@ -994,18 +1061,18 @@ def test_create_task_definition_through_cloudformation(): "Description": "ECS Cluster Test CloudFormation", "Resources": { "testTaskDefinition": { - "Type" : "AWS::ECS::TaskDefinition", - "Properties" : { - "ContainerDefinitions" : [ + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ { "Name": "ecs-sample", - "Image":"amazon/amazon-ecs-sample", + "Image": "amazon/amazon-ecs-sample", "Cpu": "200", "Memory": "500", "Essential": "true" } ], - "Volumes" : [], + "Volumes": [], } } } @@ -1030,19 +1097,19 @@ def test_update_task_definition_family_through_cloudformation_should_trigger_a_r "Description": "ECS Cluster Test CloudFormation", "Resources": { "testTaskDefinition": { - "Type" : "AWS::ECS::TaskDefinition", - "Properties" : { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { "Family": "testTaskDefinition1", - "ContainerDefinitions" : [ + "ContainerDefinitions": [ { "Name": "ecs-sample", - "Image":"amazon/amazon-ecs-sample", + "Image": "amazon/amazon-ecs-sample", "Cpu": "200", "Memory": "500", "Essential": "true" } ], - "Volumes" : [], + "Volumes": [], } } } @@ -1055,7 +1122,8 @@ def test_update_task_definition_family_through_cloudformation_should_trigger_a_r ) template2 = deepcopy(template1) - template2['Resources']['testTaskDefinition']['Properties']['Family'] = 'testTaskDefinition2' + template2['Resources']['testTaskDefinition'][ + 'Properties']['Family'] = 'testTaskDefinition2' template2_json = json.dumps(template2) cfn_conn.update_stack( StackName="test_stack", @@ -1065,7 +1133,8 @@ def test_update_task_definition_family_through_cloudformation_should_trigger_a_r ecs_conn = boto3.client('ecs', region_name='us-west-1') resp = ecs_conn.list_task_definitions(familyPrefix='testTaskDefinition') 
len(resp['taskDefinitionArns']).should.equal(1) - resp['taskDefinitionArns'][0].endswith('testTaskDefinition2:1').should.be.true + resp['taskDefinitionArns'][0].endswith( + 'testTaskDefinition2:1').should.be.true @mock_ecs @@ -1082,18 +1151,18 @@ def test_create_service_through_cloudformation(): } }, "testTaskDefinition": { - "Type" : "AWS::ECS::TaskDefinition", - "Properties" : { - "ContainerDefinitions" : [ + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ { "Name": "ecs-sample", - "Image":"amazon/amazon-ecs-sample", + "Image": "amazon/amazon-ecs-sample", "Cpu": "200", "Memory": "500", "Essential": "true" } ], - "Volumes" : [], + "Volumes": [], } }, "testService": { @@ -1132,18 +1201,18 @@ def test_update_service_through_cloudformation_should_trigger_replacement(): } }, "testTaskDefinition": { - "Type" : "AWS::ECS::TaskDefinition", - "Properties" : { - "ContainerDefinitions" : [ + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ { "Name": "ecs-sample", - "Image":"amazon/amazon-ecs-sample", + "Image": "amazon/amazon-ecs-sample", "Cpu": "200", "Memory": "500", "Essential": "true" } ], - "Volumes" : [], + "Volumes": [], } }, "testService": { diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index fa13fc23b..4b5d59d6d 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -20,6 +20,7 @@ import sure # noqa from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated + @mock_elb_deprecated def test_create_load_balancer(): conn = boto.connect_elb() @@ -32,7 +33,8 @@ def test_create_load_balancer(): balancer = balancers[0] balancer.name.should.equal("my-lb") balancer.scheme.should.equal("internal") - set(balancer.availability_zones).should.equal(set(['us-east-1a', 'us-east-1b'])) + set(balancer.availability_zones).should.equal( + set(['us-east-1a', 'us-east-1b'])) listener1 = balancer.listeners[0] listener1.load_balancer_port.should.equal(80) listener1.instance_port.should.equal(8080) @@ -46,7 +48,8 @@ def test_create_load_balancer(): @mock_elb_deprecated def test_getting_missing_elb(): conn = boto.connect_elb() - conn.get_all_load_balancers.when.called_with(load_balancer_names='aaa').should.throw(BotoServerError) + conn.get_all_load_balancers.when.called_with( + load_balancer_names='aaa').should.throw(BotoServerError) @mock_elb_deprecated @@ -63,12 +66,14 @@ def test_create_elb_in_multiple_region(): list(west1_conn.get_all_load_balancers()).should.have.length_of(1) list(west2_conn.get_all_load_balancers()).should.have.length_of(1) + @mock_elb_deprecated def test_create_load_balancer_with_certificate(): conn = boto.connect_elb() zones = ['us-east-1a'] - ports = [(443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')] + ports = [ + (443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')] conn.create_load_balancer('my-lb', zones, ports) balancers = conn.get_all_load_balancers() @@ -80,7 +85,8 @@ def test_create_load_balancer_with_certificate(): listener.load_balancer_port.should.equal(443) listener.instance_port.should.equal(8443) listener.protocol.should.equal("HTTPS") - listener.ssl_certificate_id.should.equal('arn:aws:iam:123456789012:server-certificate/test-cert') + listener.ssl_certificate_id.should.equal( + 'arn:aws:iam:123456789012:server-certificate/test-cert') @mock_elb @@ -89,15 +95,19 @@ def test_create_and_delete_boto3_support(): client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 
'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) - list(client.describe_load_balancers()['LoadBalancerDescriptions']).should.have.length_of(1) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) client.delete_load_balancer( LoadBalancerName='my-lb' ) - list(client.describe_load_balancers()['LoadBalancerDescriptions']).should.have.length_of(0) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(0) + @mock_elb_deprecated def test_add_listener(): @@ -142,23 +152,32 @@ def test_create_and_delete_listener_boto3_support(): client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'http', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) - list(client.describe_load_balancers()['LoadBalancerDescriptions']).should.have.length_of(1) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) client.create_load_balancer_listeners( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':443, 'InstancePort':8443}] + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 8443}] ) balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] list(balancer['ListenerDescriptions']).should.have.length_of(2) - balancer['ListenerDescriptions'][0]['Listener']['Protocol'].should.equal('HTTP') - balancer['ListenerDescriptions'][0]['Listener']['LoadBalancerPort'].should.equal(80) - balancer['ListenerDescriptions'][0]['Listener']['InstancePort'].should.equal(8080) - balancer['ListenerDescriptions'][1]['Listener']['Protocol'].should.equal('TCP') - balancer['ListenerDescriptions'][1]['Listener']['LoadBalancerPort'].should.equal(443) - balancer['ListenerDescriptions'][1]['Listener']['InstancePort'].should.equal(8443) + balancer['ListenerDescriptions'][0][ + 'Listener']['Protocol'].should.equal('HTTP') + balancer['ListenerDescriptions'][0]['Listener'][ + 'LoadBalancerPort'].should.equal(80) + balancer['ListenerDescriptions'][0]['Listener'][ + 'InstancePort'].should.equal(8080) + balancer['ListenerDescriptions'][1][ + 'Listener']['Protocol'].should.equal('TCP') + balancer['ListenerDescriptions'][1]['Listener'][ + 'LoadBalancerPort'].should.equal(443) + balancer['ListenerDescriptions'][1]['Listener'][ + 'InstancePort'].should.equal(8443) @mock_elb_deprecated @@ -189,8 +208,10 @@ def test_get_load_balancers_by_name(): conn.create_load_balancer('my-lb3', zones, ports) conn.get_all_load_balancers().should.have.length_of(3) - conn.get_all_load_balancers(load_balancer_names=['my-lb1']).should.have.length_of(1) - conn.get_all_load_balancers(load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) + conn.get_all_load_balancers( + load_balancer_names=['my-lb1']).should.have.length_of(1) + conn.get_all_load_balancers( + load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) @mock_elb_deprecated @@ -240,7 +261,8 @@ def test_create_health_check_boto3(): client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'http', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) client.configure_health_check( @@ -285,14 
+307,16 @@ def test_register_instances(): @mock_elb def test_register_instances_boto3(): ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances(ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) instance_id1 = response[0].id instance_id2 = response[1].id client = boto3.client('elb', region_name='us-east-1') client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'http', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) client.register_instances_with_load_balancer( @@ -303,7 +327,8 @@ def test_register_instances_boto3(): ] ) balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - instance_ids = [instance['InstanceId'] for instance in balancer['Instances']] + instance_ids = [instance['InstanceId'] + for instance in balancer['Instances']] set(instance_ids).should.equal(set([instance_id1, instance_id2])) @@ -328,18 +353,21 @@ def test_deregister_instances(): balancer.instances.should.have.length_of(1) balancer.instances[0].id.should.equal(instance_id2) + @mock_ec2 @mock_elb def test_deregister_instances_boto3(): ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances(ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) instance_id1 = response[0].id instance_id2 = response[1].id client = boto3.client('elb', region_name='us-east-1') client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'http', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) client.register_instances_with_load_balancer( @@ -403,18 +431,21 @@ def test_connection_draining_attribute(): connection_draining.enabled = True connection_draining.timeout = 60 - conn.modify_lb_attribute("my-lb", "ConnectionDraining", connection_draining) + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) attributes = lb.get_attributes(force=True) attributes.connection_draining.enabled.should.be.true attributes.connection_draining.timeout.should.equal(60) connection_draining.timeout = 30 - conn.modify_lb_attribute("my-lb", "ConnectionDraining", connection_draining) + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) attributes = lb.get_attributes(force=True) attributes.connection_draining.timeout.should.equal(30) connection_draining.enabled = False - conn.modify_lb_attribute("my-lb", "ConnectionDraining", connection_draining) + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) attributes = lb.get_attributes(force=True) attributes.connection_draining.enabled.should.be.false @@ -453,15 +484,18 @@ def test_connection_settings_attribute(): connection_settings = ConnectionSettingAttribute(conn) connection_settings.idle_timeout = 120 - conn.modify_lb_attribute("my-lb", "ConnectingSettings", connection_settings) + conn.modify_lb_attribute( + "my-lb", "ConnectingSettings", connection_settings) attributes = lb.get_attributes(force=True) attributes.connecting_settings.idle_timeout.should.equal(120) connection_settings.idle_timeout = 60 - conn.modify_lb_attribute("my-lb", "ConnectingSettings", connection_settings) + 
conn.modify_lb_attribute( + "my-lb", "ConnectingSettings", connection_settings) attributes = lb.get_attributes(force=True) attributes.connecting_settings.idle_timeout.should.equal(60) + @mock_elb_deprecated def test_create_lb_cookie_stickiness_policy(): conn = boto.connect_elb() @@ -478,9 +512,13 @@ def test_create_lb_cookie_stickiness_policy(): # documentation to be a long numeric. # # To work around that, this value is converted to an int and checked. - cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[0].cookie_expiration_period - int(cookie_expiration_period_response_str).should.equal(cookie_expiration_period) - lb.policies.lb_cookie_stickiness_policies[0].policy_name.should.equal(policy_name) + cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period + int(cookie_expiration_period_response_str).should.equal( + cookie_expiration_period) + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + @mock_elb_deprecated def test_create_lb_cookie_stickiness_policy_no_expiry(): @@ -492,8 +530,11 @@ def test_create_lb_cookie_stickiness_policy_no_expiry(): lb.create_cookie_stickiness_policy(None, policy_name) lb = conn.get_all_load_balancers()[0] - lb.policies.lb_cookie_stickiness_policies[0].cookie_expiration_period.should.be.none - lb.policies.lb_cookie_stickiness_policies[0].policy_name.should.equal(policy_name) + lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period.should.be.none + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + @mock_elb_deprecated def test_create_app_cookie_stickiness_policy(): @@ -506,8 +547,11 @@ def test_create_app_cookie_stickiness_policy(): lb.create_app_cookie_stickiness_policy(cookie_name, policy_name) lb = conn.get_all_load_balancers()[0] - lb.policies.app_cookie_stickiness_policies[0].cookie_name.should.equal(cookie_name) - lb.policies.app_cookie_stickiness_policies[0].policy_name.should.equal(policy_name) + lb.policies.app_cookie_stickiness_policies[ + 0].cookie_name.should.equal(cookie_name) + lb.policies.app_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + @mock_elb_deprecated def test_create_lb_policy(): @@ -516,11 +560,13 @@ def test_create_lb_policy(): lb = conn.create_load_balancer('my-lb', [], ports) policy_name = "ProxyPolicy" - lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', {'ProxyProtocol': True}) + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) lb = conn.get_all_load_balancers()[0] lb.policies.other_policies[0].policy_name.should.equal(policy_name) + @mock_elb_deprecated def test_set_policies_of_listener(): conn = boto.connect_elb() @@ -543,6 +589,7 @@ def test_set_policies_of_listener(): # by contrast to a backend, a listener stores only policy name strings listener.policy_names[0].should.equal(policy_name) + @mock_elb_deprecated def test_set_policies_of_backend_server(): conn = boto.connect_elb() @@ -553,7 +600,8 @@ def test_set_policies_of_backend_server(): # in a real flow, it is necessary first to create a policy, # then to set that policy to the backend - lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', {'ProxyProtocol': True}) + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) lb.set_policies_of_backend_server(instance_port, [policy_name]) lb = conn.get_all_load_balancers()[0] @@ -562,6 +610,7 @@ def test_set_policies_of_backend_server(): # by 
contrast to a listener, a backend stores OtherPolicy objects backend.policies[0].policy_name.should.equal(policy_name) + @mock_ec2_deprecated @mock_elb_deprecated def test_describe_instance_health(): @@ -583,7 +632,8 @@ def test_describe_instance_health(): instances_health = conn.describe_instance_health('my-lb') instances_health.should.have.length_of(2) for instance_health in instances_health: - instance_health.instance_id.should.be.within([instance_id1, instance_id2]) + instance_health.instance_id.should.be.within( + [instance_id1, instance_id2]) instance_health.state.should.equal('InService') instances_health = conn.describe_instance_health('my-lb', [instance_id1]) @@ -597,76 +647,78 @@ def test_add_remove_tags(): client = boto3.client('elb', region_name='us-east-1') client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) - list(client.describe_load_balancers()['LoadBalancerDescriptions']).should.have.length_of(1) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) client.add_tags(LoadBalancerNames=['my-lb'], Tags=[{ - 'Key': 'a', - 'Value': 'b' + 'Key': 'a', + 'Value': 'b' }]) - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags(LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) tags.should.have.key('a').which.should.equal('b') client.add_tags(LoadBalancerNames=['my-lb'], Tags=[{ - 'Key': 'a', - 'Value': 'b' + 'Key': 'a', + 'Value': 'b' }, { - 'Key': 'b', - 'Value': 'b' + 'Key': 'b', + 'Value': 'b' }, { - 'Key': 'c', - 'Value': 'b' + 'Key': 'c', + 'Value': 'b' }, { - 'Key': 'd', - 'Value': 'b' + 'Key': 'd', + 'Value': 'b' }, { - 'Key': 'e', - 'Value': 'b' + 'Key': 'e', + 'Value': 'b' }, { - 'Key': 'f', - 'Value': 'b' + 'Key': 'f', + 'Value': 'b' }, { - 'Key': 'g', - 'Value': 'b' + 'Key': 'g', + 'Value': 'b' }, { - 'Key': 'h', - 'Value': 'b' + 'Key': 'h', + 'Value': 'b' }, { - 'Key': 'i', - 'Value': 'b' + 'Key': 'i', + 'Value': 'b' }, { - 'Key': 'j', - 'Value': 'b' + 'Key': 'j', + 'Value': 'b' }]) client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'k', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) + Tags=[{ + 'Key': 'k', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) client.add_tags(LoadBalancerNames=['my-lb'], Tags=[{ - 'Key': 'j', - 'Value': 'c' + 'Key': 'j', + 'Value': 'c' }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags(LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) tags.should.have.key('a').which.should.equal('b') tags.should.have.key('b').which.should.equal('b') @@ -681,11 +733,12 @@ def test_add_remove_tags(): tags.shouldnt.have.key('k') client.remove_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a' - }]) + Tags=[{ + 'Key': 'a' + }]) - tags = dict([(d['Key'], d['Value']) for d in 
client.describe_tags(LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) tags.shouldnt.have.key('a') tags.should.have.key('b').which.should.equal('b') @@ -698,17 +751,17 @@ def test_add_remove_tags(): tags.should.have.key('i').which.should.equal('b') tags.should.have.key('j').which.should.equal('c') - client.create_load_balancer( LoadBalancerName='other-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':433, 'InstancePort':8433}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 433, 'InstancePort': 8433}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) client.add_tags(LoadBalancerNames=['other-lb'], Tags=[{ - 'Key': 'other', - 'Value': 'something' + 'Key': 'other', + 'Value': 'something' }]) lb_tags = dict([(l['LoadBalancerName'], dict([(d['Key'], d['Value']) for d in l['Tags']])) @@ -718,7 +771,8 @@ def test_add_remove_tags(): lb_tags.should.have.key('other-lb') lb_tags['my-lb'].shouldnt.have.key('other') - lb_tags['other-lb'].should.have.key('other').which.should.equal('something') + lb_tags[ + 'other-lb'].should.have.key('other').which.should.equal('something') @mock_elb @@ -727,15 +781,17 @@ def test_create_with_tags(): client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'], Tags=[{ - 'Key': 'k', - 'Value': 'v' + 'Key': 'k', + 'Value': 'v' }] ) - tags = dict((d['Key'], d['Value']) for d in client.describe_tags(LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) + tags = dict((d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) tags.should.have.key('k').which.should.equal('v') @@ -754,7 +810,8 @@ def test_subnets(): client = boto3.client('elb', region_name='us-east-1') client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], Subnets=[subnet.id] ) @@ -770,5 +827,5 @@ def test_create_load_balancer_duplicate(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] conn.create_load_balancer('my-lb', [], ports) - conn.create_load_balancer.when.called_with('my-lb', [], ports).should.throw(BotoServerError) - + conn.create_load_balancer.when.called_with( + 'my-lb', [], ports).should.throw(BotoServerError) diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py index 4b06d7516..4acd7067c 100644 --- a/tests/test_emr/test_emr.py +++ b/tests/test_emr/test_emr.py @@ -100,7 +100,8 @@ def test_describe_cluster(): # cluster.status.timeline.enddatetime.should.be.a(six.string_types) # cluster.status.timeline.readydatetime.should.be.a(six.string_types) - dict((item.key, item.value) for item in cluster.tags).should.equal(input_tags) + dict((item.key, item.value) + for item in cluster.tags).should.equal(input_tags) cluster.terminationprotected.should.equal('false') cluster.visibletoallusers.should.equal('true') @@ -285,7 +286,8 @@ def test_list_clusters(): y = expected[x.id] x.id.should.equal(y['id']) x.name.should.equal(y['name']) - x.normalizedinstancehours.should.equal(y['normalizedinstancehours']) + x.normalizedinstancehours.should.equal( + y['normalizedinstancehours']) 
x.status.state.should.equal(y['state']) x.status.timeline.creationdatetime.should.be.a(six.string_types) if y['state'] == 'TERMINATED': @@ -371,11 +373,13 @@ def test_run_jobflow_with_instance_groups(): job_id = conn.run_jobflow(instance_groups=input_instance_groups, **run_jobflow_args) job_flow = conn.describe_jobflow(job_id) - int(job_flow.instancecount).should.equal(sum(g.num_instances for g in input_instance_groups)) + int(job_flow.instancecount).should.equal( + sum(g.num_instances for g in input_instance_groups)) for instance_group in job_flow.instancegroups: expected = input_groups[instance_group.name] instance_group.should.have.property('instancegroupid') - int(instance_group.instancerunningcount).should.equal(expected.num_instances) + int(instance_group.instancerunningcount).should.equal( + expected.num_instances) instance_group.instancerole.should.equal(expected.role) instance_group.instancetype.should.equal(expected.type) instance_group.market.should.equal(expected.market) @@ -483,7 +487,8 @@ def test_instance_groups(): conn.add_instance_groups(job_id, input_instance_groups[2:]) jf = conn.describe_jobflow(job_id) - int(jf.instancecount).should.equal(sum(g.num_instances for g in input_instance_groups)) + int(jf.instancecount).should.equal( + sum(g.num_instances for g in input_instance_groups)) for x in jf.instancegroups: y = input_groups[x.name] if hasattr(y, 'bidprice'): @@ -572,7 +577,8 @@ def test_steps(): list(arg.value for arg in step.args).should.have.length_of(8) step.creationdatetime.should.be.a(six.string_types) # step.enddatetime.should.be.a(six.string_types) - step.jar.should.equal('/home/hadoop/contrib/streaming/hadoop-streaming.jar') + step.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') step.laststatechangereason.should.be.a(six.string_types) step.mainclass.should.equal('') step.name.should.be.a(six.string_types) @@ -592,7 +598,8 @@ def test_steps(): '-input', y.input, '-output', y.output, ]) - x.config.jar.should.equal('/home/hadoop/contrib/streaming/hadoop-streaming.jar') + x.config.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') x.config.mainclass.should.equal('') # properties x.should.have.property('id').should.be.a(six.string_types) @@ -610,7 +617,8 @@ def test_steps(): '-input', y.input, '-output', y.output, ]) - x.config.jar.should.equal('/home/hadoop/contrib/streaming/hadoop-streaming.jar') + x.config.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') x.config.mainclass.should.equal('') # properties x.should.have.property('id').should.be.a(six.string_types) diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 4fb5c3d79..4999935c5 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -88,15 +88,20 @@ def test_describe_cluster(): config['Properties'].should.equal(args['Configurations'][0]['Properties']) attrs = cl['Ec2InstanceAttributes'] - attrs['AdditionalMasterSecurityGroups'].should.equal(args['Instances']['AdditionalMasterSecurityGroups']) - attrs['AdditionalSlaveSecurityGroups'].should.equal(args['Instances']['AdditionalSlaveSecurityGroups']) + attrs['AdditionalMasterSecurityGroups'].should.equal( + args['Instances']['AdditionalMasterSecurityGroups']) + attrs['AdditionalSlaveSecurityGroups'].should.equal( + args['Instances']['AdditionalSlaveSecurityGroups']) attrs['Ec2AvailabilityZone'].should.equal('us-east-1a') attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) 
attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) - attrs['EmrManagedMasterSecurityGroup'].should.equal(args['Instances']['EmrManagedMasterSecurityGroup']) - attrs['EmrManagedSlaveSecurityGroup'].should.equal(args['Instances']['EmrManagedSlaveSecurityGroup']) + attrs['EmrManagedMasterSecurityGroup'].should.equal( + args['Instances']['EmrManagedMasterSecurityGroup']) + attrs['EmrManagedSlaveSecurityGroup'].should.equal( + args['Instances']['EmrManagedSlaveSecurityGroup']) attrs['IamInstanceProfile'].should.equal(args['JobFlowRole']) - attrs['ServiceAccessSecurityGroup'].should.equal(args['Instances']['ServiceAccessSecurityGroup']) + attrs['ServiceAccessSecurityGroup'].should.equal( + args['Instances']['ServiceAccessSecurityGroup']) cl['Id'].should.equal(cluster_id) cl['LogUri'].should.equal(args['LogUri']) cl['MasterPublicDnsName'].should.be.a(six.string_types) @@ -222,11 +227,14 @@ def test_describe_job_flow(): ig['State'].should.equal('RUNNING') attrs['KeepJobFlowAliveWhenNoSteps'].should.equal(True) # attrs['MasterInstanceId'].should.be.a(six.string_types) - attrs['MasterInstanceType'].should.equal(args['Instances']['MasterInstanceType']) + attrs['MasterInstanceType'].should.equal( + args['Instances']['MasterInstanceType']) attrs['MasterPublicDnsName'].should.be.a(six.string_types) attrs['NormalizedInstanceHours'].should.equal(0) - attrs['Placement']['AvailabilityZone'].should.equal(args['Instances']['Placement']['AvailabilityZone']) - attrs['SlaveInstanceType'].should.equal(args['Instances']['SlaveInstanceType']) + attrs['Placement']['AvailabilityZone'].should.equal( + args['Instances']['Placement']['AvailabilityZone']) + attrs['SlaveInstanceType'].should.equal( + args['Instances']['SlaveInstanceType']) attrs['TerminationProtected'].should.equal(False) jf['JobFlowId'].should.equal(cluster_id) jf['JobFlowRole'].should.equal(args['JobFlowRole']) @@ -282,14 +290,18 @@ def test_list_clusters(): y = expected[x['Id']] x['Id'].should.equal(y['Id']) x['Name'].should.equal(y['Name']) - x['NormalizedInstanceHours'].should.equal(y['NormalizedInstanceHours']) + x['NormalizedInstanceHours'].should.equal( + y['NormalizedInstanceHours']) x['Status']['State'].should.equal(y['State']) - x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') if y['State'] == 'TERMINATED': - x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'EndDateTime'].should.be.a('datetime.datetime') else: x['Status']['Timeline'].shouldnt.have.key('EndDateTime') - x['Status']['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'ReadyDateTime'].should.be.a('datetime.datetime') marker = resp.get('Marker') if marker is None: break @@ -316,8 +328,10 @@ def test_run_job_flow(): resp['ExecutionStatusDetail']['State'].should.equal('WAITING') resp['JobFlowId'].should.equal(cluster_id) resp['Name'].should.equal(args['Name']) - resp['Instances']['MasterInstanceType'].should.equal(args['Instances']['MasterInstanceType']) - resp['Instances']['SlaveInstanceType'].should.equal(args['Instances']['SlaveInstanceType']) + resp['Instances']['MasterInstanceType'].should.equal( + args['Instances']['MasterInstanceType']) + resp['Instances']['SlaveInstanceType'].should.equal( + args['Instances']['SlaveInstanceType']) resp['LogUri'].should.equal(args['LogUri']) resp['VisibleToAllUsers'].should.equal(args['VisibleToAllUsers']) 
resp['Instances']['NormalizedInstanceHours'].should.equal(0) @@ -333,7 +347,8 @@ def test_run_job_flow_with_invalid_params(): args['AmiVersion'] = '2.4' args['ReleaseLabel'] = 'emr-5.0.0' client.run_job_flow(**args) - ex.exception.response['Error']['Message'].should.contain('ValidationException') + ex.exception.response['Error'][ + 'Message'].should.contain('ValidationException') @mock_emr @@ -378,7 +393,8 @@ def test_run_job_flow_with_instance_groups(): args = deepcopy(run_job_flow_args) args['Instances'] = {'InstanceGroups': input_instance_groups} cluster_id = client.run_job_flow(**args)['JobFlowId'] - groups = client.list_instance_groups(ClusterId=cluster_id)['InstanceGroups'] + groups = client.list_instance_groups(ClusterId=cluster_id)[ + 'InstanceGroups'] for x in groups: y = input_groups[x['Name']] x.should.have.key('Id') @@ -484,10 +500,12 @@ def test_instance_groups(): jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] base_instance_count = jf['Instances']['InstanceCount'] - client.add_instance_groups(JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:]) + client.add_instance_groups( + JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:]) jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Instances']['InstanceCount'].should.equal(sum(g['InstanceCount'] for g in input_instance_groups)) + jf['Instances']['InstanceCount'].should.equal( + sum(g['InstanceCount'] for g in input_instance_groups)) for x in jf['Instances']['InstanceGroups']: y = input_groups[x['Name']] if hasattr(y, 'BidPrice'): @@ -506,7 +524,8 @@ def test_instance_groups(): x['StartDateTime'].should.be.a('datetime.datetime') x['State'].should.equal('RUNNING') - groups = client.list_instance_groups(ClusterId=cluster_id)['InstanceGroups'] + groups = client.list_instance_groups(ClusterId=cluster_id)[ + 'InstanceGroups'] for x in groups: y = input_groups[x['Name']] if hasattr(y, 'BidPrice'): @@ -525,9 +544,11 @@ def test_instance_groups(): x['Status']['State'].should.equal('RUNNING') x['Status']['StateChangeReason']['Code'].should.be.a(six.string_types) # x['Status']['StateChangeReason']['Message'].should.be.a(six.string_types) - x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - x['Status']['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'ReadyDateTime'].should.be.a('datetime.datetime') igs = dict((g['Name'], g) for g in groups) client.modify_instance_groups( @@ -592,14 +613,19 @@ def test_steps(): # x['ExecutionStatusDetail'].should.have.key('EndDateTime') # x['ExecutionStatusDetail'].should.have.key('LastStateChangeReason') # x['ExecutionStatusDetail'].should.have.key('StartDateTime') - x['ExecutionStatusDetail']['State'].should.equal('STARTING' if idx == 0 else 'PENDING') + x['ExecutionStatusDetail']['State'].should.equal( + 'STARTING' if idx == 0 else 'PENDING') x['StepConfig']['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['StepConfig']['HadoopJarStep']['Args'].should.equal(y['HadoopJarStep']['Args']) - x['StepConfig']['HadoopJarStep']['Jar'].should.equal(y['HadoopJarStep']['Jar']) + x['StepConfig']['HadoopJarStep'][ + 'Args'].should.equal(y['HadoopJarStep']['Args']) + x['StepConfig']['HadoopJarStep'][ + 'Jar'].should.equal(y['HadoopJarStep']['Jar']) if 'MainClass' in y['HadoopJarStep']: - 
x['StepConfig']['HadoopJarStep']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) + x['StepConfig']['HadoopJarStep']['MainClass'].should.equal( + y['HadoopJarStep']['MainClass']) if 'Properties' in y['HadoopJarStep']: - x['StepConfig']['HadoopJarStep']['Properties'].should.equal(y['HadoopJarStep']['Properties']) + x['StepConfig']['HadoopJarStep']['Properties'].should.equal( + y['HadoopJarStep']['Properties']) x['StepConfig']['Name'].should.equal(y['Name']) expected = dict((s['Name'], s) for s in input_steps) @@ -617,7 +643,8 @@ def test_steps(): x['Name'].should.equal(y['Name']) x['Status']['State'].should.be.within(['STARTING', 'PENDING']) # StateChangeReason - x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') @@ -631,7 +658,8 @@ def test_steps(): x['Name'].should.equal(y['Name']) x['Status']['State'].should.be.within(['STARTING', 'PENDING']) # StateChangeReason - x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') @@ -640,7 +668,8 @@ def test_steps(): steps.should.have.length_of(1) steps[0]['Id'].should.equal(step_id) - steps = client.list_steps(ClusterId=cluster_id, StepStates=['STARTING'])['Steps'] + steps = client.list_steps(ClusterId=cluster_id, + StepStates=['STARTING'])['Steps'] steps.should.have.length_of(1) steps[0]['Id'].should.equal(step_id) @@ -656,8 +685,10 @@ def test_tags(): client.add_tags(ResourceId=cluster_id, Tags=input_tags) resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] resp['Tags'].should.have.length_of(2) - dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal(dict((t['Key'], t['Value']) for t in input_tags)) + dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal( + dict((t['Key'], t['Value']) for t in input_tags)) - client.remove_tags(ResourceId=cluster_id, TagKeys=[t['Key'] for t in input_tags]) + client.remove_tags(ResourceId=cluster_id, TagKeys=[ + t['Key'] for t in input_tags]) resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] resp['Tags'].should.equal([]) diff --git a/tests/test_glacier/test_glacier_jobs.py b/tests/test_glacier/test_glacier_jobs.py index ef4a00b75..66780f681 100644 --- a/tests/test_glacier/test_glacier_jobs.py +++ b/tests/test_glacier/test_glacier_jobs.py @@ -13,14 +13,16 @@ def test_init_glacier_job(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" conn.create_vault(vault_name) - archive_id = conn.upload_archive(vault_name, "some stuff", "", "", "some description") + archive_id = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") job_response = conn.initiate_job(vault_name, { "ArchiveId": archive_id, "Type": "archive-retrieval", }) job_id = job_response['JobId'] - job_response['Location'].should.equal("//vaults/my_vault/jobs/{0}".format(job_id)) + job_response['Location'].should.equal( + "//vaults/my_vault/jobs/{0}".format(job_id)) @mock_glacier_deprecated @@ -28,7 +30,8 @@ def test_describe_job(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" conn.create_vault(vault_name) - archive_id = 
conn.upload_archive(vault_name, "some stuff", "", "", "some description") + archive_id = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") job_response = conn.initiate_job(vault_name, { "ArchiveId": archive_id, "Type": "archive-retrieval", @@ -61,8 +64,10 @@ def test_list_glacier_jobs(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" conn.create_vault(vault_name) - archive_id1 = conn.upload_archive(vault_name, "some stuff", "", "", "some description")['ArchiveId'] - archive_id2 = conn.upload_archive(vault_name, "some other stuff", "", "", "some description")['ArchiveId'] + archive_id1 = conn.upload_archive( + vault_name, "some stuff", "", "", "some description")['ArchiveId'] + archive_id2 = conn.upload_archive( + vault_name, "some other stuff", "", "", "some description")['ArchiveId'] conn.initiate_job(vault_name, { "ArchiveId": archive_id1, @@ -82,7 +87,8 @@ def test_get_job_output(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" conn.create_vault(vault_name) - archive_response = conn.upload_archive(vault_name, "some stuff", "", "", "some description") + archive_response = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") archive_id = archive_response['ArchiveId'] job_response = conn.initiate_job(vault_name, { "ArchiveId": archive_id, diff --git a/tests/test_glacier/test_glacier_server.py b/tests/test_glacier/test_glacier_server.py index d3e09015f..fd8034421 100644 --- a/tests/test_glacier/test_glacier_server.py +++ b/tests/test_glacier/test_glacier_server.py @@ -18,4 +18,5 @@ def test_list_vaults(): res = test_client.get('/1234bcd/vaults') - json.loads(res.data.decode("utf-8")).should.equal({u'Marker': None, u'VaultList': []}) + json.loads(res.data.decode("utf-8") + ).should.equal({u'Marker': None, u'VaultList': []}) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 6504a5483..076f33916 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -19,11 +19,13 @@ def test_get_all_server_certs(): conn = boto.connect_iam() conn.upload_server_cert("certname", "certbody", "privatekey") - certs = conn.get_all_server_certs()['list_server_certificates_response']['list_server_certificates_result']['server_certificate_metadata_list'] + certs = conn.get_all_server_certs()['list_server_certificates_response'][ + 'list_server_certificates_result']['server_certificate_metadata_list'] certs.should.have.length_of(1) cert1 = certs[0] cert1.server_certificate_name.should.equal("certname") - cert1.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname") + cert1.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") @mock_iam_deprecated() @@ -41,7 +43,8 @@ def test_get_server_cert(): conn.upload_server_cert("certname", "certbody", "privatekey") cert = conn.get_server_certificate("certname") cert.server_certificate_name.should.equal("certname") - cert.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname") + cert.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") @mock_iam_deprecated() @@ -51,7 +54,8 @@ def test_upload_server_cert(): conn.upload_server_cert("certname", "certbody", "privatekey") cert = conn.get_server_certificate("certname") cert.server_certificate_name.should.equal("certname") - cert.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname") + cert.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") @mock_iam_deprecated() @@ -74,7 +78,8 @@ def 
test_get_instance_profile__should_throw__when_instance_profile_does_not_exis def test_create_role_and_instance_profile(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") - conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path") + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") conn.add_role_to_instance_profile("my-profile", "my-role") @@ -95,7 +100,8 @@ def test_create_role_and_instance_profile(): def test_remove_role_from_instance_profile(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") - conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path") + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") conn.add_role_to_instance_profile("my-profile", "my-role") profile = conn.get_instance_profile("my-profile") @@ -127,29 +133,37 @@ def test_list_instance_profiles(): def test_list_instance_profiles_for_role(): conn = boto.connect_iam() - conn.create_role(role_name="my-role", assume_role_policy_document="some policy", path="my-path") - conn.create_role(role_name="my-role2", assume_role_policy_document="some policy2", path="my-path2") + conn.create_role(role_name="my-role", + assume_role_policy_document="some policy", path="my-path") + conn.create_role(role_name="my-role2", + assume_role_policy_document="some policy2", path="my-path2") profile_name_list = ['my-profile', 'my-profile2'] profile_path_list = ['my-path', 'my-path2'] for profile_count in range(0, 2): - conn.create_instance_profile(profile_name_list[profile_count], path=profile_path_list[profile_count]) + conn.create_instance_profile( + profile_name_list[profile_count], path=profile_path_list[profile_count]) for profile_count in range(0, 2): - conn.add_role_to_instance_profile(profile_name_list[profile_count], "my-role") + conn.add_role_to_instance_profile( + profile_name_list[profile_count], "my-role") profile_dump = conn.list_instance_profiles_for_role(role_name="my-role") - profile_list = profile_dump['list_instance_profiles_for_role_response']['list_instance_profiles_for_role_result']['instance_profiles'] + profile_list = profile_dump['list_instance_profiles_for_role_response'][ + 'list_instance_profiles_for_role_result']['instance_profiles'] for profile_count in range(0, len(profile_list)): - profile_name_list.remove(profile_list[profile_count]["instance_profile_name"]) + profile_name_list.remove(profile_list[profile_count][ + "instance_profile_name"]) profile_path_list.remove(profile_list[profile_count]["path"]) - profile_list[profile_count]["roles"]["member"]["role_name"].should.equal("my-role") + profile_list[profile_count]["roles"]["member"][ + "role_name"].should.equal("my-role") len(profile_name_list).should.equal(0) len(profile_path_list).should.equal(0) profile_dump2 = conn.list_instance_profiles_for_role(role_name="my-role2") - profile_list = profile_dump2['list_instance_profiles_for_role_response']['list_instance_profiles_for_role_result']['instance_profiles'] + profile_list = profile_dump2['list_instance_profiles_for_role_response'][ + 'list_instance_profiles_for_role_result']['instance_profiles'] len(profile_list).should.equal(0) @@ -165,9 +179,11 @@ def test_list_role_policies(): @mock_iam_deprecated() def test_put_role_policy(): conn = boto.connect_iam() - conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path") + conn.create_role( + "my-role", 
assume_role_policy_document="some policy", path="my-path") conn.put_role_policy("my-role", "test policy", "my policy") - policy = conn.get_role_policy("my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] + policy = conn.get_role_policy( + "my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] policy.should.equal("test policy") @@ -246,13 +262,15 @@ def test_get_all_access_keys(): conn.create_user('my-user') response = conn.get_all_access_keys('my-user') assert_equals( - response['list_access_keys_response']['list_access_keys_result']['access_key_metadata'], + response['list_access_keys_response'][ + 'list_access_keys_result']['access_key_metadata'], [] ) conn.create_access_key('my-user') response = conn.get_all_access_keys('my-user') assert_not_equals( - response['list_access_keys_response']['list_access_keys_result']['access_key_metadata'], + response['list_access_keys_response'][ + 'list_access_keys_result']['access_key_metadata'], [] ) @@ -261,7 +279,8 @@ def test_get_all_access_keys(): def test_delete_access_key(): conn = boto.connect_iam() conn.create_user('my-user') - access_key_id = conn.create_access_key('my-user')['create_access_key_response']['create_access_key_result']['access_key']['access_key_id'] + access_key_id = conn.create_access_key('my-user')['create_access_key_response'][ + 'create_access_key_result']['access_key']['access_key_id'] conn.delete_access_key(access_key_id, 'my-user') @@ -278,9 +297,11 @@ def test_delete_user(): def test_generate_credential_report(): conn = boto.connect_iam() result = conn.generate_credential_report() - result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('STARTED') + result['generate_credential_report_response'][ + 'generate_credential_report_result']['state'].should.equal('STARTED') result = conn.generate_credential_report() - result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('COMPLETE') + result['generate_credential_report_response'][ + 'generate_credential_report_result']['state'].should.equal('COMPLETE') @mock_iam_deprecated() @@ -293,7 +314,8 @@ def test_get_credential_report(): while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE': result = conn.generate_credential_report() result = conn.get_credential_report() - report = base64.b64decode(result['get_credential_report_response']['get_credential_report_result']['content'].encode('ascii')).decode('ascii') + report = base64.b64decode(result['get_credential_report_response'][ + 'get_credential_report_result']['content'].encode('ascii')).decode('ascii') report.should.match(r'.*my-user.*') @@ -307,23 +329,31 @@ def test_managed_policy(): path='/mypolicy/', description='my user managed policy') - aws_policies = conn.list_policies(scope='AWS')['list_policies_response']['list_policies_result']['policies'] - set(p.name for p in aws_managed_policies).should.equal(set(p['policy_name'] for p in aws_policies)) + aws_policies = conn.list_policies(scope='AWS')['list_policies_response'][ + 'list_policies_result']['policies'] + set(p.name for p in aws_managed_policies).should.equal( + set(p['policy_name'] for p in aws_policies)) - user_policies = conn.list_policies(scope='Local')['list_policies_response']['list_policies_result']['policies'] - set(['UserManagedPolicy']).should.equal(set(p['policy_name'] for p in user_policies)) + user_policies = 
conn.list_policies(scope='Local')['list_policies_response'][ + 'list_policies_result']['policies'] + set(['UserManagedPolicy']).should.equal( + set(p['policy_name'] for p in user_policies)) - all_policies = conn.list_policies()['list_policies_response']['list_policies_result']['policies'] - set(p['policy_name'] for p in aws_policies + user_policies).should.equal(set(p['policy_name'] for p in all_policies)) + all_policies = conn.list_policies()['list_policies_response'][ + 'list_policies_result']['policies'] + set(p['policy_name'] for p in aws_policies + + user_policies).should.equal(set(p['policy_name'] for p in all_policies)) role_name = 'my-role' - conn.create_role(role_name, assume_role_policy_document={'policy': 'test'}, path="my-path") + conn.create_role(role_name, assume_role_policy_document={ + 'policy': 'test'}, path="my-path") for policy_name in ['AmazonElasticMapReduceRole', 'AmazonElasticMapReduceforEC2Role']: policy_arn = 'arn:aws:iam::aws:policy/service-role/' + policy_name conn.attach_role_policy(policy_arn, role_name) - rows = conn.list_policies(only_attached=True)['list_policies_response']['list_policies_result']['policies'] + rows = conn.list_policies(only_attached=True)['list_policies_response'][ + 'list_policies_result']['policies'] rows.should.have.length_of(2) for x in rows: int(x['attachment_count']).should.be.greater_than(0) @@ -332,7 +362,8 @@ def test_managed_policy(): resp = conn.get_response('ListAttachedRolePolicies', {'RoleName': role_name}, list_marker='AttachedPolicies') - resp['list_attached_role_policies_response']['list_attached_role_policies_result']['attached_policies'].should.have.length_of(2) + resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ + 'attached_policies'].should.have.length_of(2) @mock_iam diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 6fd0f47dd..a13d6de0b 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -29,7 +29,8 @@ def test_get_all_groups(): conn = boto.connect_iam() conn.create_group('my-group1') conn.create_group('my-group2') - groups = conn.get_all_groups()['list_groups_response']['list_groups_result']['groups'] + groups = conn.get_all_groups()['list_groups_response'][ + 'list_groups_result']['groups'] groups.should.have.length_of(2) @@ -68,5 +69,6 @@ def test_get_groups_for_user(): conn.add_user_to_group('my-group1', 'my-user') conn.add_user_to_group('my-group2', 'my-user') - groups = conn.get_groups_for_user('my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups'] + groups = conn.get_groups_for_user( + 'my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups'] groups.should.have.length_of(2) diff --git a/tests/test_iam/test_server.py b/tests/test_iam/test_server.py index 1b1c3bfe3..59aaf1462 100644 --- a/tests/test_iam/test_server.py +++ b/tests/test_iam/test_server.py @@ -16,10 +16,11 @@ def test_iam_server_get(): backend = server.create_backend_app("iam") test_client = backend.test_client() - group_data = test_client.action_data("CreateGroup", GroupName="test group", Path="/") + group_data = test_client.action_data( + "CreateGroup", GroupName="test group", Path="/") group_id = re.search("(.*)", group_data).groups()[0] groups_data = test_client.action_data("ListGroups") groups_ids = re.findall("(.*)", groups_data) - assert group_id in groups_ids \ No newline at end of file + assert group_id in groups_ids diff --git a/tests/test_kinesis/test_firehose.py 
b/tests/test_kinesis/test_firehose.py index 371be253b..6ab46c6f9 100644 --- a/tests/test_kinesis/test_firehose.py +++ b/tests/test_kinesis/test_firehose.py @@ -132,11 +132,13 @@ def test_create_stream_without_redshift(): "HasMoreDestinations": False, }) + @mock_kinesis def test_deescribe_non_existant_stream(): client = boto3.client('firehose', region_name='us-east-1') - client.describe_delivery_stream.when.called_with(DeliveryStreamName='not-a-stream').should.throw(ClientError) + client.describe_delivery_stream.when.called_with( + DeliveryStreamName='not-a-stream').should.throw(ClientError) @mock_kinesis @@ -146,11 +148,13 @@ def test_list_and_delete_stream(): create_stream(client, 'stream1') create_stream(client, 'stream2') - set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal(set(['stream1', 'stream2'])) + set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal( + set(['stream1', 'stream2'])) client.delete_delivery_stream(DeliveryStreamName='stream1') - set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal(set(['stream2'])) + set(client.list_delivery_streams()[ + 'DeliveryStreamNames']).should.equal(set(['stream2'])) @mock_kinesis diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index a86bce44c..5b2f9ccf3 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -18,7 +18,8 @@ def test_create_cluster(): stream = stream_response["StreamDescription"] stream["StreamName"].should.equal("my_stream") stream["HasMoreShards"].should.equal(False) - stream["StreamARN"].should.equal("arn:aws:kinesis:us-west-2:123456789012:my_stream") + stream["StreamARN"].should.equal( + "arn:aws:kinesis:us-west-2:123456789012:my_stream") stream["StreamStatus"].should.equal("ACTIVE") shards = stream['Shards'] @@ -28,7 +29,8 @@ def test_create_cluster(): @mock_kinesis_deprecated def test_describe_non_existant_stream(): conn = boto.kinesis.connect_to_region("us-east-1") - conn.describe_stream.when.called_with("not-a-stream").should.throw(ResourceNotFoundException) + conn.describe_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) @mock_kinesis_deprecated @@ -45,7 +47,8 @@ def test_list_and_delete_stream(): conn.list_streams()['StreamNames'].should.have.length_of(1) # Delete invalid id - conn.delete_stream.when.called_with("not-a-stream").should.throw(ResourceNotFoundException) + conn.delete_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) @mock_kinesis_deprecated @@ -73,7 +76,8 @@ def test_get_invalid_shard_iterator(): stream_name = "my_stream" conn.create_stream(stream_name, 1) - conn.get_shard_iterator.when.called_with(stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) + conn.get_shard_iterator.when.called_with( + stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) @mock_kinesis_deprecated @@ -138,7 +142,8 @@ def test_get_records_limit(): @mock_kinesis_deprecated def test_get_records_at_sequence_number(): - # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by a specific sequence number. + # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by + # a specific sequence number. 
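# A rough sketch of the flow this test exercises, using the same deprecated
# boto Kinesis client (shard_id and seq stand for values obtained from the
# describe_stream and get_records calls shown below):
#
#     it = conn.get_shard_iterator(stream_name, shard_id,
#                                  'AT_SEQUENCE_NUMBER', seq)['ShardIterator']
#     conn.get_records(it)['Records']  # first record carries SequenceNumber seq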
conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" conn.create_stream(stream_name, 1) @@ -158,7 +163,8 @@ def test_get_records_at_sequence_number(): second_sequence_id = response['Records'][1]['SequenceNumber'] # Then get a new iterator starting at that id - response = conn.get_shard_iterator(stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) + response = conn.get_shard_iterator( + stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) shard_iterator = response['ShardIterator'] response = conn.get_records(shard_iterator) @@ -169,7 +175,8 @@ def test_get_records_at_sequence_number(): @mock_kinesis_deprecated def test_get_records_after_sequence_number(): - # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a specific sequence number. + # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted + # by a specific sequence number. conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" conn.create_stream(stream_name, 1) @@ -189,7 +196,8 @@ def test_get_records_after_sequence_number(): second_sequence_id = response['Records'][1]['SequenceNumber'] # Then get a new iterator starting after that id - response = conn.get_shard_iterator(stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id) + response = conn.get_shard_iterator( + stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id) shard_iterator = response['ShardIterator'] response = conn.get_records(shard_iterator) @@ -199,7 +207,8 @@ def test_get_records_after_sequence_number(): @mock_kinesis_deprecated def test_get_records_latest(): - # LATEST - Start reading just after the most recent record in the shard, so that you always read the most recent data in the shard. + # LATEST - Start reading just after the most recent record in the shard, + # so that you always read the most recent data in the shard. 
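# Sketch of the LATEST semantics the assertions below depend on (same
# deprecated boto client; 'partition-key' is an arbitrary placeholder):
#
#     it = conn.get_shard_iterator(stream_name, shard_id,
#                                  'LATEST')['ShardIterator']
#     conn.put_record(stream_name, 'new data', 'partition-key')
#     conn.get_records(it)['Records']  # only records put after the iterator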
conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" conn.create_stream(stream_name, 1) @@ -219,7 +228,8 @@ def test_get_records_latest(): second_sequence_id = response['Records'][1]['SequenceNumber'] # Then get a new iterator starting after that id - response = conn.get_shard_iterator(stream_name, shard_id, 'LATEST', second_sequence_id) + response = conn.get_shard_iterator( + stream_name, shard_id, 'LATEST', second_sequence_id) shard_iterator = response['ShardIterator'] # Write some more data @@ -251,10 +261,10 @@ def test_add_tags(): conn.create_stream(stream_name, 1) conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1':'val1'}) - conn.add_tags_to_stream(stream_name, {'tag2':'val2'}) - conn.add_tags_to_stream(stream_name, {'tag1':'val3'}) - conn.add_tags_to_stream(stream_name, {'tag2':'val4'}) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) @mock_kinesis_deprecated @@ -264,17 +274,21 @@ def test_list_tags(): conn.create_stream(stream_name, 1) conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1':'val1'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag1').should.equal('val1') - conn.add_tags_to_stream(stream_name, {'tag2':'val2'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag2').should.equal('val2') - conn.add_tags_to_stream(stream_name, {'tag1':'val3'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag1').should.equal('val3') - conn.add_tags_to_stream(stream_name, {'tag2':'val4'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag2').should.equal('val4') @@ -285,18 +299,22 @@ def test_remove_tags(): conn.create_stream(stream_name, 1) conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1':'val1'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag1').should.equal('val1') conn.remove_tags_from_stream(stream_name, ['tag1']) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag1').should.equal(None) - conn.add_tags_to_stream(stream_name, {'tag2':'val2'}) - tags = dict([(tag['Key'], tag['Value']) for tag in 
conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag2').should.equal('val2') conn.remove_tags_from_stream(stream_name, ['tag2']) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag2').should.equal(None) @@ -316,10 +334,12 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) shard_range = shards[0]['HashKeyRange'] - new_starting_hash = (int(shard_range['EndingHashKey'])+int(shard_range['StartingHashKey'])) // 2 + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -327,10 +347,12 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) shard_range = shards[2]['HashKeyRange'] - new_starting_hash = (int(shard_range['EndingHashKey'])+int(shard_range['StartingHashKey'])) // 2 + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -338,7 +360,8 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) @mock_kinesis_deprecated @@ -358,28 +381,34 @@ def test_merge_shards(): shards = stream['Shards'] shards.should.have.length_of(4) - conn.merge_shards.when.called_with(stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) + conn.merge_shards.when.called_with( + stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) stream_response = conn.describe_stream(stream_name) stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) - conn.merge_shards(stream_name, 'shardId-000000000000', 'shardId-000000000001') + conn.merge_shards(stream_name, 'shardId-000000000000', + 'shardId-000000000001') stream_response = conn.describe_stream(stream_name) stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) - conn.merge_shards(stream_name, 
'shardId-000000000002', 'shardId-000000000000') + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + conn.merge_shards(stream_name, 'shardId-000000000002', + 'shardId-000000000000') stream_response = conn.describe_stream(stream_name) stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 27850d4ad..e1468cce0 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -8,11 +8,13 @@ import sure # noqa from moto import mock_kms_deprecated from nose.tools import assert_raises + @mock_kms_deprecated def test_create_key(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key['KeyMetadata']['Description'].should.equal("my key") key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") @@ -22,7 +24,8 @@ def test_create_key(): @mock_kms_deprecated def test_describe_key(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] key = conn.describe_key(key_id) @@ -33,8 +36,10 @@ def test_describe_key(): @mock_kms_deprecated def test_describe_key_via_alias(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', target_key_id=key['KeyMetadata']['KeyId']) + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) alias_key = conn.describe_key('alias/my-key-alias') alias_key['KeyMetadata']['Description'].should.equal("my key") @@ -45,16 +50,20 @@ def test_describe_key_via_alias(): @mock_kms_deprecated def test_describe_key_via_alias_not_found(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', target_key_id=key['KeyMetadata']['KeyId']) + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) - conn.describe_key.when.called_with('alias/not-found-alias').should.throw(JSONResponseError) + conn.describe_key.when.called_with( + 'alias/not-found-alias').should.throw(JSONResponseError) @mock_kms_deprecated def test_describe_key_via_arn(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') arn = key['KeyMetadata']['Arn'] the_key = conn.describe_key(arn) @@ -66,15 +75,18 @@ def test_describe_key_via_arn(): @mock_kms_deprecated def test_describe_missing_key(): conn = 
boto.kms.connect_to_region("us-west-2") - conn.describe_key.when.called_with("not-a-key").should.throw(JSONResponseError) + conn.describe_key.when.called_with( + "not-a-key").should.throw(JSONResponseError) @mock_kms_deprecated def test_list_keys(): conn = boto.kms.connect_to_region("us-west-2") - conn.create_key(policy="my policy", description="my key1", key_usage='ENCRYPT_DECRYPT') - conn.create_key(policy="my policy", description="my key2", key_usage='ENCRYPT_DECRYPT') + conn.create_key(policy="my policy", description="my key1", + key_usage='ENCRYPT_DECRYPT') + conn.create_key(policy="my policy", description="my key2", + key_usage='ENCRYPT_DECRYPT') keys = conn.list_keys() keys['Keys'].should.have.length_of(2) @@ -84,56 +96,67 @@ def test_list_keys(): def test_enable_key_rotation(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] conn.enable_key_rotation(key_id) - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(True) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) + @mock_kms_deprecated def test_enable_key_rotation_via_arn(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['Arn'] conn.enable_key_rotation(key_id) - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(True) - + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) @mock_kms_deprecated def test_enable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") - conn.enable_key_rotation.when.called_with("not-a-key").should.throw(JSONResponseError) + conn.enable_key_rotation.when.called_with( + "not-a-key").should.throw(JSONResponseError) @mock_kms_deprecated def test_enable_key_rotation_with_alias_name_should_fail(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', target_key_id=key['KeyMetadata']['KeyId']) + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) alias_key = conn.describe_key('alias/my-key-alias') alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) - conn.enable_key_rotation.when.called_with('alias/my-alias').should.throw(JSONResponseError) + conn.enable_key_rotation.when.called_with( + 'alias/my-alias').should.throw(JSONResponseError) @mock_kms_deprecated def test_disable_key_rotation(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] conn.enable_key_rotation(key_id) - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(True) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) conn.disable_key_rotation(key_id) - 
conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) @mock_kms_deprecated @@ -157,59 +180,70 @@ def test_decrypt(): @mock_kms_deprecated def test_disable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") - conn.disable_key_rotation.when.called_with("not-a-key").should.throw(JSONResponseError) + conn.disable_key_rotation.when.called_with( + "not-a-key").should.throw(JSONResponseError) @mock_kms_deprecated def test_get_key_rotation_status_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") - conn.get_key_rotation_status.when.called_with("not-a-key").should.throw(JSONResponseError) + conn.get_key_rotation_status.when.called_with( + "not-a-key").should.throw(JSONResponseError) @mock_kms_deprecated def test_get_key_rotation_status(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) @mock_kms_deprecated def test_create_key_defaults_key_rotation(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) @mock_kms_deprecated def test_get_key_policy(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] policy = conn.get_key_policy(key_id, 'default') policy['Policy'].should.equal('my policy') + @mock_kms_deprecated def test_get_key_policy_via_arn(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') policy = conn.get_key_policy(key['KeyMetadata']['Arn'], 'default') policy['Policy'].should.equal('my policy') + @mock_kms_deprecated def test_put_key_policy(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] conn.put_key_policy(key_id, 'default', 'new policy') @@ -221,7 +255,8 @@ def test_put_key_policy(): def test_put_key_policy_via_arn(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['Arn'] conn.put_key_policy(key_id, 'default', 'new policy') @@ -233,10 +268,13 @@ def test_put_key_policy_via_arn(): def 
test_put_key_policy_via_alias_should_not_update(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', target_key_id=key['KeyMetadata']['KeyId']) + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) - conn.put_key_policy.when.called_with('alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) + conn.put_key_policy.when.called_with( + 'alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') policy['Policy'].should.equal('my policy') @@ -246,7 +284,8 @@ def test_put_key_policy_via_alias_should_not_update(): def test_put_key_policy(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') conn.put_key_policy(key['KeyMetadata']['Arn'], 'default', 'new policy') policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') @@ -257,7 +296,8 @@ def test_put_key_policy(): def test_list_key_policies(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] policies = conn.list_key_policies(key_id) @@ -323,7 +363,8 @@ def test__create_alias__raises_if_wrong_prefix(): ex = err.exception ex.error_message.should.equal('Invalid identifier') ex.error_code.should.equal('ValidationException') - ex.body.should.equal({'message': 'Invalid identifier', '__type': 'ValidationException'}) + ex.body.should.equal({'message': 'Invalid identifier', + '__type': 'ValidationException'}) ex.reason.should.equal('Bad Request') ex.status.should.equal(400) @@ -371,16 +412,19 @@ def test__create_alias__raises_if_alias_has_restricted_characters(): kms.create_alias(alias_name, key_id) ex = err.exception ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal("1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) + ex.body['message'].should.equal( + "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) ex.error_code.should.equal('ValidationException') - ex.message.should.equal("1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) + ex.message.should.equal( + "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) ex.reason.should.equal('Bad Request') ex.status.should.equal(400) @mock_kms_deprecated def test__create_alias__raises_if_alias_has_colon_character(): - # For some reason, colons are not accepted for an alias, even though they are accepted by regex ^[a-zA-Z0-9:/_-]+$ + # For some 
reason, colons are not accepted for an alias, even though they + # are accepted by regex ^[a-zA-Z0-9:/_-]+$ kms = boto.connect_kms() create_resp = kms.create_key() key_id = create_resp['KeyMetadata']['KeyId'] @@ -394,9 +438,11 @@ def test__create_alias__raises_if_alias_has_colon_character(): kms.create_alias(alias_name, key_id) ex = err.exception ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal("{alias_name} contains invalid characters for an alias".format(**locals())) + ex.body['message'].should.equal( + "{alias_name} contains invalid characters for an alias".format(**locals())) ex.error_code.should.equal('ValidationException') - ex.message.should.equal("{alias_name} contains invalid characters for an alias".format(**locals())) + ex.message.should.equal( + "{alias_name} contains invalid characters for an alias".format(**locals())) ex.reason.should.equal('Bad Request') ex.status.should.equal(400) @@ -481,10 +527,12 @@ def test__delete_alias__raises_if_alias_is_not_found(): ex = err.exception ex.body['__type'].should.equal('NotFoundException') - ex.body['message'].should.match(r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) + ex.body['message'].should.match( + r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) ex.box_usage.should.be.none ex.error_code.should.be.none - ex.message.should.match(r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) + ex.message.should.match( + r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) ex.reason.should.equal('Bad Request') ex.request_id.should.be.none ex.status.should.equal(400) @@ -527,7 +575,8 @@ def test__list_aliases(): len([alias for alias in aliases if has_correct_arn(alias) and 'alias/my-alias2' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == alias['TargetKeyId']]).should.equal(3) + len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == + alias['TargetKeyId']]).should.equal(3) len(aliases).should.equal(7) @@ -537,13 +586,17 @@ def test__assert_valid_key_id(): from moto.kms.responses import _assert_valid_key_id import uuid - _assert_valid_key_id.when.called_with("not-a-key").should.throw(JSONResponseError) - _assert_valid_key_id.when.called_with(str(uuid.uuid4())).should_not.throw(JSONResponseError) + _assert_valid_key_id.when.called_with( + "not-a-key").should.throw(JSONResponseError) + _assert_valid_key_id.when.called_with( + str(uuid.uuid4())).should_not.throw(JSONResponseError) @mock_kms_deprecated def test__assert_default_policy(): from moto.kms.responses import _assert_default_policy - _assert_default_policy.when.called_with("not-default").should.throw(JSONResponseError) - _assert_default_policy.when.called_with("default").should_not.throw(JSONResponseError) + _assert_default_policy.when.called_with( + "not-default").should.throw(JSONResponseError) + _assert_default_policy.when.called_with( + "default").should_not.throw(JSONResponseError) diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index e24486a2f..9c9e20878 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -102,7 +102,8 @@ def test_describe_instances(): S1L1_i1.should.be.within([i["InstanceId"] for i in response]) S1L1_i2.should.be.within([i["InstanceId"] for i in response]) - response2 = client.describe_instances(InstanceIds=[S1L1_i1, 
S1L1_i2])['Instances'] + response2 = client.describe_instances( + InstanceIds=[S1L1_i1, S1L1_i2])['Instances'] sorted(response2, key=lambda d: d['InstanceId']).should.equal( sorted(response, key=lambda d: d['InstanceId'])) @@ -168,9 +169,8 @@ def test_ec2_integration(): reservations = ec2.describe_instances()['Reservations'] reservations[0]['Instances'].should.have.length_of(1) instance = reservations[0]['Instances'][0] - opsworks_instance = opsworks.describe_instances(StackId=stack_id)['Instances'][0] + opsworks_instance = opsworks.describe_instances(StackId=stack_id)[ + 'Instances'][0] instance['InstanceId'].should.equal(opsworks_instance['Ec2InstanceId']) instance['PrivateIpAddress'].should.equal(opsworks_instance['PrivateIp']) - - diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index dc268bbe5..31fdeae8c 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -43,7 +43,8 @@ def test_create_layer_response(): Name="_", Shortname="TestLayerShortName" ).should.throw( - Exception, re.compile(r'already a layer with shortname "TestLayerShortName"') + Exception, re.compile( + r'already a layer with shortname "TestLayerShortName"') ) @@ -69,4 +70,3 @@ def test_describe_layers(): rv1['Layers'].should.equal(rv2['Layers']) rv1['Layers'][0]['Name'].should.equal("TestLayer") - diff --git a/tests/test_opsworks/test_stack.py b/tests/test_opsworks/test_stack.py index 8d86e4207..5913ce6d5 100644 --- a/tests/test_opsworks/test_stack.py +++ b/tests/test_opsworks/test_stack.py @@ -44,5 +44,3 @@ def test_describe_stacks(): client.describe_stacks.when.called_with(StackIds=["foo"]).should.throw( Exception, re.compile(r'foo') ) - - diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 7a6cab633..090147d11 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -15,14 +15,15 @@ def test_create_database(): conn = boto.rds.connect_to_region("us-west-2") database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', - security_groups=["my_sg"]) + security_groups=["my_sg"]) database.status.should.equal('available') database.id.should.equal("db-master-1") database.allocated_storage.should.equal(10) database.instance_class.should.equal("db.m1.small") database.master_username.should.equal("root") - database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)) + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)) database.security_groups[0].name.should.equal('my_sg') @@ -47,7 +48,8 @@ def test_get_databases(): @mock_rds_deprecated def test_describe_non_existant_database(): conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbinstances.when.called_with("not-a-db").should.throw(BotoServerError) + conn.get_all_dbinstances.when.called_with( + "not-a-db").should.throw(BotoServerError) @disable_on_py3() @@ -66,7 +68,8 @@ def test_delete_database(): @mock_rds_deprecated def test_delete_non_existant_database(): conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbinstance.when.called_with("not-a-db").should.throw(BotoServerError) + conn.delete_dbinstance.when.called_with( + "not-a-db").should.throw(BotoServerError) @mock_rds_deprecated @@ -99,7 +102,8 @@ def test_get_security_groups(): @mock_rds_deprecated def test_get_non_existant_security_group(): conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(BotoServerError) + 
conn.get_all_dbsecurity_groups.when.called_with( + "not-a-sg").should.throw(BotoServerError) @mock_rds_deprecated @@ -116,7 +120,8 @@ def test_delete_database_security_group(): @mock_rds_deprecated def test_delete_non_existant_security_group(): conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(BotoServerError) + conn.delete_dbsecurity_group.when.called_with( + "not-a-db").should.throw(BotoServerError) @disable_on_py3() @@ -137,7 +142,8 @@ def test_security_group_authorize(): def test_add_security_group_to_database(): conn = boto.rds.connect_to_region("us-west-2") - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') database.modify(security_groups=[security_group]) @@ -157,7 +163,8 @@ def test_add_database_subnet_group(): subnet_ids = [subnet1.id, subnet2.id] conn = boto.rds.connect_to_region("us-west-2") - subnet_group = conn.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids) + subnet_group = conn.create_db_subnet_group( + "db_subnet", "my db subnet", subnet_ids) subnet_group.name.should.equal('db_subnet') subnet_group.description.should.equal("my db subnet") list(subnet_group.subnet_ids).should.equal(subnet_ids) @@ -177,7 +184,8 @@ def test_describe_database_subnet_group(): list(conn.get_all_db_subnet_groups()).should.have.length_of(2) list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1) - conn.get_all_db_subnet_groups.when.called_with("not-a-subnet").should.throw(BotoServerError) + conn.get_all_db_subnet_groups.when.called_with( + "not-a-subnet").should.throw(BotoServerError) @mock_ec2_deprecated @@ -194,7 +202,8 @@ def test_delete_database_subnet_group(): conn.delete_db_subnet_group("db_subnet1") list(conn.get_all_db_subnet_groups()).should.have.length_of(0) - conn.delete_db_subnet_group.when.called_with("db_subnet1").should.throw(BotoServerError) + conn.delete_db_subnet_group.when.called_with( + "db_subnet1").should.throw(BotoServerError) @disable_on_py3() @@ -209,7 +218,7 @@ def test_create_database_in_subnet_group(): conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', - 'root', 'hunter2', db_subnet_group_name="db_subnet1") + 'root', 'hunter2', db_subnet_group_name="db_subnet1") database = conn.get_all_dbinstances("db-master-1")[0] database.subnet_group.name.should.equal("db_subnet1") @@ -220,9 +229,11 @@ def test_create_database_in_subnet_group(): def test_create_database_replica(): conn = boto.rds.connect_to_region("us-west-2") - primary = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + primary = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - replica = conn.create_dbinstance_read_replica("replica", "db-master-1", "db.m1.small") + replica = conn.create_dbinstance_read_replica( + "replica", "db-master-1", "db.m1.small") replica.id.should.equal("replica") replica.instance_class.should.equal("db.m1.small") status_info = replica.status_infos[0] @@ -238,13 +249,15 @@ def test_create_database_replica(): primary = conn.get_all_dbinstances("db-master-1")[0] list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) + @disable_on_py3() @mock_rds_deprecated def test_create_cross_region_database_replica(): west_1_conn = 
boto.rds.connect_to_region("us-west-1") west_2_conn = boto.rds.connect_to_region("us-west-2") - primary = west_1_conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + primary = west_1_conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1" replica = west_2_conn.create_dbinstance_read_replica( @@ -274,14 +287,15 @@ def test_connecting_to_us_east_1(): conn = boto.rds.connect_to_region("us-east-1") database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', - security_groups=["my_sg"]) + security_groups=["my_sg"]) database.status.should.equal('available') database.id.should.equal("db-master-1") database.allocated_storage.should.equal(10) database.instance_class.should.equal("db.m1.small") database.master_username.should.equal("root") - database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)) + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)) database.security_groups[0].name.should.equal('my_sg') @@ -290,7 +304,8 @@ def test_connecting_to_us_east_1(): def test_create_database_with_iops(): conn = boto.rds.connect_to_region("us-west-2") - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) database.status.should.equal('available') database.iops.should.equal(6000) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 581209655..731bc75c1 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -24,7 +24,8 @@ def test_create_database(): database['DBInstance']['AllocatedStorage'].should.equal(10) database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") database['DBInstance']['MasterUsername'].should.equal("root") - database['DBInstance']['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal('my_sg') + database['DBInstance']['DBSecurityGroups'][0][ + 'DBSecurityGroupName'].should.equal('my_sg') @disable_on_py3() @@ -56,14 +57,16 @@ def test_get_databases(): instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") list(instances['DBInstances']).should.have.length_of(1) - instances['DBInstances'][0]['DBInstanceIdentifier'].should.equal("db-master-1") + instances['DBInstances'][0][ + 'DBInstanceIdentifier'].should.equal("db-master-1") @disable_on_py3() @mock_rds2 def test_describe_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_instances.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) + conn.describe_db_instances.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @@ -95,6 +98,7 @@ def test_modify_non_existant_database(): AllocatedStorage=20, ApplyImmediately=True).should.throw(ClientError) + @disable_on_py3() @mock_rds2 def test_reboot_db_instance(): @@ -115,7 +119,8 @@ def test_reboot_db_instance(): @mock_rds2 def test_reboot_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') - conn.reboot_db_instance.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) + conn.reboot_db_instance.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @@ -144,7 +149,8 @@ def test_delete_database(): @mock_rds2 def test_delete_non_existant_database(): conn = 
boto3.client('rds2', region_name="us-west-2") - conn.delete_db_instance.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) + conn.delete_db_instance.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @@ -157,7 +163,8 @@ def test_create_option_group(): OptionGroupDescription='test option group') option_group['OptionGroup']['OptionGroupName'].should.equal('test') option_group['OptionGroup']['EngineName'].should.equal('mysql') - option_group['OptionGroup']['OptionGroupDescription'].should.equal('test option group') + option_group['OptionGroup'][ + 'OptionGroupDescription'].should.equal('test option group') option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') @@ -214,14 +221,16 @@ def test_describe_option_group(): MajorEngineVersion='5.6', OptionGroupDescription='test option group') option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0]['OptionGroupName'].should.equal('test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') @disable_on_py3() @mock_rds2 def test_describe_non_existant_option_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.describe_option_groups.when.called_with(OptionGroupName="not-a-option-group").should.throw(ClientError) + conn.describe_option_groups.when.called_with( + OptionGroupName="not-a-option-group").should.throw(ClientError) @disable_on_py3() @@ -233,41 +242,51 @@ def test_delete_option_group(): MajorEngineVersion='5.6', OptionGroupDescription='test option group') option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0]['OptionGroupName'].should.equal('test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') conn.delete_option_group(OptionGroupName='test') - conn.describe_option_groups.when.called_with(OptionGroupName='test').should.throw(ClientError) + conn.describe_option_groups.when.called_with( + OptionGroupName='test').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_delete_non_existant_option_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.delete_option_group.when.called_with(OptionGroupName='non-existant').should.throw(ClientError) + conn.delete_option_group.when.called_with( + OptionGroupName='non-existant').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_describe_option_group_options(): conn = boto3.client('rds', region_name='us-west-2') - option_group_options = conn.describe_option_group_options(EngineName='sqlserver-ee') + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee') len(option_group_options['OptionGroupOptions']).should.equal(4) - option_group_options = conn.describe_option_group_options(EngineName='sqlserver-ee', MajorEngineVersion='11.00') + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee', MajorEngineVersion='11.00') len(option_group_options['OptionGroupOptions']).should.equal(2) - option_group_options = conn.describe_option_group_options(EngineName='mysql', MajorEngineVersion='5.6') + option_group_options = conn.describe_option_group_options( + EngineName='mysql', MajorEngineVersion='5.6') len(option_group_options['OptionGroupOptions']).should.equal(1) - conn.describe_option_group_options.when.called_with(EngineName='non-existent').should.throw(ClientError) - conn.describe_option_group_options.when.called_with(EngineName='mysql', 
MajorEngineVersion='non-existent').should.throw(ClientError) + conn.describe_option_group_options.when.called_with( + EngineName='non-existent').should.throw(ClientError) + conn.describe_option_group_options.when.called_with( + EngineName='mysql', MajorEngineVersion='non-existent').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_modify_option_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', MajorEngineVersion='5.6', OptionGroupDescription='test option group') + conn.create_option_group(OptionGroupName='test', EngineName='mysql', + MajorEngineVersion='5.6', OptionGroupDescription='test option group') # TODO: create option and validate before deleting. # if Someone can tell me how the hell to use this function # to add options to an option_group, I can finish coding this. - result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True) + result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[ + ], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True) result['OptionGroup']['EngineName'].should.equal('mysql') result['OptionGroup']['Options'].should.equal([]) result['OptionGroup']['OptionGroupName'].should.equal('test') @@ -277,36 +296,42 @@ def test_modify_option_group(): @mock_rds2 def test_modify_option_group_no_options(): conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', MajorEngineVersion='5.6', OptionGroupDescription='test option group') - conn.modify_option_group.when.called_with(OptionGroupName='test').should.throw(ClientError) + conn.create_option_group(OptionGroupName='test', EngineName='mysql', + MajorEngineVersion='5.6', OptionGroupDescription='test option group') + conn.modify_option_group.when.called_with( + OptionGroupName='test').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_modify_non_existant_option_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) + conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[( + 'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) @disable_on_py3() @mock_rds2 def test_delete_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_instance.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) + conn.delete_db_instance.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_list_tags_invalid_arn(): conn = boto3.client('rds', region_name='us-west-2') - conn.list_tags_for_resource.when.called_with(ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) + conn.list_tags_for_resource.when.called_with( + ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_list_tags_db(): conn = boto3.client('rds', region_name='us-west-2') - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') result['TagList'].should.equal([]) 
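# The tagging calls below address RDS resources by ARN rather than by name.
# Every ARN in these tests follows the same pattern, varying only in the
# resource type ('db' here; 'og' for option groups and 'secgrp' for
# security groups further down):
#
#     arn:aws:rds:<region>:<account-id>:<resource-type>:<resource-name>
#     e.g. arn:aws:rds:us-west-2:1234567890:db:db-with-tags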
conn.create_db_instance(DBInstanceIdentifier='db-with-tags', AllocatedStorage=10, @@ -326,11 +351,12 @@ def test_list_tags_db(): 'Value': 'bar1', }, ]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) @disable_on_py3() @@ -355,7 +381,8 @@ def test_add_tags_db(): 'Value': 'bar1', }, ]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') list(result['TagList']).should.have.length_of(2) conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags', Tags=[ @@ -368,7 +395,8 @@ def test_add_tags_db(): 'Value': 'bar2', }, ]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') list(result['TagList']).should.have.length_of(3) @@ -394,10 +422,13 @@ def test_remove_tags_db(): 'Value': 'bar1', }, ]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo']) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + conn.remove_tags_from_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') len(result['TagList']).should.equal(1) @@ -409,7 +440,8 @@ def test_add_tags_option_group(): EngineName='mysql', MajorEngineVersion='5.6', OptionGroupDescription='test option group') - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') list(result['TagList']).should.have.length_of(0) conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', Tags=[ @@ -421,7 +453,8 @@ def test_add_tags_option_group(): 'Key': 'foo2', 'Value': 'bar2', }]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') list(result['TagList']).should.have.length_of(2) @@ -433,7 +466,8 @@ def test_remove_tags_option_group(): EngineName='mysql', MajorEngineVersion='5.6', OptionGroupDescription='test option group') - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', Tags=[ { @@ -444,11 +478,13 @@ def test_remove_tags_option_group(): 'Key': 'foo2', 'Value': 'bar2', }]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = 
conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') list(result['TagList']).should.have.length_of(2) conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', TagKeys=['foo']) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') list(result['TagList']).should.have.length_of(1) @@ -457,9 +493,11 @@ def test_remove_tags_option_group(): def test_create_database_security_group(): conn = boto3.client('rds', region_name='us-west-2') - result = conn.create_db_security_group(DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') + result = conn.create_db_security_group( + DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") - result['DBSecurityGroup']['DBSecurityGroupDescription'].should.equal("DB Security Group") + result['DBSecurityGroup'][ + 'DBSecurityGroupDescription'].should.equal("DB Security Group") result['DBSecurityGroup']['IPRanges'].should.equal([]) @@ -471,8 +509,10 @@ def test_get_security_groups(): result = conn.describe_db_security_groups() result['DBSecurityGroups'].should.have.length_of(0) - conn.create_db_security_group(DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') - conn.create_db_security_group(DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group( + DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group( + DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') result = conn.describe_db_security_groups() result['DBSecurityGroups'].should.have.length_of(2) @@ -486,14 +526,16 @@ def test_get_security_groups(): @mock_rds2 def test_get_non_existant_security_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_security_groups.when.called_with(DBSecurityGroupName="not-a-sg").should.throw(ClientError) + conn.describe_db_security_groups.when.called_with( + DBSecurityGroupName="not-a-sg").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_delete_database_security_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_security_group(DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group( + DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') result = conn.describe_db_security_groups() result['DBSecurityGroups'].should.have.length_of(1) @@ -507,7 +549,8 @@ def test_delete_database_security_group(): @mock_rds2 def test_delete_non_existant_security_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_security_group.when.called_with(DBSecurityGroupName="not-a-db").should.throw(ClientError) + conn.delete_db_security_group.when.called_with( + DBSecurityGroupName="not-a-db").should.throw(ClientError) @disable_on_py3() @@ -518,13 +561,13 @@ def test_security_group_authorize(): DBSecurityGroupDescription='DB Security Group') security_group['DBSecurityGroup']['IPRanges'].should.equal([]) - conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', CIDRIP='10.3.2.45/32') result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1) - 
result['DBSecurityGroups'][0]['IPRanges'].should.equal([{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]) + result['DBSecurityGroups'][0]['IPRanges'].should.equal( + [{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]) conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', CIDRIP='10.3.2.46/32') @@ -554,9 +597,10 @@ def test_add_security_group_to_database(): conn.create_db_security_group(DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - DBSecurityGroups=['db_sg']) + DBSecurityGroups=['db_sg']) result = conn.describe_db_instances() - result['DBInstances'][0]['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal('db_sg') + result['DBInstances'][0]['DBSecurityGroups'][0][ + 'DBSecurityGroupName'].should.equal('db_sg') @disable_on_py3() @@ -572,12 +616,13 @@ def test_list_tags_security_group(): 'Key': 'foo'}, {'Value': 'bar1', 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(security_group) + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) result = conn.list_tags_for_resource(ResourceName=resource) result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) @disable_on_py3() @@ -590,7 +635,8 @@ def test_add_tags_security_group(): security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(security_group) + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) conn.add_tags_to_resource(ResourceName=resource, Tags=[{'Value': 'bar', 'Key': 'foo'}, @@ -600,8 +646,9 @@ def test_add_tags_security_group(): result = conn.list_tags_for_resource(ResourceName=resource) result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) + @disable_on_py3() @mock_rds2 @@ -617,7 +664,8 @@ def test_remove_tags_security_group(): {'Value': 'bar1', 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(security_group) + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) result = conn.list_tags_for_resource(ResourceName=resource) @@ -630,8 +678,10 @@ def test_remove_tags_security_group(): def test_create_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet1 = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - subnet2 = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] + subnet1 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet2 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] conn = boto3.client('rds', region_name='us-west-2') @@ -639,9 +689,11 @@ def test_create_database_subnet_group(): DBSubnetGroupDescription='my db subnet', SubnetIds=subnet_ids) result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") - result['DBSubnetGroup']['DBSubnetGroupDescription'].should.equal("my db subnet") + 
result['DBSubnetGroup'][ + 'DBSubnetGroupDescription'].should.equal("my db subnet") subnets = result['DBSubnetGroup']['Subnets'] - subnet_group_ids = [subnets[0]['SubnetIdentifier'], subnets[1]['SubnetIdentifier']] + subnet_group_ids = [subnets[0]['SubnetIdentifier'], + subnets[1]['SubnetIdentifier']] list(subnet_group_ids).should.equal(subnet_ids) @@ -651,7 +703,8 @@ def test_create_database_subnet_group(): def test_create_database_in_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', @@ -666,7 +719,8 @@ def test_create_database_in_subnet_group(): Port=1234, DBSubnetGroupName='db_subnet1') result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - result['DBInstances'][0]['DBSubnetGroup']['DBSubnetGroupName'].should.equal('db_subnet1') + result['DBInstances'][0]['DBSubnetGroup'][ + 'DBSubnetGroupName'].should.equal('db_subnet1') @disable_on_py3() @@ -675,7 +729,8 @@ def test_create_database_in_subnet_group(): def test_describe_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", @@ -691,9 +746,11 @@ def test_describe_database_subnet_group(): subnets = resp['DBSubnetGroups'][0]['Subnets'] subnets.should.have.length_of(1) - list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1")['DBSubnetGroups']).should.have.length_of(1) + list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1") + ['DBSubnetGroups']).should.have.length_of(1) - conn.describe_db_subnet_groups.when.called_with(DBSubnetGroupName="not-a-subnet").should.throw(ClientError) + conn.describe_db_subnet_groups.when.called_with( + DBSubnetGroupName="not-a-subnet").should.throw(ClientError) @disable_on_py3() @@ -702,7 +759,8 @@ def test_describe_database_subnet_group(): def test_delete_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -718,7 +776,8 @@ def test_delete_database_subnet_group(): result = conn.describe_db_subnet_groups() result['DBSubnetGroups'].should.have.length_of(0) - conn.delete_db_subnet_group.when.called_with(DBSubnetGroupName="db_subnet1").should.throw(ClientError) + conn.delete_db_subnet_group.when.called_with( + DBSubnetGroupName="db_subnet1").should.throw(ClientError) @disable_on_py3() @@ -727,7 +786,8 @@ def test_delete_database_subnet_group(): def test_list_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + 
VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -740,11 +800,13 @@ def test_list_tags_database_subnet_group(): 'Key': 'foo'}, {'Value': 'bar1', 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) + @disable_on_py3() @mock_ec2 @@ -752,7 +814,8 @@ def test_list_tags_database_subnet_group(): def test_add_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -773,8 +836,9 @@ def test_add_tags_database_subnet_group(): result = conn.list_tags_for_resource(ResourceName=resource) result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) + @disable_on_py3() @mock_ec2 @@ -782,7 +846,8 @@ def test_add_tags_database_subnet_group(): def test_remove_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -820,17 +885,22 @@ def test_create_database_replica(): replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", SourceDBInstanceIdentifier="db-master-1", DBInstanceClass="db.m1.small") - replica['DBInstance']['ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') + replica['DBInstance'][ + 'ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal(['db-replica-1']) + master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([ + 'db-replica-1']) - conn.delete_db_instance(DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) + conn.delete_db_instance( + DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([]) + master['DBInstances'][0][ + 'ReadReplicaDBInstanceIdentifiers'].should.equal([]) + @disable_on_py3() @mock_rds2 @@ -854,19 +924,25 @@ def test_create_database_with_encrypted_storage(): KmsKeyId=key['KeyMetadata']['KeyId']) database['DBInstance']['StorageEncrypted'].should.equal(True) - database['DBInstance']['KmsKeyId'].should.equal(key['KeyMetadata']['KeyId']) + database['DBInstance']['KmsKeyId'].should.equal( + 
key['KeyMetadata']['KeyId']) + @disable_on_py3() @mock_rds2 def test_create_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupName'].should.equal('test') + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupFamily'].should.equal('mysql5.6') + db_parameter_group['DBParameterGroup'][ + 'Description'].should.equal('test parameter group') - db_parameter_group['DBParameterGroup']['DBParameterGroupName'].should.equal('test') - db_parameter_group['DBParameterGroup']['DBParameterGroupFamily'].should.equal('mysql5.6') - db_parameter_group['DBParameterGroup']['Description'].should.equal('test parameter group') @disable_on_py3() @mock_rds2 @@ -886,8 +962,11 @@ def test_create_db_instance_with_parameter_group(): Port=1234) len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') - database['DBInstance']['DBParameterGroups'][0]['ParameterApplyStatus'].should.equal('in-sync') + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + @disable_on_py3() @mock_rds2 @@ -902,8 +981,10 @@ def test_modify_db_instance_with_parameter_group(): Port=1234) len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('default.mysql5.6') - database['DBInstance']['DBParameterGroups'][0]['ParameterApplyStatus'].should.equal('in-sync') + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('default.mysql5.6') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', DBParameterGroupFamily='mysql5.6', @@ -912,10 +993,13 @@ def test_modify_db_instance_with_parameter_group(): DBParameterGroupName='test', ApplyImmediately=True) - database = conn.describe_db_instances(DBInstanceIdentifier='db-master-1')['DBInstances'][0] + database = conn.describe_db_instances( + DBInstanceIdentifier='db-master-1')['DBInstances'][0] len(database['DBParameterGroups']).should.equal(1) - database['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') - database['DBParameterGroups'][0]['ParameterApplyStatus'].should.equal('in-sync') + database['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') @disable_on_py3() @@ -946,15 +1030,18 @@ def test_describe_db_parameter_group(): conn.create_db_parameter_group(DBParameterGroupName='test', DBParameterGroupFamily='mysql5.6', Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') - db_parameter_groups['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') @disable_on_py3() @mock_rds2 def 
test_describe_non_existant_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') - db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') len(db_parameter_groups['DBParameterGroups']).should.equal(0) @@ -963,14 +1050,18 @@ def test_describe_non_existant_db_parameter_group(): def test_delete_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') - db_parameter_groups['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') conn.delete_db_parameter_group(DBParameterGroupName='test') - db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') len(db_parameter_groups['DBParameterGroups']).should.equal(0) + @disable_on_py3() @mock_rds2 def test_modify_db_parameter_group(): @@ -986,7 +1077,7 @@ def test_modify_db_parameter_group(): 'Description': 'test param', 'ApplyMethod': 'immediate' }] - ) + ) modify_result['DBParameterGroupName'].should.equal('test') @@ -1001,7 +1092,9 @@ def test_modify_db_parameter_group(): @mock_rds2 def test_delete_non_existant_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_parameter_group.when.called_with(DBParameterGroupName='non-existant').should.throw(ClientError) + conn.delete_db_parameter_group.when.called_with( + DBParameterGroupName='non-existant').should.throw(ClientError) + @disable_on_py3() @mock_rds2 @@ -1011,8 +1104,9 @@ def test_create_parameter_group_with_tags(): DBParameterGroupFamily='mysql5.6', Description='test parameter group', Tags=[{ - 'Key': 'foo', - 'Value': 'bar', + 'Key': 'foo', + 'Value': 'bar', }]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}]) diff --git a/tests/test_rds2/test_server.py b/tests/test_rds2/test_server.py index 19c2b6e9f..f9489e054 100644 --- a/tests/test_rds2/test_server.py +++ b/tests/test_rds2/test_server.py @@ -11,7 +11,7 @@ Test the different server responses #@mock_rds2 -#def test_list_databases(): +# def test_list_databases(): # backend = server.create_backend_app("rds2") # test_client = backend.test_client() # diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 13acf6d7c..41be8f022 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -35,18 +35,21 @@ def test_create_cluster(): ) cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['ClusterIdentifier'].should.equal(cluster_identifier) 
cluster['NodeType'].should.equal("dw.hs1.xlarge") cluster['MasterUsername'].should.equal("username") cluster['DBName'].should.equal("my_db") - cluster['ClusterSecurityGroups'][0]['ClusterSecurityGroupName'].should.equal("Default") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("Default") cluster['VpcSecurityGroups'].should.equal([]) cluster['ClusterSubnetGroupName'].should.equal(None) cluster['AvailabilityZone'].should.equal("us-east-1d") cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:11:00") - cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") cluster['AutomatedSnapshotRetentionPeriod'].should.equal(10) cluster['Port'].should.equal(1234) cluster['ClusterVersion'].should.equal("1.0") @@ -69,7 +72,8 @@ def test_create_single_node_cluster(): ) cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['ClusterIdentifier'].should.equal(cluster_identifier) cluster['NodeType'].should.equal("dw.hs1.xlarge") @@ -91,13 +95,15 @@ def test_default_cluster_attibutes(): ) cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['DBName'].should.equal("dev") cluster['ClusterSubnetGroupName'].should.equal(None) assert "us-east-" in cluster['AvailabilityZone'] cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:03:30") - cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") cluster['AutomatedSnapshotRetentionPeriod'].should.equal(1) cluster['Port'].should.equal(5439) cluster['ClusterVersion'].should.equal("1.0") @@ -127,7 +133,8 @@ def test_create_cluster_in_subnet_group(): ) cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') @@ -153,8 +160,10 @@ def test_create_cluster_with_security_group(): ) cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] - group_names = [group['ClusterSecurityGroupName'] for group in cluster['ClusterSecurityGroups']] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_names = [group['ClusterSecurityGroupName'] + for group in cluster['ClusterSecurityGroups']] set(group_names).should.equal(set(["security_group1", "security_group2"])) @@ -165,7 +174,8 @@ def test_create_cluster_with_vpc_security_groups(): ec2_conn = boto.connect_ec2() redshift_conn = boto.connect_redshift() vpc = vpc_conn.create_vpc("10.0.0.0/16") - security_group = ec2_conn.create_security_group("vpc_security_group", "a group", vpc_id=vpc.id) + security_group = ec2_conn.create_security_group( + "vpc_security_group", 
"a group", vpc_id=vpc.id) redshift_conn.create_cluster( "my_cluster", @@ -176,8 +186,10 @@ def test_create_cluster_with_vpc_security_groups(): ) cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] - group_ids = [group['VpcSecurityGroupId'] for group in cluster['VpcSecurityGroups']] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_ids = [group['VpcSecurityGroupId'] + for group in cluster['VpcSecurityGroups']] list(group_ids).should.equal([security_group.id]) @@ -199,14 +211,17 @@ def test_create_cluster_with_parameter_group(): ) cluster_response = conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] - cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("my_parameter_group") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") @mock_redshift_deprecated def test_describe_non_existant_cluster(): conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_clusters.when.called_with("not-a-cluster").should.throw(ClusterNotFound) + conn.describe_clusters.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) @mock_redshift_deprecated @@ -221,16 +236,19 @@ def test_delete_cluster(): master_user_password="password", ) - clusters = conn.describe_clusters()['DescribeClustersResponse']['DescribeClustersResult']['Clusters'] + clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(1) conn.delete_cluster(cluster_identifier) - clusters = conn.describe_clusters()['DescribeClustersResponse']['DescribeClustersResult']['Clusters'] + clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(0) # Delete invalid id - conn.delete_cluster.when.called_with("not-a-cluster").should.throw(ClusterNotFound) + conn.delete_cluster.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) @mock_redshift_deprecated @@ -269,13 +287,16 @@ def test_modify_cluster(): ) cluster_response = conn.describe_clusters("new_identifier") - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['ClusterIdentifier'].should.equal("new_identifier") cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['ClusterSecurityGroups'][0]['ClusterSecurityGroupName'].should.equal("security_group") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("security_group") cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00") - cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("my_parameter_group") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7) cluster['AllowVersionUpgrade'].should.equal(False) cluster['NumberOfNodes'].should.equal(2) @@ -297,12 +318,15 @@ def test_create_cluster_subnet_group(): subnet_ids=[subnet1.id, subnet2.id], ) - subnets_response = redshift_conn.describe_cluster_subnet_groups("my_subnet") - 
my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse']['DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] + subnets_response = redshift_conn.describe_cluster_subnet_groups( + "my_subnet") + my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") my_subnet['Description'].should.equal("This is my subnet group") - subnet_ids = [subnet['SubnetIdentifier'] for subnet in my_subnet['Subnets']] + subnet_ids = [subnet['SubnetIdentifier'] + for subnet in my_subnet['Subnets']] set(subnet_ids).should.equal(set([subnet1.id, subnet2.id])) @@ -320,7 +344,8 @@ def test_create_invalid_cluster_subnet_group(): @mock_redshift_deprecated def test_describe_non_existant_subnet_group(): conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_subnet_groups.when.called_with("not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + conn.describe_cluster_subnet_groups.when.called_with( + "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) @mock_redshift_deprecated @@ -338,17 +363,20 @@ def test_delete_cluster_subnet_group(): ) subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse']['DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] subnets.should.have.length_of(1) redshift_conn.delete_cluster_subnet_group("my_subnet") subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse']['DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] subnets.should.have.length_of(0) # Delete invalid id - redshift_conn.delete_cluster_subnet_group.when.called_with("not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + redshift_conn.delete_cluster_subnet_group.when.called_with( + "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) @mock_redshift_deprecated @@ -359,8 +387,10 @@ def test_create_cluster_security_group(): "This is my security group", ) - groups_response = conn.describe_cluster_security_groups("my_security_group") - my_group = groups_response['DescribeClusterSecurityGroupsResponse']['DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] + groups_response = conn.describe_cluster_security_groups( + "my_security_group") + my_group = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] my_group['ClusterSecurityGroupName'].should.equal("my_security_group") my_group['Description'].should.equal("This is my security group") @@ -370,7 +400,8 @@ def test_create_cluster_security_group(): @mock_redshift_deprecated def test_describe_non_existant_security_group(): conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_security_groups.when.called_with("not-a-security-group").should.throw(ClusterSecurityGroupNotFound) + conn.describe_cluster_security_groups.when.called_with( + "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) @mock_redshift_deprecated @@ -382,17 +413,20 @@ def test_delete_cluster_security_group(): ) groups_response = conn.describe_cluster_security_groups() - groups 
= groups_response['DescribeClusterSecurityGroupsResponse']['DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] + groups = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] groups.should.have.length_of(2) # The default group already exists conn.delete_cluster_security_group("my_security_group") groups_response = conn.describe_cluster_security_groups() - groups = groups_response['DescribeClusterSecurityGroupsResponse']['DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] + groups = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] groups.should.have.length_of(1) # Delete invalid id - conn.delete_cluster_security_group.when.called_with("not-a-security-group").should.throw(ClusterSecurityGroupNotFound) + conn.delete_cluster_security_group.when.called_with( + "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) @mock_redshift_deprecated @@ -404,8 +438,10 @@ def test_create_cluster_parameter_group(): "This is my parameter group", ) - groups_response = conn.describe_cluster_parameter_groups("my_parameter_group") - my_group = groups_response['DescribeClusterParameterGroupsResponse']['DescribeClusterParameterGroupsResult']['ParameterGroups'][0] + groups_response = conn.describe_cluster_parameter_groups( + "my_parameter_group") + my_group = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'][0] my_group['ParameterGroupName'].should.equal("my_parameter_group") my_group['ParameterGroupFamily'].should.equal("redshift-1.0") @@ -415,7 +451,8 @@ def test_create_cluster_parameter_group(): @mock_redshift_deprecated def test_describe_non_existant_parameter_group(): conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_parameter_groups.when.called_with("not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + conn.describe_cluster_parameter_groups.when.called_with( + "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) @mock_redshift_deprecated @@ -428,14 +465,17 @@ def test_delete_cluster_parameter_group(): ) groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse']['DescribeClusterParameterGroupsResult']['ParameterGroups'] + groups = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'] groups.should.have.length_of(2) # The default group already exists conn.delete_cluster_parameter_group("my_parameter_group") groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse']['DescribeClusterParameterGroupsResult']['ParameterGroups'] + groups = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'] groups.should.have.length_of(1) # Delete invalid id - conn.delete_cluster_parameter_group.when.called_with("not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + conn.delete_cluster_parameter_group.when.called_with( + "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) diff --git a/tests/test_redshift/test_server.py b/tests/test_redshift/test_server.py index a6bdc93f3..ba407ab4c 100644 --- a/tests/test_redshift/test_server.py +++ b/tests/test_redshift/test_server.py @@ -19,5 +19,6 @@ def test_describe_clusters(): res = 
test_client.get('/?Action=DescribeClusters') json_data = json.loads(res.data.decode("utf-8")) - clusters = json_data['DescribeClustersResponse']['DescribeClustersResult']['Clusters'] + clusters = json_data['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] list(clusters).should.equal([]) diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index f376375a0..ea8609556 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -23,15 +23,18 @@ def test_hosted_zone(): zones = conn.get_all_hosted_zones() len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(2) - id1 = firstzone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + id1 = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] zone = conn.get_hosted_zone(id1) - zone["GetHostedZoneResponse"]["HostedZone"]["Name"].should.equal("testdns.aws.com.") + zone["GetHostedZoneResponse"]["HostedZone"][ + "Name"].should.equal("testdns.aws.com.") conn.delete_hosted_zone(id1) zones = conn.get_all_hosted_zones() len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) - conn.get_hosted_zone.when.called_with("abcd").should.throw(boto.route53.exception.DNSServerError, "404 Not Found") + conn.get_hosted_zone.when.called_with("abcd").should.throw( + boto.route53.exception.DNSServerError, "404 Not Found") @mock_route53_deprecated @@ -42,7 +45,8 @@ def test_rrset(): boto.route53.exception.DNSServerError, "404 Not Found") zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] changes = ResourceRecordSets(conn, zoneid) change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") @@ -105,15 +109,18 @@ def test_rrset(): rrsets = conn.get_all_rrsets(zoneid, type="A") rrsets.should.have.length_of(2) - rrsets = conn.get_all_rrsets(zoneid, name="foo.bar.testdns.aws.com", type="A") + rrsets = conn.get_all_rrsets( + zoneid, name="foo.bar.testdns.aws.com", type="A") rrsets.should.have.length_of(1) rrsets[0].resource_records[0].should.equal('1.2.3.4') - rrsets = conn.get_all_rrsets(zoneid, name="bar.foo.testdns.aws.com", type="A") + rrsets = conn.get_all_rrsets( + zoneid, name="bar.foo.testdns.aws.com", type="A") rrsets.should.have.length_of(1) rrsets[0].resource_records[0].should.equal('5.6.7.8') - rrsets = conn.get_all_rrsets(zoneid, name="foo.foo.testdns.aws.com", type="A") + rrsets = conn.get_all_rrsets( + zoneid, name="foo.foo.testdns.aws.com", type="A") rrsets.should.have.length_of(0) @@ -121,7 +128,8 @@ def test_rrset(): def test_rrset_with_multiple_values(): conn = boto.connect_route53('the_key', 'the_secret') zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] changes = ResourceRecordSets(conn, zoneid) change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") @@ -138,11 +146,14 @@ def test_rrset_with_multiple_values(): def test_alias_rrset(): conn = boto.connect_route53('the_key', 'the_secret') zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] changes = ResourceRecordSets(conn, zoneid) - changes.add_change("CREATE", 
"foo.alias.testdns.aws.com", "A", alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="foo.testdns.aws.com") - changes.add_change("CREATE", "bar.alias.testdns.aws.com", "CNAME", alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="bar.testdns.aws.com") + changes.add_change("CREATE", "foo.alias.testdns.aws.com", "A", + alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="foo.testdns.aws.com") + changes.add_change("CREATE", "bar.alias.testdns.aws.com", "CNAME", + alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="bar.testdns.aws.com") changes.commit() rrsets = conn.get_all_rrsets(zoneid, type="A") @@ -169,7 +180,8 @@ def test_create_health_check(): ) conn.create_health_check(check) - checks = conn.get_list_health_checks()['ListHealthChecksResponse']['HealthChecks'] + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] list(checks).should.have.length_of(1) check = checks[0] config = check['HealthCheckConfig'] @@ -195,12 +207,14 @@ def test_delete_health_check(): ) conn.create_health_check(check) - checks = conn.get_list_health_checks()['ListHealthChecksResponse']['HealthChecks'] + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] list(checks).should.have.length_of(1) health_check_id = checks[0]['Id'] conn.delete_health_check(health_check_id) - checks = conn.get_list_health_checks()['ListHealthChecksResponse']['HealthChecks'] + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] list(checks).should.have.length_of(0) @@ -214,14 +228,17 @@ def test_use_health_check_in_resource_record_set(): hc_type="HTTP", resource_path="/", ) - check = conn.create_health_check(check)['CreateHealthCheckResponse']['HealthCheck'] + check = conn.create_health_check( + check)['CreateHealthCheckResponse']['HealthCheck'] check_id = check['Id'] zone = conn.create_hosted_zone("testdns.aws.com") - zone_id = zone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + zone_id = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] changes = ResourceRecordSets(conn, zone_id) - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A", health_check=check_id) + change = changes.add_change( + "CREATE", "foo.bar.testdns.aws.com", "A", health_check=check_id) change.add_value("1.2.3.4") changes.commit() @@ -233,14 +250,18 @@ def test_use_health_check_in_resource_record_set(): def test_hosted_zone_comment_preserved(): conn = boto.connect_route53('the_key', 'the_secret') - firstzone = conn.create_hosted_zone("testdns.aws.com.", comment="test comment") - zone_id = firstzone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + firstzone = conn.create_hosted_zone( + "testdns.aws.com.", comment="test comment") + zone_id = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] hosted_zone = conn.get_hosted_zone(zone_id) - hosted_zone["GetHostedZoneResponse"]["HostedZone"]["Config"]["Comment"].should.equal("test comment") + hosted_zone["GetHostedZoneResponse"]["HostedZone"][ + "Config"]["Comment"].should.equal("test comment") hosted_zones = conn.get_all_hosted_zones() - hosted_zones["ListHostedZonesResponse"]["HostedZones"][0]["Config"]["Comment"].should.equal("test comment") + hosted_zones["ListHostedZonesResponse"]["HostedZones"][ + 0]["Config"]["Comment"].should.equal("test comment") zone = conn.get_zone("testdns.aws.com.") zone.config["Comment"].should.equal("test comment") @@ -253,16 +274,20 @@ def test_deleting_weighted_route(): 
conn.create_hosted_zone("testdns.aws.com.") zone = conn.get_zone("testdns.aws.com.") - zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-foo', '50')) - zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-bar', '50')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-foo', '50')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-bar', '50')) cnames = zone.get_cname('cname.testdns.aws.com.', all=True) cnames.should.have.length_of(2) - foo_cname = [cname for cname in cnames if cname.identifier == 'success-test-foo'][0] + foo_cname = [cname for cname in cnames if cname.identifier == + 'success-test-foo'][0] zone.delete_record(foo_cname) cname = zone.get_cname('cname.testdns.aws.com.', all=True) - # When get_cname only had one result, it returns just that result instead of a list. + # When get_cname only had one result, it returns just that result instead + # of a list. cname.identifier.should.equal('success-test-bar') @@ -273,17 +298,21 @@ def test_deleting_latency_route(): conn.create_hosted_zone("testdns.aws.com.") zone = conn.get_zone("testdns.aws.com.") - zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-foo', 'us-west-2')) - zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-bar', 'us-west-1')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-foo', 'us-west-2')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-bar', 'us-west-1')) cnames = zone.get_cname('cname.testdns.aws.com.', all=True) cnames.should.have.length_of(2) - foo_cname = [cname for cname in cnames if cname.identifier == 'success-test-foo'][0] + foo_cname = [cname for cname in cnames if cname.identifier == + 'success-test-foo'][0] foo_cname.region.should.equal('us-west-2') zone.delete_record(foo_cname) cname = zone.get_cname('cname.testdns.aws.com.', all=True) - # When get_cname only had one result, it returns just that result instead of a list. + # When get_cname only had one result, it returns just that result instead + # of a list. cname.identifier.should.equal('success-test-bar') cname.region.should.equal('us-west-1') @@ -292,15 +321,19 @@ def test_deleting_latency_route(): def test_hosted_zone_private_zone_preserved(): conn = boto.connect_route53('the_key', 'the_secret') - firstzone = conn.create_hosted_zone("testdns.aws.com.", private_zone=True, vpc_id='vpc-fake', vpc_region='us-east-1') - zone_id = firstzone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + firstzone = conn.create_hosted_zone( + "testdns.aws.com.", private_zone=True, vpc_id='vpc-fake', vpc_region='us-east-1') + zone_id = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] hosted_zone = conn.get_hosted_zone(zone_id) # in (original) boto, these bools returned as strings. 
- hosted_zone["GetHostedZoneResponse"]["HostedZone"]["Config"]["PrivateZone"].should.equal('True') + hosted_zone["GetHostedZoneResponse"]["HostedZone"][ + "Config"]["PrivateZone"].should.equal('True') hosted_zones = conn.get_all_hosted_zones() - hosted_zones["ListHostedZonesResponse"]["HostedZones"][0]["Config"]["PrivateZone"].should.equal('True') + hosted_zones["ListHostedZonesResponse"]["HostedZones"][ + 0]["Config"]["PrivateZone"].should.equal('True') zone = conn.get_zone("testdns.aws.com.") zone.config["PrivateZone"].should.equal('True') @@ -331,6 +364,7 @@ def test_hosted_zone_private_zone_preserved_boto3(): # zone = conn.list_hosted_zones_by_name(DNSName="testdns.aws.com.") # zone.config["PrivateZone"].should.equal(True) + @mock_route53 def test_list_or_change_tags_for_resource_request(): conn = boto3.client('route53', region_name='us-east-1') @@ -359,7 +393,8 @@ def test_list_or_change_tags_for_resource_request(): ) # Check to make sure that the response has the 'ResourceTagSet' key - response = conn.list_tags_for_resource(ResourceType='healthcheck', ResourceId=healthcheck_id) + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) response.should.contain('ResourceTagSet') # Validate that each key was added @@ -376,7 +411,8 @@ def test_list_or_change_tags_for_resource_request(): ) # Check to make sure that the response has the 'ResourceTagSet' key - response = conn.list_tags_for_resource(ResourceType='healthcheck', ResourceId=healthcheck_id) + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) response.should.contain('ResourceTagSet') response['ResourceTagSet']['Tags'].should_not.contain(tag1) response['ResourceTagSet']['Tags'].should.contain(tag2) @@ -388,7 +424,8 @@ def test_list_or_change_tags_for_resource_request(): RemoveTagKeys=[tag2['Key']] ) - response = conn.list_tags_for_resource(ResourceType='healthcheck', ResourceId=healthcheck_id) + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) response['ResourceTagSet']['Tags'].should_not.contain(tag2) # Re-add the tags @@ -405,5 +442,6 @@ def test_list_or_change_tags_for_resource_request(): RemoveTagKeys=[tag1['Key'], tag2['Key']] ) - response = conn.list_tags_for_resource(ResourceType='healthcheck', ResourceId=healthcheck_id) + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) response['ResourceTagSet']['Tags'].should.be.empty diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index e424ba6a3..32b772abe 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -48,6 +48,7 @@ def reduced_min_part_size(f): class MyModel(object): + def __init__(self, name, value): self.name = name self.value = value @@ -67,7 +68,8 @@ def test_my_model_save(): model_instance = MyModel('steve', 'is awesome') model_instance.save() - body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8") + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") assert body == b'is awesome' @@ -110,7 +112,8 @@ def test_multipart_upload(): multipart.upload_part_from_file(BytesIO(part2), 2) multipart.complete_upload() # we should get both parts as the key contents - bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2) + bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + part2) @mock_s3_deprecated @@ -127,7 +130,8 @@ def test_multipart_upload_out_of_order(): 
multipart.upload_part_from_file(BytesIO(part1), 2) multipart.complete_upload() # we should get both parts as the key contents - bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2) + bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + part2) @mock_s3_deprecated @@ -136,7 +140,8 @@ def test_multipart_upload_with_headers(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") - multipart = bucket.initiate_multipart_upload("the-key", metadata={"foo": "bar"}) + multipart = bucket.initiate_multipart_upload( + "the-key", metadata={"foo": "bar"}) part1 = b'0' * 10 multipart.upload_part_from_file(BytesIO(part1), 1) multipart.complete_upload() @@ -159,7 +164,8 @@ def test_multipart_upload_with_copy_key(): multipart.upload_part_from_file(BytesIO(part1), 1) multipart.copy_part_from_key("foobar", "original-key", 2, 0, 3) multipart.complete_upload() - bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + b"key_") + bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + b"key_") @mock_s3_deprecated @@ -229,7 +235,8 @@ def test_multipart_duplicate_upload(): multipart.upload_part_from_file(BytesIO(part2), 2) multipart.complete_upload() # We should get only one copy of part 1. - bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2) + bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + part2) @mock_s3_deprecated @@ -260,7 +267,8 @@ def test_key_save_to_missing_bucket(): key = Key(bucket) key.key = "the-key" - key.set_contents_from_string.when.called_with("foobar").should.throw(S3ResponseError) + key.set_contents_from_string.when.called_with( + "foobar").should.throw(S3ResponseError) @mock_s3_deprecated @@ -275,7 +283,8 @@ def test_missing_key_urllib2(): conn = boto.connect_s3('the_key', 'the_secret') conn.create_bucket("foobar") - urlopen.when.called_with("http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError) + urlopen.when.called_with( + "http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError) @mock_s3_deprecated @@ -315,7 +324,8 @@ def test_large_key_save(): key.key = "the-key" key.set_contents_from_string("foobar" * 100000) - bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar' * 100000) + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) @mock_s3_deprecated @@ -328,8 +338,10 @@ def test_copy_key(): bucket.copy_key('new-key', 'foobar', 'the-key') - bucket.get_key("the-key").get_contents_as_string().should.equal(b"some value") - bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "new-key").get_contents_as_string().should.equal(b"some value") @mock_s3_deprecated @@ -344,8 +356,10 @@ def test_copy_key_with_version(): bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id='0') - bucket.get_key("the-key").get_contents_as_string().should.equal(b"another value") - bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b"another value") + bucket.get_key( + "new-key").get_contents_as_string().should.equal(b"some value") @mock_s3_deprecated @@ -373,7 +387,8 @@ def test_copy_key_replace_metadata(): metadata={'momd': 'Mometadatastring'}) bucket.get_key("new-key").get_metadata('md').should.be.none - 
bucket.get_key("new-key").get_metadata('momd').should.equal('Mometadatastring') + bucket.get_key( + "new-key").get_metadata('momd').should.equal('Mometadatastring') @freeze_time("2012-01-01 12:00:00") @@ -389,7 +404,8 @@ def test_last_modified(): rs = bucket.get_all_keys() rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z') - bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') + bucket.get_key( + "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') @mock_s3_deprecated @@ -401,7 +417,8 @@ def test_missing_bucket(): @mock_s3_deprecated def test_bucket_with_dash(): conn = boto.connect_s3('the_key', 'the_secret') - conn.get_bucket.when.called_with('mybucket-test').should.throw(S3ResponseError) + conn.get_bucket.when.called_with( + 'mybucket-test').should.throw(S3ResponseError) @mock_s3_deprecated @@ -432,7 +449,8 @@ def test_create_existing_bucket_in_us_east_1(): @mock_s3_deprecated def test_other_region(): - conn = S3Connection('key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com') + conn = S3Connection( + 'key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com') conn.create_bucket("foobar") list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) @@ -613,7 +631,8 @@ def test_bucket_key_listing_order(): delimiter = None keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] - keys.should.equal([u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key']) + keys.should.equal( + [u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key']) delimiter = '/' keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] @@ -640,7 +659,8 @@ def test_copy_key_reduced_redundancy(): key.key = "the-key" key.set_contents_from_string("some value") - bucket.copy_key('new-key', 'foobar', 'the-key', storage_class='REDUCED_REDUNDANCY') + bucket.copy_key('new-key', 'foobar', 'the-key', + storage_class='REDUCED_REDUNDANCY') # we use the bucket iterator because of: # https:/github.com/boto/boto/issues/1173 @@ -886,34 +906,54 @@ def test_ranged_get(): key.set_contents_from_string(rep * 10) # Implicitly bounded range requests. - key.get_contents_as_string(headers={'Range': 'bytes=0-'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=50-'}).should.equal(rep * 5) - key.get_contents_as_string(headers={'Range': 'bytes=99-'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=0-'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=50-'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=99-'}).should.equal(b'9') # Explicitly bounded range requests starting from the first byte. 
- key.get_contents_as_string(headers={'Range': 'bytes=0-0'}).should.equal(b'0') - key.get_contents_as_string(headers={'Range': 'bytes=0-49'}).should.equal(rep * 5) - key.get_contents_as_string(headers={'Range': 'bytes=0-99'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=0-100'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=0-700'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=0-0'}).should.equal(b'0') + key.get_contents_as_string( + headers={'Range': 'bytes=0-49'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=0-99'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=0-100'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=0-700'}).should.equal(rep * 10) # Explicitly bounded range requests starting from a middle byte. - key.get_contents_as_string(headers={'Range': 'bytes=50-54'}).should.equal(rep[:5]) - key.get_contents_as_string(headers={'Range': 'bytes=50-99'}).should.equal(rep * 5) - key.get_contents_as_string(headers={'Range': 'bytes=50-100'}).should.equal(rep * 5) - key.get_contents_as_string(headers={'Range': 'bytes=50-700'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=50-54'}).should.equal(rep[:5]) + key.get_contents_as_string( + headers={'Range': 'bytes=50-99'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=50-100'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=50-700'}).should.equal(rep * 5) # Explicitly bounded range requests starting from the last byte. - key.get_contents_as_string(headers={'Range': 'bytes=99-99'}).should.equal(b'9') - key.get_contents_as_string(headers={'Range': 'bytes=99-100'}).should.equal(b'9') - key.get_contents_as_string(headers={'Range': 'bytes=99-700'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=99-99'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=99-100'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=99-700'}).should.equal(b'9') # Suffix range requests. 
- key.get_contents_as_string(headers={'Range': 'bytes=-1'}).should.equal(b'9') - key.get_contents_as_string(headers={'Range': 'bytes=-60'}).should.equal(rep * 6) - key.get_contents_as_string(headers={'Range': 'bytes=-100'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=-101'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=-700'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=-1'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=-60'}).should.equal(rep * 6) + key.get_contents_as_string( + headers={'Range': 'bytes=-100'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=-101'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=-700'}).should.equal(rep * 10) key.size.should.equal(100) @@ -1006,6 +1046,7 @@ def test_boto3_key_etag(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"') + @mock_s3 def test_boto3_list_keys_xml_escaped(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1045,13 +1086,13 @@ def test_boto3_list_objects_v2_truncated_response(): assert resp['IsTruncated'] == True assert 'Delimiter' not in resp assert 'StartAfter' not in resp - assert 'Owner' not in listed_object # owner info was not requested + assert 'Owner' not in listed_object # owner info was not requested next_token = resp['NextContinuationToken'] - # Second list - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + resp = s3.list_objects_v2( + Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) listed_object = resp['Contents'][0] assert listed_object['Key'] == 'three' @@ -1065,9 +1106,9 @@ def test_boto3_list_objects_v2_truncated_response(): next_token = resp['NextContinuationToken'] - # Third list - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + resp = s3.list_objects_v2( + Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) listed_object = resp['Contents'][0] assert listed_object['Key'] == 'two' @@ -1107,7 +1148,7 @@ def test_boto3_list_objects_v2_truncated_response_start_after(): # Second list # The ContinuationToken must take precedence over StartAfter. 
     resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one',
-                             ContinuationToken=next_token)
+                              ContinuationToken=next_token)
     listed_object = resp['Contents'][0]
 
     assert listed_object['Key'] == 'two'
@@ -1143,7 +1184,8 @@ def test_boto3_bucket_create():
 
     s3.Object('blah', 'hello.txt').put(Body="some text")
 
-    s3.Object('blah', 'hello.txt').get()['Body'].read().decode("utf-8").should.equal("some text")
+    s3.Object('blah', 'hello.txt').get()['Body'].read().decode(
+        "utf-8").should.equal("some text")
 
 
 @mock_s3
@@ -1153,7 +1195,8 @@ def test_boto3_bucket_create_eu_central():
 
     s3.Object('blah', 'hello.txt').put(Body="some text")
 
-    s3.Object('blah', 'hello.txt').get()['Body'].read().decode("utf-8").should.equal("some text")
+    s3.Object('blah', 'hello.txt').get()['Body'].read().decode(
+        "utf-8").should.equal("some text")
 
 
 @mock_s3
@@ -1163,10 +1206,12 @@ def test_boto3_head_object():
 
     s3.Object('blah', 'hello.txt').put(Body="some text")
 
-    s3.Object('blah', 'hello.txt').meta.client.head_object(Bucket='blah', Key='hello.txt')
+    s3.Object('blah', 'hello.txt').meta.client.head_object(
+        Bucket='blah', Key='hello.txt')
 
     with assert_raises(ClientError):
-        s3.Object('blah', 'hello2.txt').meta.client.head_object(Bucket='blah', Key='hello_bad.txt')
+        s3.Object('blah', 'hello2.txt').meta.client.head_object(
+            Bucket='blah', Key='hello_bad.txt')
 
 
 @mock_s3
@@ -1176,7 +1221,8 @@ def test_boto3_get_object():
 
     s3.Object('blah', 'hello.txt').put(Body="some text")
 
-    s3.Object('blah', 'hello.txt').meta.client.head_object(Bucket='blah', Key='hello.txt')
+    s3.Object('blah', 'hello.txt').meta.client.head_object(
+        Bucket='blah', Key='hello.txt')
 
     with assert_raises(ClientError) as e:
         s3.Object('blah', 'hello2.txt').get()
diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py
index f0a70bc6f..5cae8f790 100644
--- a/tests/test_s3/test_s3_lifecycle.py
+++ b/tests/test_s3/test_s3_lifecycle.py
@@ -56,9 +56,9 @@ def test_lifecycle_multi():
     lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
     lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
     lifecycle.add_rule("4", "4/", "Enabled", None,
-            Transition(days=4, storage_class=sc))
+                       Transition(days=4, storage_class=sc))
     lifecycle.add_rule("5", "5/", "Enabled", None,
-            Transition(date=date, storage_class=sc))
+                       Transition(date=date, storage_class=sc))
     bucket.configure_lifecycle(lifecycle)
 
     # read the lifecycle back
diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py
index 3b1d4a01a..b4f56d89a 100644
--- a/tests/test_s3/test_s3_utils.py
+++ b/tests/test_s3/test_s3_utils.py
@@ -8,11 +8,14 @@ def test_base_url():
 
 
 def test_localhost_bucket():
-    expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc')).should.equal("wfoobar")
+    expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc')
+           ).should.equal("wfoobar")
 
 
 def test_localhost_without_bucket():
-    expect(bucket_name_from_url('https://www.localhost:5000/def')).should.equal(None)
+    expect(bucket_name_from_url(
+        'https://www.localhost:5000/def')).should.equal(None)
+
 
 def test_versioned_key_store():
     d = _VersionedKeyStore()
diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py
index 303224541..f6b8f889c 100644
--- a/tests/test_s3/test_server.py
+++ b/tests/test_s3/test_server.py
@@ -31,7 +31,8 @@ def test_s3_server_bucket_create():
     res.status_code.should.equal(200)
     res.data.should.contain(b"ListBucketResult")
 
-    res = test_client.put('/bar', 'http://foobaz.localhost:5000/', data='test value')
+    res = test_client.put(
+        '/bar', 'http://foobaz.localhost:5000/', data='test value')
     res.status_code.should.equal(200)
 
     res = test_client.get('/bar', 'http://foobaz.localhost:5000/')
@@ -45,7 +46,8 @@ def test_s3_server_bucket_versioning():
 
     # Just enough XML to enable versioning
     body = '<Status>Enabled</Status>'
-    res = test_client.put('/?versioning', 'http://foobaz.localhost:5000', data=body)
+    res = test_client.put(
+        '/?versioning', 'http://foobaz.localhost:5000', data=body)
     res.status_code.should.equal(200)
 
diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_bucket_path_server.py
index adc5de532..c67a2bcaa 100644
--- a/tests/test_s3bucket_path/test_bucket_path_server.py
+++ b/tests/test_s3bucket_path/test_bucket_path_server.py
@@ -44,7 +44,8 @@ def test_s3_server_bucket_create():
     res = test_client.get('/missing-bucket', 'http://localhost:5000')
     res.status_code.should.equal(404)
 
-    res = test_client.put('/foobar/bar', 'http://localhost:5000', data='test value')
+    res = test_client.put(
+        '/foobar/bar', 'http://localhost:5000', data='test value')
     res.status_code.should.equal(200)
 
     res = test_client.get('/foobar/bar', 'http://localhost:5000')
diff --git a/tests/test_s3bucket_path/test_s3bucket_path.py b/tests/test_s3bucket_path/test_s3bucket_path.py
index 528c75368..21d786c61 100644
--- a/tests/test_s3bucket_path/test_s3bucket_path.py
+++ b/tests/test_s3bucket_path/test_s3bucket_path.py
@@ -20,6 +20,7 @@ def create_connection(key=None, secret=None):
 
 
 class MyModel(object):
+
     def __init__(self, name, value):
         self.name = name
         self.value = value
@@ -42,7 +43,8 @@ def test_my_model_save():
     model_instance = MyModel('steve', 'is awesome')
     model_instance.save()
 
-    conn.get_bucket('mybucket').get_key('steve').get_contents_as_string().should.equal(b'is awesome')
+    conn.get_bucket('mybucket').get_key(
+        'steve').get_contents_as_string().should.equal(b'is awesome')
 
 
 @mock_s3_deprecated
@@ -57,7 +59,8 @@ def test_missing_key_urllib2():
     conn = create_connection('the_key', 'the_secret')
     conn.create_bucket("foobar")
 
-    urlopen.when.called_with("http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError)
+    urlopen.when.called_with(
+        "http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError)
 
 
 @mock_s3_deprecated
@@ -93,7 +96,8 @@ def test_large_key_save():
     key.key = "the-key"
     key.set_contents_from_string("foobar" * 100000)
 
-    bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar' * 100000)
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(b'foobar' * 100000)
 
 
 @mock_s3_deprecated
@@ -106,8 +110,10 @@ def test_copy_key():
 
     bucket.copy_key('new-key', 'foobar', 'the-key')
 
-    bucket.get_key("the-key").get_contents_as_string().should.equal(b"some value")
-    bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value")
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(b"some value")
+    bucket.get_key(
+        "new-key").get_contents_as_string().should.equal(b"some value")
 
 
 @mock_s3_deprecated
@@ -135,7 +141,8 @@ def test_last_modified():
     rs = bucket.get_all_keys()
     rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z')
 
-    bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
+    bucket.get_key(
+        "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
 
 
 @mock_s3_deprecated
@@ -147,7 +154,8 @@ def test_missing_bucket():
 @mock_s3_deprecated
 def test_bucket_with_dash():
     conn = create_connection('the_key', 'the_secret')
-    conn.get_bucket.when.called_with('mybucket-test').should.throw(S3ResponseError)
+    conn.get_bucket.when.called_with(
+        'mybucket-test').should.throw(S3ResponseError)
 
 
 @mock_s3_deprecated
@@ -268,7 +276,8 @@ def test_bucket_key_listing_order():
 
     delimiter = None
     keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
-    keys.should.equal(['toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key'])
+    keys.should.equal(
+        ['toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key'])
 
     delimiter = '/'
     keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
diff --git a/tests/test_s3bucket_path/test_s3bucket_path_utils.py b/tests/test_s3bucket_path/test_s3bucket_path_utils.py
index 8497f8184..c607ea2ec 100644
--- a/tests/test_s3bucket_path/test_s3bucket_path_utils.py
+++ b/tests/test_s3bucket_path/test_s3bucket_path_utils.py
@@ -8,7 +8,8 @@ def test_base_url():
 
 
 def test_localhost_bucket():
-    expect(bucket_name_from_url('https://localhost:5000/wfoobar/abc')).should.equal("wfoobar")
+    expect(bucket_name_from_url('https://localhost:5000/wfoobar/abc')
+           ).should.equal("wfoobar")
 
 
 def test_localhost_without_bucket():
diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py
index 7771b9a65..431d42e1d 100644
--- a/tests/test_ses/test_ses.py
+++ b/tests/test_ses/test_ses.py
@@ -15,7 +15,8 @@ def test_verify_email_identity():
     conn.verify_email_identity("test@example.com")
 
     identities = conn.list_identities()
-    address = identities['ListIdentitiesResponse']['ListIdentitiesResult']['Identities'][0]
+    address = identities['ListIdentitiesResponse'][
+        'ListIdentitiesResult']['Identities'][0]
     address.should.equal('test@example.com')
 
@@ -27,7 +28,8 @@ def test_domain_verify():
     conn.verify_domain_identity("domain2.com")
 
     identities = conn.list_identities()
-    domains = list(identities['ListIdentitiesResponse']['ListIdentitiesResult']['Identities'])
+    domains = list(identities['ListIdentitiesResponse'][
+        'ListIdentitiesResult']['Identities'])
     domains.should.equal(['domain1.com', 'domain2.com'])
 
@@ -36,9 +38,11 @@ def test_delete_identity():
     conn = boto.connect_ses('the_key', 'the_secret')
     conn.verify_email_identity("test@example.com")
 
-    conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult']['Identities'].should.have.length_of(1)
+    conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][
+        'Identities'].should.have.length_of(1)
     conn.delete_identity("test@example.com")
-    conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult']['Identities'].should.have.length_of(0)
+    conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][
+        'Identities'].should.have.length_of(0)
 
 
 @mock_ses_deprecated
@@ -50,12 +54,15 @@ def test_send_email():
                     "test body", "test_to@example.com").should.throw(BotoServerError)
 
     conn.verify_email_identity("test@example.com")
-    conn.send_email("test@example.com", "test subject", "test body", "test_to@example.com")
+    conn.send_email("test@example.com", "test subject",
+                    "test body", "test_to@example.com")
 
     send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours'])
+    sent_count = int(send_quota['GetSendQuotaResponse'][
+        'GetSendQuotaResult']['SentLast24Hours'])
     sent_count.should.equal(1)
 
+
 @mock_ses_deprecated
 def test_send_html_email():
     conn = boto.connect_ses('the_key', 'the_secret')
@@ -65,12 +72,15 @@ def test_send_html_email():
                     "test body", "test_to@example.com", format="html").should.throw(BotoServerError)
 
     conn.verify_email_identity("test@example.com")
-    conn.send_email("test@example.com", "test subject", "test body", "test_to@example.com", format="html")
+    conn.send_email("test@example.com", "test subject",
+                    "test body", "test_to@example.com", format="html")
 
     send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours'])
+    sent_count = int(send_quota['GetSendQuotaResponse'][
+        'GetSendQuotaResult']['SentLast24Hours'])
     sent_count.should.equal(1)
 
+
 @mock_ses_deprecated
 def test_send_raw_email():
     conn = boto.connect_ses('the_key', 'the_secret')
@@ -101,5 +111,6 @@ def test_send_raw_email():
     )
 
     send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours'])
+    sent_count = int(send_quota['GetSendQuotaResponse'][
+        'GetSendQuotaResult']['SentLast24Hours'])
     sent_count.should.equal(1)
diff --git a/tests/test_sns/test_application.py b/tests/test_sns/test_application.py
index 31db73f62..613b11af5 100644
--- a/tests/test_sns/test_application.py
+++ b/tests/test_sns/test_application.py
@@ -17,8 +17,10 @@ def test_create_platform_application():
             "PlatformPrincipal": "platform_principal",
         },
     )
-    application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
-    application_arn.should.equal('arn:aws:sns:us-east-1:123456789012:app/APNS/my-application')
+    application_arn = platform_application['CreatePlatformApplicationResponse'][
+        'CreatePlatformApplicationResult']['PlatformApplicationArn']
+    application_arn.should.equal(
+        'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application')
 
 
 @mock_sns_deprecated
@@ -32,8 +34,10 @@ def test_get_platform_application_attributes():
             "PlatformPrincipal": "platform_principal",
         },
     )
-    arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
-    attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse']['GetPlatformApplicationAttributesResult']['Attributes']
+    arn = platform_application['CreatePlatformApplicationResponse'][
+        'CreatePlatformApplicationResult']['PlatformApplicationArn']
+    attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][
+        'GetPlatformApplicationAttributesResult']['Attributes']
     attributes.should.equal({
         "PlatformCredential": "platform_credential",
         "PlatformPrincipal": "platform_principal",
@@ -43,7 +47,8 @@ def test_get_missing_platform_application_attributes():
     conn = boto.connect_sns()
-    conn.get_platform_application_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError)
+    conn.get_platform_application_attributes.when.called_with(
+        "a-fake-arn").should.throw(BotoServerError)
 
 
 @mock_sns_deprecated
@@ -57,11 +62,13 @@ def test_set_platform_application_attributes():
             "PlatformPrincipal": "platform_principal",
         },
     )
-    arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
+    arn = platform_application['CreatePlatformApplicationResponse'][
+        'CreatePlatformApplicationResult']['PlatformApplicationArn']
     conn.set_platform_application_attributes(arn,
-            {"PlatformPrincipal": "other"}
-            )
-    attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse']['GetPlatformApplicationAttributesResult']['Attributes']
+                                             {"PlatformPrincipal": "other"}
+                                             )
+    attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][
+        'GetPlatformApplicationAttributesResult']['Attributes']
     attributes.should.equal({
         "PlatformCredential": "platform_credential",
         "PlatformPrincipal": "other",
@@ -81,7 +88,8 @@ def test_list_platform_applications():
     )
 
     applications_repsonse = conn.list_platform_applications()
-    applications = applications_repsonse['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications']
+    applications = applications_repsonse['ListPlatformApplicationsResponse'][
+        'ListPlatformApplicationsResult']['PlatformApplications']
 
     applications.should.have.length_of(2)
 
@@ -98,14 +106,16 @@ def test_delete_platform_application():
     )
 
     applications_repsonse = conn.list_platform_applications()
-    applications = applications_repsonse['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications']
+    applications = applications_repsonse['ListPlatformApplicationsResponse'][
+        'ListPlatformApplicationsResult']['PlatformApplications']
     applications.should.have.length_of(2)
 
     application_arn = applications[0]['PlatformApplicationArn']
     conn.delete_platform_application(application_arn)
 
     applications_repsonse = conn.list_platform_applications()
-    applications = applications_repsonse['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications']
+    applications = applications_repsonse['ListPlatformApplicationsResponse'][
+        'ListPlatformApplicationsResult']['PlatformApplications']
     applications.should.have.length_of(1)
 
@@ -116,7 +126,8 @@ def test_create_platform_endpoint():
         name="my-application",
         platform="APNS",
     )
-    application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
+    application_arn = platform_application['CreatePlatformApplicationResponse'][
+        'CreatePlatformApplicationResult']['PlatformApplicationArn']
 
     endpoint = conn.create_platform_endpoint(
         platform_application_arn=application_arn,
@@ -127,8 +138,10 @@ def test_create_platform_endpoint():
         },
     )
 
-    endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
-    endpoint_arn.should.contain("arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/")
+    endpoint_arn = endpoint['CreatePlatformEndpointResponse'][
+        'CreatePlatformEndpointResult']['EndpointArn']
+    endpoint_arn.should.contain(
+        "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/")
 
 
 @mock_sns_deprecated
@@ -138,7 +151,8 @@ def test_get_list_endpoints_by_platform_application():
         name="my-application",
         platform="APNS",
     )
-    application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
+    application_arn = platform_application['CreatePlatformApplicationResponse'][
+        'CreatePlatformApplicationResult']['PlatformApplicationArn']
 
     endpoint = conn.create_platform_endpoint(
         platform_application_arn=application_arn,
@@ -148,7 +162,8 @@ def test_get_list_endpoints_by_platform_application():
             "CustomUserData": "some data",
         },
     )
-    endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
+    endpoint_arn = endpoint['CreatePlatformEndpointResponse'][
+        'CreatePlatformEndpointResult']['EndpointArn']
 
     endpoint_list = conn.list_endpoints_by_platform_application(
         platform_application_arn=application_arn
@@ -166,7 +181,8 @@ def test_get_endpoint_attributes():
         name="my-application",
         platform="APNS",
    )
-    application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
+    application_arn = platform_application['CreatePlatformApplicationResponse'][
+        'CreatePlatformApplicationResult']['PlatformApplicationArn']
 
     endpoint = conn.create_platform_endpoint(
         platform_application_arn=application_arn,
@@ -177,9 +193,11 @@ def test_get_endpoint_attributes():
             "CustomUserData": "some data",
         },
     )
-    endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
+    endpoint_arn = endpoint['CreatePlatformEndpointResponse'][
+        'CreatePlatformEndpointResult']['EndpointArn']
 
-    attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse']['GetEndpointAttributesResult']['Attributes']
+    attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][
+        'GetEndpointAttributesResult']['Attributes']
     attributes.should.equal({
         "Token": "some_unique_id",
         "Enabled": 'False',
@@ -190,7 +208,8 @@ def test_get_missing_endpoint_attributes():
     conn = boto.connect_sns()
-    conn.get_endpoint_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError)
+    conn.get_endpoint_attributes.when.called_with(
+        "a-fake-arn").should.throw(BotoServerError)
 
 
 @mock_sns_deprecated
@@ -200,7 +219,8 @@ def test_set_endpoint_attributes():
         name="my-application",
         platform="APNS",
     )
-    application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
+    application_arn = platform_application['CreatePlatformApplicationResponse'][
+        'CreatePlatformApplicationResult']['PlatformApplicationArn']
 
     endpoint = conn.create_platform_endpoint(
         platform_application_arn=application_arn,
@@ -211,12 +231,14 @@ def test_set_endpoint_attributes():
             "CustomUserData": "some data",
         },
     )
-    endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
+    endpoint_arn = endpoint['CreatePlatformEndpointResponse'][
+        'CreatePlatformEndpointResult']['EndpointArn']
 
     conn.set_endpoint_attributes(endpoint_arn,
-            {"CustomUserData": "other data"}
-            )
-    attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse']['GetEndpointAttributesResult']['Attributes']
+                                 {"CustomUserData": "other data"}
+                                 )
+    attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][
+        'GetEndpointAttributesResult']['Attributes']
     attributes.should.equal({
         "Token": "some_unique_id",
         "Enabled": 'False',
@@ -231,7 +253,8 @@ def test_delete_endpoint():
         name="my-application",
         platform="APNS",
     )
-    application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
+    application_arn = platform_application['CreatePlatformApplicationResponse'][
+        'CreatePlatformApplicationResult']['PlatformApplicationArn']
 
     endpoint = conn.create_platform_endpoint(
         platform_application_arn=application_arn,
@@ -242,7 +265,8 @@ def test_delete_endpoint():
             "CustomUserData": "some data",
        },
     )
-    endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
+    endpoint_arn = endpoint['CreatePlatformEndpointResponse'][
+        'CreatePlatformEndpointResult']['EndpointArn']
 
     endpoint_list = conn.list_endpoints_by_platform_application(
         platform_application_arn=application_arn
@@ -265,7 +289,8 @@ def test_publish_to_platform_endpoint():
name="my-application", platform="APNS", ) - application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] endpoint = conn.create_platform_endpoint( platform_application_arn=application_arn, @@ -276,6 +301,8 @@ def test_publish_to_platform_endpoint(): }, ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn'] + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] - conn.publish(message="some message", message_structure="json", target_arn=endpoint_arn) + conn.publish(message="some message", message_structure="json", + target_arn=endpoint_arn) diff --git a/tests/test_sns/test_application_boto3.py b/tests/test_sns/test_application_boto3.py index 251d1cf1d..968240b15 100644 --- a/tests/test_sns/test_application_boto3.py +++ b/tests/test_sns/test_application_boto3.py @@ -18,7 +18,8 @@ def test_create_platform_application(): }, ) application_arn = response['PlatformApplicationArn'] - application_arn.should.equal('arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') + application_arn.should.equal( + 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') @mock_sns @@ -33,7 +34,8 @@ def test_get_platform_application_attributes(): }, ) arn = platform_application['PlatformApplicationArn'] - attributes = conn.get_platform_application_attributes(PlatformApplicationArn=arn)['Attributes'] + attributes = conn.get_platform_application_attributes( + PlatformApplicationArn=arn)['Attributes'] attributes.should.equal({ "PlatformCredential": "platform_credential", "PlatformPrincipal": "platform_principal", @@ -43,7 +45,8 @@ def test_get_platform_application_attributes(): @mock_sns def test_get_missing_platform_application_attributes(): conn = boto3.client('sns', region_name='us-east-1') - conn.get_platform_application_attributes.when.called_with(PlatformApplicationArn="a-fake-arn").should.throw(ClientError) + conn.get_platform_application_attributes.when.called_with( + PlatformApplicationArn="a-fake-arn").should.throw(ClientError) @mock_sns @@ -59,9 +62,11 @@ def test_set_platform_application_attributes(): ) arn = platform_application['PlatformApplicationArn'] conn.set_platform_application_attributes(PlatformApplicationArn=arn, - Attributes={"PlatformPrincipal": "other"} - ) - attributes = conn.get_platform_application_attributes(PlatformApplicationArn=arn)['Attributes'] + Attributes={ + "PlatformPrincipal": "other"} + ) + attributes = conn.get_platform_application_attributes( + PlatformApplicationArn=arn)['Attributes'] attributes.should.equal({ "PlatformCredential": "platform_credential", "PlatformPrincipal": "other", @@ -133,7 +138,8 @@ def test_create_platform_endpoint(): ) endpoint_arn = endpoint['EndpointArn'] - endpoint_arn.should.contain("arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") + endpoint_arn.should.contain( + "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") @mock_sns @@ -186,7 +192,8 @@ def test_get_endpoint_attributes(): ) endpoint_arn = endpoint['EndpointArn'] - attributes = conn.get_endpoint_attributes(EndpointArn=endpoint_arn)['Attributes'] + attributes = conn.get_endpoint_attributes( + EndpointArn=endpoint_arn)['Attributes'] attributes.should.equal({ "Token": "some_unique_id", "Enabled": 'false', @@ -197,7 +204,8 @@ def 
test_get_endpoint_attributes(): @mock_sns def test_get_missing_endpoint_attributes(): conn = boto3.client('sns', region_name='us-east-1') - conn.get_endpoint_attributes.when.called_with(EndpointArn="a-fake-arn").should.throw(ClientError) + conn.get_endpoint_attributes.when.called_with( + EndpointArn="a-fake-arn").should.throw(ClientError) @mock_sns @@ -222,9 +230,10 @@ def test_set_endpoint_attributes(): endpoint_arn = endpoint['EndpointArn'] conn.set_endpoint_attributes(EndpointArn=endpoint_arn, - Attributes={"CustomUserData": "other data"} - ) - attributes = conn.get_endpoint_attributes(EndpointArn=endpoint_arn)['Attributes'] + Attributes={"CustomUserData": "other data"} + ) + attributes = conn.get_endpoint_attributes( + EndpointArn=endpoint_arn)['Attributes'] attributes.should.equal({ "Token": "some_unique_id", "Enabled": 'false', @@ -253,4 +262,5 @@ def test_publish_to_platform_endpoint(): endpoint_arn = endpoint['EndpointArn'] - conn.publish(Message="some message", MessageStructure="json", TargetArn=endpoint_arn) + conn.publish(Message="some message", + MessageStructure="json", TargetArn=endpoint_arn) diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index dab2a569b..718bce5c4 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -15,12 +15,14 @@ def test_publish_to_sqs(): conn = boto.connect_sns() conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] sqs_conn = boto.connect_sqs() sqs_conn.create_queue("test-queue") - conn.subscribe(topic_arn, "sqs", "arn:aws:sqs:us-east-1:123456789012:test-queue") + conn.subscribe(topic_arn, "sqs", + "arn:aws:sqs:us-east-1:123456789012:test-queue") conn.publish(topic=topic_arn, message="my message") @@ -35,12 +37,14 @@ def test_publish_to_sqs_in_different_region(): conn = boto.sns.connect_to_region("us-west-1") conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] sqs_conn = boto.sqs.connect_to_region("us-west-2") sqs_conn.create_queue("test-queue") - conn.subscribe(topic_arn, "sqs", "arn:aws:sqs:us-west-2:123456789012:test-queue") + conn.subscribe(topic_arn, "sqs", + "arn:aws:sqs:us-west-2:123456789012:test-queue") conn.publish(topic=topic_arn, message="my message") @@ -61,9 +65,11 @@ def test_publish_to_http(): conn = boto.connect_sns() conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] conn.subscribe(topic_arn, "http", "http://example.com/foobar") - response = conn.publish(topic=topic_arn, message="my message", subject="my subject") + response = conn.publish( + topic=topic_arn, message="my message", subject="my subject") message_id = response['PublishResponse']['PublishResult']['MessageId'] diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index edf2948fb..cda9fed60 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -70,5 +70,6 @@ def test_publish_to_http(): Protocol="http", 
Endpoint="http://example.com/foobar") - response = conn.publish(TopicArn=topic_arn, Message="my message", Subject="my subject") + response = conn.publish( + TopicArn=topic_arn, Message="my message", Subject="my subject") message_id = response['MessageId'] diff --git a/tests/test_sns/test_server.py b/tests/test_sns/test_server.py index 422763dac..ce505278f 100644 --- a/tests/test_sns/test_server.py +++ b/tests/test_sns/test_server.py @@ -15,8 +15,10 @@ def test_sns_server_get(): topic_data = test_client.action_data("CreateTopic", Name="test topic") topic_data.should.contain("CreateTopicResult") - topic_data.should.contain("arn:aws:sns:us-east-1:123456789012:test topic") + topic_data.should.contain( + "arn:aws:sns:us-east-1:123456789012:test topic") topics_data = test_client.action_data("ListTopics") topics_data.should.contain("ListTopicsResult") - topic_data.should.contain("arn:aws:sns:us-east-1:123456789012:test topic") + topic_data.should.contain( + "arn:aws:sns:us-east-1:123456789012:test topic") diff --git a/tests/test_sns/test_subscriptions.py b/tests/test_sns/test_subscriptions.py index e141c503a..c521bb428 100644 --- a/tests/test_sns/test_subscriptions.py +++ b/tests/test_sns/test_subscriptions.py @@ -12,11 +12,13 @@ def test_creating_subscription(): conn = boto.connect_sns() conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] conn.subscribe(topic_arn, "http", "http://example.com/") - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"] + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] subscriptions.should.have.length_of(1) subscription = subscriptions[0] subscription["TopicArn"].should.equal(topic_arn) @@ -28,7 +30,8 @@ def test_creating_subscription(): conn.unsubscribe(subscription["SubscriptionArn"]) # And there should be zero subscriptions left - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"] + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] subscriptions.should.have.length_of(0) @@ -46,7 +49,8 @@ def test_getting_subscriptions_by_topic(): conn.subscribe(topic1_arn, "http", "http://example1.com/") conn.subscribe(topic2_arn, "http", "http://example2.com/") - topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn)["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"] + topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn)[ + "ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"] topic1_subscriptions.should.have.length_of(1) topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") @@ -63,25 +67,36 @@ def test_subscription_paging(): topic2_arn = topics[1]['TopicArn'] for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): - conn.subscribe(topic1_arn, 'email', 'email_' + str(index) + '@test.com') - conn.subscribe(topic2_arn, 'email', 'email_' + str(index) + '@test.com') + conn.subscribe(topic1_arn, 'email', 'email_' + + str(index) + '@test.com') + conn.subscribe(topic2_arn, 'email', 'email_' + + str(index) + '@test.com') all_subscriptions = conn.get_all_subscriptions() - 
all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["NextToken"] + all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ + "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = all_subscriptions["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["NextToken"] next_token.should.equal(DEFAULT_PAGE_SIZE) all_subscriptions = conn.get_all_subscriptions(next_token=next_token * 2) - all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE * 2 / 3)) - next_token = all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["NextToken"] + all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ + "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE * 2 / 3)) + next_token = all_subscriptions["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["NextToken"] next_token.should.equal(None) topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn) - topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["NextToken"] + topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ + "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ + "ListSubscriptionsByTopicResult"]["NextToken"] next_token.should.equal(DEFAULT_PAGE_SIZE) - topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn, next_token=next_token) - topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) - next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["NextToken"] + topic1_subscriptions = conn.get_all_subscriptions_by_topic( + topic1_arn, next_token=next_token) + topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ + "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) + next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ + "ListSubscriptionsByTopicResult"]["NextToken"] next_token.should.equal(None) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index b884ca54d..906c483f7 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -52,7 +52,8 @@ def test_getting_subscriptions_by_topic(): Protocol="http", Endpoint="http://example2.com/") - topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)["Subscriptions"] + topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)[ + "Subscriptions"] topic1_subscriptions.should.have.length_of(1) topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") @@ -77,14 +78,19 @@ def test_subscription_paging(): next_token.should.equal(str(DEFAULT_PAGE_SIZE)) all_subscriptions = conn.list_subscriptions(NextToken=next_token) - all_subscriptions["Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) + all_subscriptions["Subscriptions"].should.have.length_of( + 
+        int(DEFAULT_PAGE_SIZE / 3))
     all_subscriptions.shouldnt.have("NextToken")
 
-    topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)
-    topic1_subscriptions["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE)
+    topic1_subscriptions = conn.list_subscriptions_by_topic(
+        TopicArn=topic1_arn)
+    topic1_subscriptions["Subscriptions"].should.have.length_of(
+        DEFAULT_PAGE_SIZE)
     next_token = topic1_subscriptions["NextToken"]
     next_token.should.equal(str(DEFAULT_PAGE_SIZE))
 
-    topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn, NextToken=next_token)
-    topic1_subscriptions["Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3))
+    topic1_subscriptions = conn.list_subscriptions_by_topic(
+        TopicArn=topic1_arn, NextToken=next_token)
+    topic1_subscriptions["Subscriptions"].should.have.length_of(
+        int(DEFAULT_PAGE_SIZE / 3))
     topic1_subscriptions.shouldnt.have("NextToken")
diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py
index ab2f06382..79b85f709 100644
--- a/tests/test_sns/test_topics.py
+++ b/tests/test_sns/test_topics.py
@@ -34,7 +34,8 @@ def test_create_and_delete_topic():
 @mock_sns_deprecated
 def test_get_missing_topic():
     conn = boto.connect_sns()
-    conn.get_topic_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError)
+    conn.get_topic_attributes.when.called_with(
+        "a-fake-arn").should.throw(BotoServerError)
 
@@ -42,7 +43,9 @@ def test_create_topic_in_multiple_regions():
     for region in ['us-west-1', 'us-west-2']:
         conn = boto.sns.connect_to_region(region)
         conn.create_topic("some-topic")
-        list(conn.get_all_topics()["ListTopicsResponse"]["ListTopicsResult"]["Topics"]).should.have.length_of(1)
+        list(conn.get_all_topics()["ListTopicsResponse"][
+             "ListTopicsResult"]["Topics"]).should.have.length_of(1)
+
 
 @mock_sns_deprecated
 def test_topic_corresponds_to_region():
@@ -50,8 +53,11 @@ def test_topic_corresponds_to_region():
         conn = boto.sns.connect_to_region(region)
         conn.create_topic("some-topic")
         topics_json = conn.get_all_topics()
-        topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn']
-        topic_arn.should.equal("arn:aws:sns:{0}:123456789012:some-topic".format(region))
+        topic_arn = topics_json["ListTopicsResponse"][
+            "ListTopicsResult"]["Topics"][0]['TopicArn']
+        topic_arn.should.equal(
+            "arn:aws:sns:{0}:123456789012:some-topic".format(region))
+
 
 @mock_sns_deprecated
 def test_topic_attributes():
@@ -59,9 +65,11 @@ def test_topic_attributes():
     conn.create_topic("some-topic")
 
     topics_json = conn.get_all_topics()
-    topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn']
+    topic_arn = topics_json["ListTopicsResponse"][
+        "ListTopicsResult"]["Topics"][0]['TopicArn']
 
-    attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes']
+    attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][
+        'GetTopicAttributesResult']['Attributes']
     attributes["TopicArn"].should.equal(
         "arn:aws:sns:{0}:123456789012:some-topic"
         .format(conn.region.name)
@@ -73,7 +81,8 @@ def test_topic_attributes():
     attributes["SubscriptionsConfirmed"].should.equal(0)
     attributes["SubscriptionsDeleted"].should.equal(0)
     attributes["DeliveryPolicy"].should.equal("")
-    attributes["EffectiveDeliveryPolicy"].should.equal(DEFAULT_EFFECTIVE_DELIVERY_POLICY)
+    attributes["EffectiveDeliveryPolicy"].should.equal(
+        DEFAULT_EFFECTIVE_DELIVERY_POLICY)
 
     # boto can't handle prefix-mandatory strings:
     # i.e. unicode on Python 2 -- u"foobar"
@@ -90,10 +99,13 @@ def test_topic_attributes():
     conn.set_topic_attributes(topic_arn, "DisplayName", displayname)
     conn.set_topic_attributes(topic_arn, "DeliveryPolicy", delivery)
 
-    attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes']
+    attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][
+        'GetTopicAttributesResult']['Attributes']
     attributes["Policy"].should.equal("{'foo': 'bar'}")
     attributes["DisplayName"].should.equal("My display name")
-    attributes["DeliveryPolicy"].should.equal("{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}")
+    attributes["DeliveryPolicy"].should.equal(
+        "{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}")
+
 
 @mock_sns_deprecated
 def test_topic_paging():
@@ -102,15 +114,19 @@ def test_topic_paging():
         conn.create_topic("some-topic_" + str(index))
 
     topics_json = conn.get_all_topics()
-    topics_list = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"]
-    next_token = topics_json["ListTopicsResponse"]["ListTopicsResult"]["NextToken"]
+    topics_list = topics_json["ListTopicsResponse"][
+        "ListTopicsResult"]["Topics"]
+    next_token = topics_json["ListTopicsResponse"][
+        "ListTopicsResult"]["NextToken"]
 
     len(topics_list).should.equal(DEFAULT_PAGE_SIZE)
     next_token.should.equal(DEFAULT_PAGE_SIZE)
 
     topics_json = conn.get_all_topics(next_token=next_token)
-    topics_list = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"]
-    next_token = topics_json["ListTopicsResponse"]["ListTopicsResult"]["NextToken"]
+    topics_list = topics_json["ListTopicsResponse"][
+        "ListTopicsResult"]["Topics"]
+    next_token = topics_json["ListTopicsResponse"][
+        "ListTopicsResult"]["NextToken"]
 
     topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2))
     next_token.should.equal(None)
diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py
index b757a3750..55d03afff 100644
--- a/tests/test_sns/test_topics_boto3.py
+++ b/tests/test_sns/test_topics_boto3.py
@@ -35,7 +35,8 @@ def test_create_and_delete_topic():
 @mock_sns
 def test_get_missing_topic():
     conn = boto3.client("sns", region_name="us-east-1")
-    conn.get_topic_attributes.when.called_with(TopicArn="a-fake-arn").should.throw(ClientError)
+    conn.get_topic_attributes.when.called_with(
+        TopicArn="a-fake-arn").should.throw(ClientError)
 
 
 @mock_sns
@@ -53,7 +54,8 @@ def test_topic_corresponds_to_region():
         conn.create_topic(Name="some-topic")
         topics_json = conn.list_topics()
         topic_arn = topics_json["Topics"][0]['TopicArn']
-        topic_arn.should.equal("arn:aws:sns:{0}:123456789012:some-topic".format(region))
+        topic_arn.should.equal(
+            "arn:aws:sns:{0}:123456789012:some-topic".format(region))
 
 
 @mock_sns
@@ -76,7 +78,8 @@ def test_topic_attributes():
     attributes["SubscriptionsConfirmed"].should.equal('0')
     attributes["SubscriptionsDeleted"].should.equal('0')
     attributes["DeliveryPolicy"].should.equal("")
-    attributes["EffectiveDeliveryPolicy"].should.equal(DEFAULT_EFFECTIVE_DELIVERY_POLICY)
+    attributes["EffectiveDeliveryPolicy"].should.equal(
+        DEFAULT_EFFECTIVE_DELIVERY_POLICY)
 
     # boto can't handle prefix-mandatory strings:
     # i.e. unicode on Python 2 -- u"foobar"
@@ -84,11 +87,13 @@ def test_topic_attributes():
     if six.PY2:
         policy = json.dumps({b"foo": b"bar"})
         displayname = b"My display name"
-        delivery = json.dumps({b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}})
+        delivery = json.dumps(
+            {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}})
     else:
         policy = json.dumps({u"foo": u"bar"})
         displayname = u"My display name"
-        delivery = json.dumps({u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}})
+        delivery = json.dumps(
+            {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}})
 
     conn.set_topic_attributes(TopicArn=topic_arn,
                               AttributeName="Policy",
                               AttributeValue=policy)
@@ -102,7 +107,8 @@ def test_topic_attributes():
     attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes']
     attributes["Policy"].should.equal('{"foo": "bar"}')
     attributes["DisplayName"].should.equal("My display name")
-    attributes["DeliveryPolicy"].should.equal('{"http": {"defaultHealthyRetryPolicy": {"numRetries": 5}}}')
+    attributes["DeliveryPolicy"].should.equal(
+        '{"http": {"defaultHealthyRetryPolicy": {"numRetries": 5}}}')
 
 
 @mock_sns
diff --git a/tests/test_sqs/test_server.py b/tests/test_sqs/test_server.py
index c7411193a..b7a43ab90 100644
--- a/tests/test_sqs/test_server.py
+++ b/tests/test_sqs/test_server.py
@@ -31,7 +31,8 @@ def test_sqs_list_identities():
     res = test_client.get(
         '/123/testqueue?Action=ReceiveMessage&MaxNumberOfMessages=1')
 
-    message = re.search("<Body>(.*?)</Body>", res.data.decode('utf-8')).groups()[0]
+    message = re.search("<Body>(.*?)</Body>",
+                        res.data.decode('utf-8')).groups()[0]
     message.should.equal('test-message')
 
@@ -58,7 +59,8 @@ def test_messages_polling():
         msg_res = test_client.get(
             '/123/testqueue?Action=ReceiveMessage&MaxNumberOfMessages=1&WaitTimeSeconds=5'
         )
-        new_msgs = re.findall("<Body>(.*?)</Body>", msg_res.data.decode('utf-8'))
+        new_msgs = re.findall("<Body>(.*?)</Body>",
+                              msg_res.data.decode('utf-8'))
         count += len(new_msgs)
         messages.append(new_msgs)
 
@@ -71,5 +73,6 @@ def test_messages_polling():
     get_messages_thread.join()
     insert_messages_thread.join()
 
-    # got each message in a separate call to ReceiveMessage, despite the long WaitTimeSeconds
+    # got each message in a separate call to ReceiveMessage, despite the long
+    # WaitTimeSeconds
     assert len(messages) == 5
diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py
index 89ea7413d..653963122 100644
--- a/tests/test_sqs/test_sqs.py
+++ b/tests/test_sqs/test_sqs.py
@@ -34,7 +34,8 @@ def test_create_queue():
 @mock_sqs
 def test_get_inexistent_queue():
     sqs = boto3.resource('sqs', region_name='us-east-1')
-    sqs.get_queue_by_name.when.called_with(QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError)
+    sqs.get_queue_by_name.when.called_with(
+        QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError)
 
 
 @mock_sqs
@@ -43,8 +44,10 @@ def test_message_send():
     queue = sqs.create_queue(QueueName="blah")
 
     msg = queue.send_message(MessageBody="derp")
-    msg.get('MD5OfMessageBody').should.equal('58fd9edd83341c29f1aebba81c31e257')
-    msg.get('ResponseMetadata', {}).get('RequestId').should.equal('27daac76-34dd-47df-bd01-1f6e873584a0')
+    msg.get('MD5OfMessageBody').should.equal(
+        '58fd9edd83341c29f1aebba81c31e257')
+    msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
+        '27daac76-34dd-47df-bd01-1f6e873584a0')
     msg.get('MessageId').should_not.contain(' \n')
 
     messages = queue.receive_messages()
@@ -73,7 +76,8 @@ def test_create_queues_in_multiple_region():
     list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1)
     list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1)
 
-    west1_conn.list_queues()['QueueUrls'][0].should.equal('http://sqs.us-west-1.amazonaws.com/123456789012/blah')
+    west1_conn.list_queues()['QueueUrls'][0].should.equal(
+        'http://sqs.us-west-1.amazonaws.com/123456789012/blah')
 
@@ -87,14 +91,16 @@ def test_get_queue_with_prefix():
     queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls']
     queue.should.have.length_of(1)
-    queue[0].should.equal("http://sqs.us-west-1.amazonaws.com/123456789012/test-queue")
+    queue[0].should.equal(
+        "http://sqs.us-west-1.amazonaws.com/123456789012/test-queue")
 
 
 @mock_sqs
 def test_delete_queue():
     sqs = boto3.resource('sqs', region_name='us-east-1')
     conn = boto3.client("sqs", region_name='us-east-1')
-    conn.create_queue(QueueName="test-queue", Attributes={"VisibilityTimeout": "60"})
+    conn.create_queue(QueueName="test-queue",
+                      Attributes={"VisibilityTimeout": "60"})
     queue = sqs.Queue('test-queue')
 
     conn.list_queues()['QueueUrls'].should.have.length_of(1)
@@ -110,7 +116,8 @@ def test_delete_queue():
 def test_set_queue_attribute():
     sqs = boto3.resource('sqs', region_name='us-east-1')
     conn = boto3.client("sqs", region_name='us-east-1')
-    conn.create_queue(QueueName="test-queue", Attributes={"VisibilityTimeout": '60'})
+    conn.create_queue(QueueName="test-queue",
+                      Attributes={"VisibilityTimeout": '60'})
 
     queue = sqs.Queue("test-queue")
     queue.attributes['VisibilityTimeout'].should.equal('60')
@@ -133,7 +140,8 @@ def test_send_message():
     response = queue.send_message(MessageBody=body_one)
     response = queue.send_message(MessageBody=body_two)
 
-    messages = conn.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages']
+    messages = conn.receive_message(
+        QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages']
 
     messages[0]['Body'].should.equal(body_one)
     messages[1]['Body'].should.equal(body_two)
@@ -244,13 +252,15 @@ def test_receive_message_with_explicit_visibility_timeout():
     queue.write(queue.new_message(body_one))
 
     queue.count().should.equal(1)
 
-    messages = conn.receive_message(queue, number_messages=1, visibility_timeout=0)
+    messages = conn.receive_message(
+        queue, number_messages=1, visibility_timeout=0)
 
     assert len(messages) == 1
 
     # Message should remain visible
     queue.count().should.equal(1)
 
+
 @mock_sqs_deprecated
 def test_change_message_visibility():
     conn = boto.connect_sqs('the_key', 'the_secret')
@@ -381,7 +391,8 @@ def test_send_batch_operation_with_message_attributes():
     queue = conn.create_queue("test-queue", visibility_timeout=60)
     queue.set_message_class(RawMessage)
 
-    message_tuple = ("my_first_message", 'test message 1', 0, {'name1': {'data_type': 'String', 'string_value': 'foo'}})
+    message_tuple = ("my_first_message", 'test message 1', 0, {
+                     'name1': {'data_type': 'String', 'string_value': 'foo'}})
     queue.write_batch([message_tuple])
 
     messages = queue.get_messages()
@@ -415,7 +426,8 @@ def test_queue_attributes():
     queue_name = 'test-queue'
     visibility_timeout = 60
 
-    queue = conn.create_queue(queue_name, visibility_timeout=visibility_timeout)
+    queue = conn.create_queue(
+        queue_name, visibility_timeout=visibility_timeout)
 
     attributes = queue.get_attributes()
 
@@ -462,7 +474,8 @@ def test_change_message_visibility_on_invalid_receipt():
 
     assert len(messages) == 1
 
-    original_message.change_visibility.when.called_with(100).should.throw(SQSError)
+    original_message.change_visibility.when.called_with(
+        100).should.throw(SQSError)
 
 
 @mock_sqs_deprecated
@@ -485,7 +498,8 @@ def test_change_message_visibility_on_visible_message():
 
     queue.count().should.equal(1)
 
-    original_message.change_visibility.when.called_with(100).should.throw(SQSError)
+    original_message.change_visibility.when.called_with(
+        100).should.throw(SQSError)
 
 
 @mock_sqs_deprecated
@@ -505,7 +519,8 @@ def test_purge_action():
 def test_delete_message_after_visibility_timeout():
     VISIBILITY_TIMEOUT = 1
     conn = boto.sqs.connect_to_region("us-east-1")
-    new_queue = conn.create_queue('new-queue', visibility_timeout=VISIBILITY_TIMEOUT)
+    new_queue = conn.create_queue(
+        'new-queue', visibility_timeout=VISIBILITY_TIMEOUT)
 
     m1 = Message()
     m1.set_body('Message 1!')
diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py
index 19865ca77..4e0e52606 100644
--- a/tests/test_sts/test_sts.py
+++ b/tests/test_sts/test_sts.py
@@ -16,7 +16,8 @@ def test_get_session_token():
     token = conn.get_session_token(duration=123)
 
     token.expiration.should.equal('2012-01-01T12:02:03.000Z')
-    token.session_token.should.equal("AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE")
+    token.session_token.should.equal(
+        "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE")
     token.access_key.should.equal("AKIAIOSFODNN7EXAMPLE")
     token.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")
@@ -28,10 +29,13 @@ def test_get_federation_token():
     token = conn.get_federation_token(duration=123, name="Bob")
 
     token.credentials.expiration.should.equal('2012-01-01T12:02:03.000Z')
-    token.credentials.session_token.should.equal("AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==")
+    token.credentials.session_token.should.equal(
+        "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==")
     token.credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE")
-    token.credentials.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")
-    token.federated_user_arn.should.equal("arn:aws:sts::123456789012:federated-user/Bob")
+    token.credentials.secret_key.should.equal(
+        "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")
+    token.federated_user_arn.should.equal(
+        "arn:aws:sts::123456789012:federated-user/Bob")
     token.federated_user_id.should.equal("123456789012:Bob")
 
@@ -55,20 +59,25 @@ def test_assume_role():
         ]
     })
     s3_role = "arn:aws:iam::123456789012:role/test-role"
-    role = conn.assume_role(s3_role, "session-name", policy, duration_seconds=123)
+    role = conn.assume_role(s3_role, "session-name",
+                            policy, duration_seconds=123)
 
     credentials = role.credentials
     credentials.expiration.should.equal('2012-01-01T12:02:03.000Z')
-    credentials.session_token.should.equal("BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE")
+    credentials.session_token.should.equal(
+        "BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE")
     credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE")
-    credentials.secret_key.should.equal("aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")
+    credentials.secret_key.should.equal(
+        "aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")
 
     role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role")
     role.user.assume_role_id.should.contain("session-name")
 
+
 @mock_sts
 def test_get_caller_identity():
-    identity = boto3.client("sts", region_name='us-east-1').get_caller_identity()
+    identity = boto3.client(
+        "sts", region_name='us-east-1').get_caller_identity()
 
     identity['Arn'].should.equal('arn:aws:sts::123456789012:user/moto')
     identity['UserId'].should.equal('AKIAIOSFODNN7EXAMPLE')
diff --git a/tests/test_swf/models/test_activity_task.py b/tests/test_swf/models/test_activity_task.py
index 0885c4b1e..5dddab975 100644
--- a/tests/test_swf/models/test_activity_task.py
+++ b/tests/test_swf/models/test_activity_task.py
@@ -147,6 +147,7 @@ def test_activity_task_cannot_change_state_on_closed_workflow_execution():
     )
     wfe.complete(123)
 
-    task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw(SWFWorkflowExecutionClosedError)
+    task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw(
+        SWFWorkflowExecutionClosedError)
     task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError)
     task.fail.when.called_with().should.throw(SWFWorkflowExecutionClosedError)
diff --git a/tests/test_swf/models/test_decision_task.py b/tests/test_swf/models/test_decision_task.py
index fdb53d28a..b5e23eaca 100644
--- a/tests/test_swf/models/test_decision_task.py
+++ b/tests/test_swf/models/test_decision_task.py
@@ -75,5 +75,6 @@ def test_decision_task_cannot_change_state_on_closed_workflow_execution():
 
     wfe.complete(123)
 
-    task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw(SWFWorkflowExecutionClosedError)
+    task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw(
+        SWFWorkflowExecutionClosedError)
     task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError)
diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py
index ce3ed0f13..57f66c830 100644
--- a/tests/test_swf/models/test_domain.py
+++ b/tests/test_swf/models/test_domain.py
@@ -15,7 +15,8 @@ WorkflowExecution = namedtuple(
 
 def test_domain_short_dict_representation():
     domain = Domain("foo", "52")
-    domain.to_short_dict().should.equal({"name": "foo", "status": "REGISTERED"})
+    domain.to_short_dict().should.equal(
+        {"name": "foo", "status": "REGISTERED"})
 
     domain.description = "foo bar"
     domain.to_short_dict()["description"].should.equal("foo bar")
@@ -67,16 +68,23 @@ def test_domain_decision_tasks():
 
 def test_domain_get_workflow_execution():
     domain = Domain("my-domain", "60")
-    wfe1 = WorkflowExecution(workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True)
-    wfe2 = WorkflowExecution(workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False)
-    wfe3 = WorkflowExecution(workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True)
-    wfe4 = WorkflowExecution(workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False)
+    wfe1 = WorkflowExecution(
+        workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True)
+    wfe2 = WorkflowExecution(
+        workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False)
+    wfe3 = WorkflowExecution(
+        workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True)
+    wfe4 = WorkflowExecution(
+        workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False)
     domain.workflow_executions = [wfe1, wfe2, wfe3, wfe4]
 
     # get workflow execution through workflow_id and run_id
-    domain.get_workflow_execution("wf-id-1", run_id="run-id-1").should.equal(wfe1)
-    domain.get_workflow_execution("wf-id-1", run_id="run-id-2").should.equal(wfe2)
-    domain.get_workflow_execution("wf-id-3", run_id="run-id-4").should.equal(wfe4)
+    domain.get_workflow_execution(
+        "wf-id-1", run_id="run-id-1").should.equal(wfe1)
+    domain.get_workflow_execution(
+        "wf-id-1", run_id="run-id-2").should.equal(wfe2)
+    domain.get_workflow_execution(
+        "wf-id-3", run_id="run-id-4").should.equal(wfe4)
 
     domain.get_workflow_execution.when.called_with(
         "wf-id-1", run_id="non-existent"
@@ -98,7 +106,8 @@ def test_domain_get_workflow_execution():
     )
 
     # raise_if_closed attribute
-    domain.get_workflow_execution("wf-id-1", run_id="run-id-1", raise_if_closed=True).should.equal(wfe1)
+    domain.get_workflow_execution(
+        "wf-id-1", run_id="run-id-1", raise_if_closed=True).should.equal(wfe1)
     domain.get_workflow_execution.when.called_with(
         "wf-id-3", run_id="run-id-4", raise_if_closed=True
     ).should.throw(
diff --git a/tests/test_swf/models/test_generic_type.py b/tests/test_swf/models/test_generic_type.py
index 692c66a47..d7410f395 100644
--- a/tests/test_swf/models/test_generic_type.py
+++ b/tests/test_swf/models/test_generic_type.py
@@ -3,6 +3,7 @@ from moto.swf.models import GenericType
 
 # Tests for GenericType (ActivityType, WorkflowType)
 class FooType(GenericType):
+
     @property
     def kind(self):
         return "foo"
@@ -38,10 +39,12 @@ def test_type_full_dict_representation():
     _type.to_full_dict()["configuration"].should.equal({})
 
     _type.task_list = "foo"
-    _type.to_full_dict()["configuration"]["defaultTaskList"].should.equal({"name": "foo"})
+    _type.to_full_dict()["configuration"][
+        "defaultTaskList"].should.equal({"name": "foo"})
 
     _type.just_an_example_timeout = "60"
-    _type.to_full_dict()["configuration"]["justAnExampleTimeout"].should.equal("60")
+    _type.to_full_dict()["configuration"][
+        "justAnExampleTimeout"].should.equal("60")
 
     _type.non_whitelisted_property = "34"
     keys = _type.to_full_dict()["configuration"].keys()
@@ -50,4 +53,5 @@ def test_type_string_representation():
     _type = FooType("test-foo", "v1.0")
-    str(_type).should.equal("FooType(name: test-foo, version: v1.0, status: REGISTERED)")
+    str(_type).should.equal(
+        "FooType(name: test-foo, version: v1.0, status: REGISTERED)")
diff --git a/tests/test_swf/models/test_workflow_execution.py b/tests/test_swf/models/test_workflow_execution.py
index f6a69f8d7..45b91c86a 100644
--- a/tests/test_swf/models/test_workflow_execution.py
+++ b/tests/test_swf/models/test_workflow_execution.py
@@ -240,8 +240,10 @@ def test_workflow_execution_schedule_activity_task():
     wfe.open_counts["openActivityTasks"].should.equal(1)
     last_event = wfe.events()[-1]
     last_event.event_type.should.equal("ActivityTaskScheduled")
-    last_event.event_attributes["decisionTaskCompletedEventId"].should.equal(123)
-    last_event.event_attributes["taskList"]["name"].should.equal("task-list-name")
+    last_event.event_attributes[
+        "decisionTaskCompletedEventId"].should.equal(123)
+    last_event.event_attributes["taskList"][
+        "name"].should.equal("task-list-name")
 
     wfe.activity_tasks.should.have.length_of(1)
     task = wfe.activity_tasks[0]
@@ -288,43 +290,50 @@ def test_workflow_execution_schedule_activity_task_should_fail_if_wrong_attribut
     wfe.schedule_activity_task(123, hsh)
     last_event = wfe.events()[-1]
     last_event.event_type.should.equal("ScheduleActivityTaskFailed")
-    last_event.event_attributes["cause"].should.equal("ACTIVITY_TYPE_DOES_NOT_EXIST")
+    last_event.event_attributes["cause"].should.equal(
+        "ACTIVITY_TYPE_DOES_NOT_EXIST")
 
     hsh["activityType"]["name"] = "test-activity"
     wfe.schedule_activity_task(123, hsh)
     last_event = wfe.events()[-1]
     last_event.event_type.should.equal("ScheduleActivityTaskFailed")
-    last_event.event_attributes["cause"].should.equal("ACTIVITY_TYPE_DEPRECATED")
+    last_event.event_attributes["cause"].should.equal(
+        "ACTIVITY_TYPE_DEPRECATED")
 
     hsh["activityType"]["version"] = "v1.2"
     wfe.schedule_activity_task(123, hsh)
     last_event = wfe.events()[-1]
     last_event.event_type.should.equal("ScheduleActivityTaskFailed")
-    last_event.event_attributes["cause"].should.equal("DEFAULT_TASK_LIST_UNDEFINED")
+    last_event.event_attributes["cause"].should.equal(
+        "DEFAULT_TASK_LIST_UNDEFINED")
 
     hsh["taskList"] = {"name": "foobar"}
     wfe.schedule_activity_task(123, hsh)
     last_event = wfe.events()[-1]
     last_event.event_type.should.equal("ScheduleActivityTaskFailed")
-    last_event.event_attributes["cause"].should.equal("DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED")
+    last_event.event_attributes["cause"].should.equal(
+        "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED")
 
     hsh["scheduleToStartTimeout"] = "600"
     wfe.schedule_activity_task(123, hsh)
     last_event = wfe.events()[-1]
     last_event.event_type.should.equal("ScheduleActivityTaskFailed")
-    last_event.event_attributes["cause"].should.equal("DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED")
+    last_event.event_attributes["cause"].should.equal(
+        "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED")
 
     hsh["scheduleToCloseTimeout"] = "600"
     wfe.schedule_activity_task(123, hsh)
     last_event = wfe.events()[-1]
     last_event.event_type.should.equal("ScheduleActivityTaskFailed")
-    last_event.event_attributes["cause"].should.equal("DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED")
+    last_event.event_attributes["cause"].should.equal(
+        "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED")
 
     hsh["startToCloseTimeout"] = "600"
     wfe.schedule_activity_task(123, hsh)
     last_event = wfe.events()[-1]
     last_event.event_type.should.equal("ScheduleActivityTaskFailed")
-    last_event.event_attributes["cause"].should.equal("DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED")
+    last_event.event_attributes["cause"].should.equal(
+        "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED")
 
     wfe.open_counts["openActivityTasks"].should.equal(0)
     wfe.activity_tasks.should.have.length_of(0)
@@ -393,7 +402,8 @@ def test_workflow_execution_schedule_activity_task_with_same_activity_id():
     wfe.open_counts["openActivityTasks"].should.equal(1)
     last_event = wfe.events()[-1]
     last_event.event_type.should.equal("ScheduleActivityTaskFailed")
-    last_event.event_attributes["cause"].should.equal("ACTIVITY_ID_ALREADY_IN_USE")
+    last_event.event_attributes["cause"].should.equal(
+        "ACTIVITY_ID_ALREADY_IN_USE")
 
 
 def 
test_workflow_execution_start_activity_task(): @@ -456,7 +466,8 @@ def test_first_timeout(): wfe.first_timeout().should.be.a(Timeout) -# See moto/swf/models/workflow_execution.py "_process_timeouts()" for more details +# See moto/swf/models/workflow_execution.py "_process_timeouts()" for more +# details def test_timeouts_are_processed_in_order_and_reevaluated(): # Let's make a Workflow Execution with the following properties: # - execution start to close timeout of 8 mins diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index e6671e9e9..3511d4e56 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -11,15 +11,18 @@ from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION @mock_swf_deprecated def test_poll_for_activity_task_when_one(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - resp = conn.poll_for_activity_task("test-domain", "activity-task-list", identity="surprise") + resp = conn.poll_for_activity_task( + "test-domain", "activity-task-list", identity="surprise") resp["activityId"].should.equal("my-activity-001") resp["taskToken"].should_not.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal( {"identity": "surprise", "scheduledEventId": 5} @@ -44,12 +47,14 @@ def test_poll_for_activity_task_on_non_existent_queue(): @mock_swf_deprecated def test_count_pending_activity_tasks(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - resp = conn.count_pending_activity_tasks("test-domain", "activity-task-list") + resp = conn.count_pending_activity_tasks( + "test-domain", "activity-task-list") resp.should.equal({"count": 1, "truncated": False}) @@ -64,16 +69,20 @@ def test_count_pending_decision_tasks_on_non_existent_task_list(): @mock_swf_deprecated def test_respond_activity_task_completed(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] - resp = conn.respond_activity_task_completed(activity_token, result="result of the task") + resp = conn.respond_activity_task_completed( + activity_token, result="result of the task") resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") 
resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted") resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal( {"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6} @@ -83,13 +92,16 @@ def test_respond_activity_task_completed(): @mock_swf_deprecated def test_respond_activity_task_completed_on_closed_workflow_execution(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] - # bad: we're closing workflow execution manually, but endpoints are not coded for now.. + # bad: we're closing workflow execution manually, but endpoints are not + # coded for now.. wfe = swf_backend.domains[0].workflow_executions[-1] wfe.execution_status = "CLOSED" # /bad @@ -102,11 +114,13 @@ def test_respond_activity_task_completed_on_closed_workflow_execution(): @mock_swf_deprecated def test_respond_activity_task_completed_with_task_already_completed(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] conn.respond_activity_task_completed(activity_token) @@ -119,18 +133,21 @@ def test_respond_activity_task_completed_with_task_already_completed(): @mock_swf_deprecated def test_respond_activity_task_failed(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] resp = conn.respond_activity_task_failed(activity_token, reason="short reason", details="long details") resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed") resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal( {"reason": "short reason", "details": "long details", @@ -144,7 +161,8 @@ def test_respond_activity_task_completed_with_wrong_token(): # because the safeguards are shared with RespondActivityTaskCompleted, so # no need to retest everything end-to-end. 
conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) @@ -158,11 +176,13 @@ def test_respond_activity_task_completed_with_wrong_token(): @mock_swf_deprecated def test_record_activity_task_heartbeat(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] resp = conn.record_activity_task_heartbeat(activity_token) resp.should.equal({"cancelRequested": False}) @@ -171,11 +191,13 @@ def test_record_activity_task_heartbeat(): @mock_swf_deprecated def test_record_activity_task_heartbeat_with_wrong_token(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] conn.record_activity_task_heartbeat.when.called_with( "bad-token", details="some progress details" @@ -185,17 +207,21 @@ def test_record_activity_task_heartbeat_with_wrong_token(): @mock_swf_deprecated def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) with freeze_time("2015-01-01 12:00:00"): - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] - conn.record_activity_task_heartbeat(activity_token, details="some progress details") + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + conn.record_activity_task_heartbeat( + activity_token, details="some progress details") with freeze_time("2015-01-01 12:05:30"): # => Activity Task Heartbeat timeout reached!! 
- resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] attrs["details"].should.equal("some progress details") diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index 20c44dc5f..b283d3448 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -48,8 +48,10 @@ def test_list_activity_types(): conn.register_activity_type("test-domain", "c-test-activity", "v1.0") all_activity_types = conn.list_activity_types("test-domain", "REGISTERED") - names = [activity_type["activityType"]["name"] for activity_type in all_activity_types["typeInfos"]] - names.should.equal(["a-test-activity", "b-test-activity", "c-test-activity"]) + names = [activity_type["activityType"]["name"] + for activity_type in all_activity_types["typeInfos"]] + names.should.equal( + ["a-test-activity", "b-test-activity", "c-test-activity"]) @mock_swf_deprecated @@ -62,8 +64,10 @@ def test_list_activity_types_reverse_order(): all_activity_types = conn.list_activity_types("test-domain", "REGISTERED", reverse_order=True) - names = [activity_type["activityType"]["name"] for activity_type in all_activity_types["typeInfos"]] - names.should.equal(["c-test-activity", "b-test-activity", "a-test-activity"]) + names = [activity_type["activityType"]["name"] + for activity_type in all_activity_types["typeInfos"]] + names.should.equal( + ["c-test-activity", "b-test-activity", "a-test-activity"]) # DeprecateActivityType endpoint @@ -110,7 +114,8 @@ def test_describe_activity_type(): conn.register_activity_type("test-domain", "test-activity", "v1.0", task_list="foo", default_task_heartbeat_timeout="32") - actype = conn.describe_activity_type("test-domain", "test-activity", "v1.0") + actype = conn.describe_activity_type( + "test-domain", "test-activity", "v1.0") actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") infos = actype["typeInfo"] infos["activityType"]["name"].should.equal("test-activity") diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index b552723cb..466e1a2ae 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -12,15 +12,19 @@ from ..utils import setup_workflow def test_poll_for_decision_task_when_one(): conn = setup_workflow() - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) - resp = conn.poll_for_decision_task("test-domain", "queue", identity="srv01") + resp = conn.poll_for_decision_task( + "test-domain", "queue", identity="srv01") types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted"]) + types.should.equal(["WorkflowExecutionStarted", + "DecisionTaskScheduled", "DecisionTaskStarted"]) - resp["events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01") + resp[ + 
"events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01") @mock_swf_deprecated @@ -44,9 +48,11 @@ def test_poll_for_decision_task_on_non_existent_queue(): @mock_swf_deprecated def test_poll_for_decision_task_with_reverse_order(): conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue", reverse_order=True) + resp = conn.poll_for_decision_task( + "test-domain", "queue", reverse_order=True) types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"]) + types.should.equal( + ["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"]) # CountPendingDecisionTasks endpoint @@ -89,7 +95,8 @@ def test_respond_decision_task_completed_with_no_decision(): ) resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal([ "WorkflowExecutionStarted", @@ -104,7 +111,8 @@ def test_respond_decision_task_completed_with_no_decision(): "startedEventId": 3, }) - resp = conn.describe_workflow_execution("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.describe_workflow_execution( + "test-domain", conn.run_id, "uid-abcd1234") resp["latestExecutionContext"].should.equal("free-form context") @@ -123,7 +131,8 @@ def test_respond_decision_task_completed_on_close_workflow_execution(): resp = conn.poll_for_decision_task("test-domain", "queue") task_token = resp["taskToken"] - # bad: we're closing workflow execution manually, but endpoints are not coded for now.. + # bad: we're closing workflow execution manually, but endpoints are not + # coded for now.. 
wfe = swf_backend.domains[0].workflow_executions[-1] wfe.execution_status = "CLOSED" # /bad @@ -155,10 +164,12 @@ def test_respond_decision_task_completed_with_complete_workflow_execution(): "decisionType": "CompleteWorkflowExecution", "completeWorkflowExecutionDecisionAttributes": {"result": "foo bar"} }] - resp = conn.respond_decision_task_completed(task_token, decisions=decisions) + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal([ "WorkflowExecutionStarted", @@ -167,7 +178,8 @@ def test_respond_decision_task_completed_with_complete_workflow_execution(): "DecisionTaskCompleted", "WorkflowExecutionCompleted", ]) - resp["events"][-1]["workflowExecutionCompletedEventAttributes"]["result"].should.equal("foo bar") + resp["events"][-1]["workflowExecutionCompletedEventAttributes"][ + "result"].should.equal("foo bar") @mock_swf_deprecated @@ -255,10 +267,12 @@ def test_respond_decision_task_completed_with_fail_workflow_execution(): "decisionType": "FailWorkflowExecution", "failWorkflowExecutionDecisionAttributes": {"reason": "my rules", "details": "foo"} }] - resp = conn.respond_decision_task_completed(task_token, decisions=decisions) + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal([ "WorkflowExecutionStarted", @@ -294,10 +308,12 @@ def test_respond_decision_task_completed_with_schedule_activity_task(): }, } }] - resp = conn.respond_decision_task_completed(task_token, decisions=decisions) + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal([ "WorkflowExecutionStarted", @@ -320,5 +336,6 @@ def test_respond_decision_task_completed_with_schedule_activity_task(): }, }) - resp = conn.describe_workflow_execution("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.describe_workflow_execution( + "test-domain", conn.run_id, "uid-abcd1234") resp["latestActivityTaskTimestamp"].should.equal(1420113600.0) diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index 1f785095c..3fa12d665 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -102,7 +102,8 @@ def test_describe_domain(): conn.register_domain("test-domain", "60", description="A test domain") domain = conn.describe_domain("test-domain") - domain["configuration"]["workflowExecutionRetentionPeriodInDays"].should.equal("60") + domain["configuration"][ + "workflowExecutionRetentionPeriodInDays"].should.equal("60") domain["domainInfo"]["description"].should.equal("A test domain") domain["domainInfo"]["name"].should.equal("test-domain") domain["domainInfo"]["status"].should.equal("REGISTERED") diff --git a/tests/test_swf/responses/test_timeouts.py 
b/tests/test_swf/responses/test_timeouts.py index 726410e76..5bd0ead96 100644 --- a/tests/test_swf/responses/test_timeouts.py +++ b/tests/test_swf/responses/test_timeouts.py @@ -11,19 +11,23 @@ from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION def test_activity_task_heartbeat_timeout(): with freeze_time("2015-01-01 12:00:00"): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - conn.poll_for_activity_task("test-domain", "activity-task-list", identity="surprise") + conn.poll_for_activity_task( + "test-domain", "activity-task-list", identity="surprise") with freeze_time("2015-01-01 12:04:30"): - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") with freeze_time("2015-01-01 12:05:30"): # => Activity Task Heartbeat timeout reached!! - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] @@ -44,7 +48,8 @@ def test_decision_task_start_to_close_timeout(): conn.poll_for_decision_task("test-domain", "queue")["taskToken"] with freeze_time("2015-01-01 12:04:30"): - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") event_types = [evt["eventType"] for evt in resp["events"]] event_types.should.equal( @@ -53,7 +58,8 @@ def test_decision_task_start_to_close_timeout(): with freeze_time("2015-01-01 12:05:30"): # => Decision Task Start to Close timeout reached!! - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") event_types = [evt["eventType"] for evt in resp["events"]] event_types.should.equal( @@ -77,7 +83,8 @@ def test_workflow_execution_start_to_close_timeout(): conn = setup_workflow() with freeze_time("2015-01-01 13:59:30"): - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") event_types = [evt["eventType"] for evt in resp["events"]] event_types.should.equal( @@ -86,11 +93,13 @@ def test_workflow_execution_start_to_close_timeout(): with freeze_time("2015-01-01 14:00:30"): # => Workflow Execution Start to Close timeout reached!! 
- resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") event_types = [evt["eventType"] for evt in resp["events"]] event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled", "WorkflowExecutionTimedOut"] + ["WorkflowExecutionStarted", "DecisionTaskScheduled", + "WorkflowExecutionTimedOut"] ) attrs = resp["events"][-1]["workflowExecutionTimedOutEventAttributes"] attrs.should.equal({ diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index d5dc44a38..5c97c778b 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -30,14 +30,16 @@ def setup_swf_environment(): def test_start_workflow_execution(): conn = setup_swf_environment() - wf = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + wf = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") wf.should.contain("runId") @mock_swf_deprecated def test_start_already_started_workflow_execution(): conn = setup_swf_environment() - conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") conn.start_workflow_execution.when.called_with( "test-domain", "uid-abcd1234", "test-workflow", "v1.0" @@ -58,11 +60,14 @@ def test_start_workflow_execution_on_deprecated_type(): @mock_swf_deprecated def test_describe_workflow_execution(): conn = setup_swf_environment() - hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") run_id = hsh["runId"] - wfe = conn.describe_workflow_execution("test-domain", run_id, "uid-abcd1234") - wfe["executionInfo"]["execution"]["workflowId"].should.equal("uid-abcd1234") + wfe = conn.describe_workflow_execution( + "test-domain", run_id, "uid-abcd1234") + wfe["executionInfo"]["execution"][ + "workflowId"].should.equal("uid-abcd1234") wfe["executionInfo"]["executionStatus"].should.equal("OPEN") @@ -79,10 +84,12 @@ def test_describe_non_existent_workflow_execution(): @mock_swf_deprecated def test_get_workflow_execution_history(): conn = setup_swf_environment() - hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") run_id = hsh["runId"] - resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) @@ -90,7 +97,8 @@ def test_get_workflow_execution_history(): @mock_swf_deprecated def test_get_workflow_execution_history_with_reverse_order(): conn = setup_swf_environment() - hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") run_id = hsh["runId"] resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234", @@ -191,7 +199,8 @@ def test_terminate_workflow_execution(): run_id=run_id) resp.should.be.none - resp = 
conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", run_id, "uid-abcd1234") evt = resp["events"][-1] evt["eventType"].should.equal("WorkflowExecutionTerminated") attrs = evt["workflowExecutionTerminatedEventAttributes"] diff --git a/tests/test_swf/responses/test_workflow_types.py b/tests/test_swf/responses/test_workflow_types.py index 1e838c2ee..9e097a873 100644 --- a/tests/test_swf/responses/test_workflow_types.py +++ b/tests/test_swf/responses/test_workflow_types.py @@ -49,8 +49,10 @@ def test_list_workflow_types(): conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED") - names = [activity_type["workflowType"]["name"] for activity_type in all_workflow_types["typeInfos"]] - names.should.equal(["a-test-workflow", "b-test-workflow", "c-test-workflow"]) + names = [activity_type["workflowType"]["name"] + for activity_type in all_workflow_types["typeInfos"]] + names.should.equal( + ["a-test-workflow", "b-test-workflow", "c-test-workflow"]) @mock_swf_deprecated @@ -63,8 +65,10 @@ def test_list_workflow_types_reverse_order(): all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED", reverse_order=True) - names = [activity_type["workflowType"]["name"] for activity_type in all_workflow_types["typeInfos"]] - names.should.equal(["c-test-workflow", "b-test-workflow", "a-test-workflow"]) + names = [activity_type["workflowType"]["name"] + for activity_type in all_workflow_types["typeInfos"]] + names.should.equal( + ["c-test-workflow", "b-test-workflow", "a-test-workflow"]) # DeprecateWorkflowType endpoint @@ -111,10 +115,12 @@ def test_describe_workflow_type(): conn.register_workflow_type("test-domain", "test-workflow", "v1.0", task_list="foo", default_child_policy="TERMINATE") - actype = conn.describe_workflow_type("test-domain", "test-workflow", "v1.0") + actype = conn.describe_workflow_type( + "test-domain", "test-workflow", "v1.0") actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") actype["configuration"]["defaultChildPolicy"].should.equal("TERMINATE") - actype["configuration"].keys().should_not.contain("defaultTaskStartToCloseTimeout") + actype["configuration"].keys().should_not.contain( + "defaultTaskStartToCloseTimeout") infos = actype["typeInfo"] infos["workflowType"]["name"].should.equal("test-workflow") infos["workflowType"]["version"].should.equal("v1.0") diff --git a/tests/test_swf/utils.py b/tests/test_swf/utils.py index 756d17c27..2197b71df 100644 --- a/tests/test_swf/utils.py +++ b/tests/test_swf/utils.py @@ -29,7 +29,8 @@ SCHEDULE_ACTIVITY_TASK_DECISION = { } } for key, value in ACTIVITY_TASK_TIMEOUTS.items(): - SCHEDULE_ACTIVITY_TASK_DECISION["scheduleActivityTaskDecisionAttributes"][key] = value + SCHEDULE_ACTIVITY_TASK_DECISION[ + "scheduleActivityTaskDecisionAttributes"][key] = value # A test Domain @@ -86,7 +87,8 @@ def setup_workflow(): default_task_schedule_to_start_timeout="600", default_task_start_to_close_timeout="600", ) - wfe = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + wfe = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") conn.run_id = wfe["runId"] return conn
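
The rewrapping applied throughout these hunks is mechanical: any statement longer than PEP 8's 79-character limit is split after an opening parenthesis or bracket, and the remainder is carried to an indented continuation line. Below is a minimal standalone sketch of that convention (a hypothetical test, not part of this patch), written in the same `sure` assertion idiom these suites rely on; the test name and values are illustrative only.

# A minimal sketch, not part of the patch: it demonstrates the
# continuation-line style the lint pass applies. The test name and the
# ARN value are hypothetical examples, not taken from the moto suite.
import sure  # noqa -- importing sure enables the .should assertion chain


def test_wrapping_convention():
    arn = "arn:aws:sts::123456789012:federated-user/Bob"
    # A single-line assertion here would exceed 79 characters, so the
    # argument is split after the opening parenthesis and carried to an
    # indented continuation line, exactly as in the hunks above:
    arn.should.equal(
        "arn:aws:sts::123456789012:federated-user/Bob")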