From d0a66f2dffaabfb50595b357aa70a74c19f44fc9 Mon Sep 17 00:00:00 2001 From: Justin Wiley Date: Tue, 29 Nov 2016 17:19:26 -0800 Subject: [PATCH 001/213] Begin work on mocking CloudWatch Events. --- moto/events/__init__.py | 5 ++++ moto/events/models.py | 58 +++++++++++++++++++++++++++++++++++++++ moto/events/responses.py | 59 ++++++++++++++++++++++++++++++++++++++++ moto/events/urls.py | 11 ++++++++ 4 files changed, 133 insertions(+) create mode 100644 moto/events/__init__.py create mode 100644 moto/events/models.py create mode 100644 moto/events/responses.py create mode 100644 moto/events/urls.py diff --git a/moto/events/__init__.py b/moto/events/__init__.py new file mode 100644 index 000000000..8b15e852a --- /dev/null +++ b/moto/events/__init__.py @@ -0,0 +1,5 @@ +from __future__ import unicode_literals + +from .models import events_backend + +mock_events = events_backend.decorator diff --git a/moto/events/models.py b/moto/events/models.py new file mode 100644 index 000000000..f732a55a5 --- /dev/null +++ b/moto/events/models.py @@ -0,0 +1,58 @@ +from moto.core import BaseBackend + + +class EventsBackend(BaseBackend): + + def __init__(self): + self.events = {} + self.rules = {} + + def can_paginate(self): + pass + + def delete_rule(self): + pass + + def describe_rule(self, name): + event = self.events['name'] + + def disable_rule(self): + pass + + def enable_rule(self): + pass + + def generate_presigned_url(self): + pass + + def get_paginator(self): + pass + + def get_waiter(self): + pass + + def list_rule_names_by_target(self): + pass + + def list_rules(self): + pass + + def list_targets_by_rule(self): + pass + + def put_events(self): + pass + + def put_rule(self, name, **kwargs): + pass + + def put_targets(self): + pass + + def remove_targets(self): + pass + + def test_event_pattern(self): + pass + +events_backend = EventsBackend() diff --git a/moto/events/responses.py b/moto/events/responses.py new file mode 100644 index 000000000..7be87d03d --- /dev/null +++ 
b/moto/events/responses.py @@ -0,0 +1,59 @@ +import json + +from moto.core.responses import BaseResponse + + +class EventsHandler(BaseResponse): + + def error(self, type_, status=400): + return status, self.response_headers, json.dumps({'__type': type_}) + + def can_paginate(self): + pass + + def delete_rule(self): + pass + + def describe_rule(self): + pass + + def disable_rule(self): + pass + + def enable_rule(self): + pass + + def generate_presigned_url(self): + pass + + def get_paginator(self): + pass + + def get_waiter(self): + pass + + def list_rule_names_by_target(self): + pass + + def list_rules(self): + pass + + def list_targets_by_rule(self): + pass + + def put_events(self): + pass + + def put_rule(self): + if 'Name' not in self.body: + return self.error("com.amazonaws.events.validate#ValidationException") + pass + + def put_targets(self): + pass + + def remove_targets(self): + pass + + def test_event_pattern(self): + pass diff --git a/moto/events/urls.py b/moto/events/urls.py new file mode 100644 index 000000000..9484e385e --- /dev/null +++ b/moto/events/urls.py @@ -0,0 +1,11 @@ +from __future__ import unicode_literals + +from .responses import EventsHandler + +url_bases = [ + "https?://events.(.+).amazonaws.com" +] + +url_paths = { + "{0}/": EventsHandler.dispatch, +} \ No newline at end of file From d0def03c4ccfe0901c039d0bc222533959aee4d8 Mon Sep 17 00:00:00 2001 From: Justin Wiley Date: Wed, 30 Nov 2016 17:09:58 -0800 Subject: [PATCH 002/213] Events models first draft done, need to write tests, then get responses going. 
--- moto/events/models.py | 172 +++++++++++++++++++++++++++++++++------ moto/events/responses.py | 6 +- 2 files changed, 148 insertions(+), 30 deletions(-) diff --git a/moto/events/models.py b/moto/events/models.py index f732a55a5..5a10c1658 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -1,58 +1,176 @@ +import binascii +import os +import re +from collections import OrderedDict + from moto.core import BaseBackend +class Rule(object): + + def _generate_arn(self, name): + return 'arn:aws:events:us-west-2:111111111111:rule/' + name + + def __init__(self, name, **kwargs): + self.name = name + self.arn = kwargs['arn'] if 'arn' in kwargs else self._generate_arn(name) + self.event_pattern = kwargs['event_pattern'] if 'event_pattern' in kwargs else None + self.schedule_exp = kwargs['schedule_exp'] if 'schedule_exp' in kwargs else None + self.state = kwargs['state'] if 'state' in kwargs else 'ENABLED' + self.description = kwargs['description'] if 'description' in kwargs else None + self.role_arn = kwargs['role_arn'] if 'role_arn' in kwargs else None + self.targets = {} + + def enable(self): + self.state = 'ENABLED' + + def disable(self): + self.state = 'DISABLED' + + def put_targets(self, targets): + # TODO: Will need to test for valid ARNs. 
+ for target in targets: + self.targets[target['TargetId']] = target + + def remove_targets(self, ids): + for target in ids: + if target in self.targets: + self.targets.pop(target) + + class EventsBackend(BaseBackend): def __init__(self): - self.events = {} - self.rules = {} + self.rules = OrderedDict() + self.next_tokens = {} - def can_paginate(self): - pass + def _gen_next_token(self, index): + token = binascii.hexlify(os.urandom(16)) + self.next_tokens[token] = index + return token - def delete_rule(self): - pass + def _process_token_and_limits(self, array_len, next_token=None, limit=None): + start_index = 0 + end_index = array_len + new_next_token = None + + if next_token is not None: + if next_token in self.next_tokens: + start_index = self.next_tokens[next_token] + + if limit is not None: + new_end_index = start_index + int(limit) + if new_end_index < end_index: + end_index = new_end_index + new_next_token = self._gen_next_token(end_index - 1) + + return start_index, end_index, new_next_token + + def delete_rule(self, name): + return self.rules.pop(name) is not None def describe_rule(self, name): - event = self.events['name'] + if name in self.rules: + return self.rules[name] - def disable_rule(self): - pass + return None - def enable_rule(self): - pass + def disable_rule(self, name): + if name in self.rules: + self.rules[name].disable() + return True + + return False + + def enable_rule(self, name): + if name in self.rules: + self.rules[name].enable() + return True + + return False def generate_presigned_url(self): pass - def get_paginator(self): - pass + def list_rule_names_by_target(self, target_arn, next_token=None, limit=None): + rules_array = self.rules.values() - def get_waiter(self): - pass + matching_rules = [] + return_obj = {} - def list_rule_names_by_target(self): - pass + start_index, end_index, new_next_token = self._process_token_and_limits(len(rules_array), next_token, limit) - def list_rules(self): - pass + for i in range(start_index, 
end_index): + rule = rules_array[i] + if target_arn in rule.targets: + matching_rules.append(rule.name) - def list_targets_by_rule(self): - pass + return_obj['RuleNames'] = matching_rules + if new_next_token is not None: + return_obj['NextToken'] = new_next_token + + return return_obj + + def list_rules(self, prefix=None, next_token=None, limit=None): + rules_array = self.rules.values() + + match_string = '.*' + if prefix is not None: + match_string = '^' + prefix + match_string + + match_regex = re.compile(match_string) + + matching_rules = [] + return_obj = {} + + start_index, end_index, new_next_token = self._process_token_and_limits(len(rules_array), next_token, limit) + + for i in range(start_index, end_index): + rule = rules_array[i] + if match_regex.match(rule.name): + matching_rules.append(rule) + + return_obj['Rules'] = matching_rules + if new_next_token is not None: + return_obj['NextToken'] = new_next_token + + return return_obj + + def list_targets_by_rule(self, rule, next_token=None, limit=None): + # We'll let a KeyError exception be thrown for response to handle if rule doesn't exist. + targets = self.rules[rule].targets.values() + + start_index, end_index, new_next_token = self._process_token_and_limits(len(targets), next_token, limit) + + returned_targets = [] + return_obj = {} + + for i in range(start_index, end_index): + returned_targets.append(targets[i]) + + return_obj['Targets'] = returned_targets + if new_next_token is not None: + return_obj['NextToken'] = new_next_token + + return return_obj def put_events(self): + # For the purposes of this mock, there is no backend action for putting an event. + # Response module will deal with replying. 
pass def put_rule(self, name, **kwargs): - pass + rule = Rule(name, **kwargs) + self.rules[rule.name] = rule + return rule.arn - def put_targets(self): - pass + def put_targets(self, name, targets): + self.rules[name].put_targets(targets) - def remove_targets(self): - pass + def remove_targets(self, name, ids): + self.rules[name].remove_targets(ids) def test_event_pattern(self): - pass + raise NotImplementedError() events_backend = EventsBackend() diff --git a/moto/events/responses.py b/moto/events/responses.py index 7be87d03d..8099f5c50 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -5,8 +5,8 @@ from moto.core.responses import BaseResponse class EventsHandler(BaseResponse): - def error(self, type_, status=400): - return status, self.response_headers, json.dumps({'__type': type_}) + def error(self, type_, message='', status=400): + return status, self.response_headers, json.dumps({'__type': type_, 'message': message}) def can_paginate(self): pass @@ -46,7 +46,7 @@ class EventsHandler(BaseResponse): def put_rule(self): if 'Name' not in self.body: - return self.error("com.amazonaws.events.validate#ValidationException") + return self.error('ValidationException', 'Parameter Name is required.') pass def put_targets(self): From db0b494b4f5f9ddeb1c623ce4fcd3512fc488978 Mon Sep 17 00:00:00 2001 From: Justin Wiley Date: Thu, 1 Dec 2016 17:23:51 -0800 Subject: [PATCH 003/213] Completed the CloudWatch Events mocking module and tests. 
--- moto/backends.py | 9 +- moto/events/models.py | 52 +++++---- moto/events/responses.py | 180 +++++++++++++++++++++++++++---- moto/events/urls.py | 2 +- tests/test_events/test_events.py | 173 +++++++++++++++++++++++++++++ 5 files changed, 368 insertions(+), 48 deletions(-) create mode 100644 tests/test_events/test_events.py diff --git a/moto/backends.py b/moto/backends.py index d1262a7cb..0cbcf4810 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -1,28 +1,30 @@ from __future__ import unicode_literals + from moto.apigateway import apigateway_backend from moto.autoscaling import autoscaling_backend from moto.awslambda import lambda_backend -from moto.cloudwatch import cloudwatch_backend from moto.cloudformation import cloudformation_backend +from moto.cloudwatch import cloudwatch_backend from moto.datapipeline import datapipeline_backend from moto.dynamodb import dynamodb_backend from moto.dynamodb2 import dynamodb_backend2 from moto.ec2 import ec2_backend from moto.elb import elb_backend from moto.emr import emr_backend +from moto.events import events_backend from moto.glacier import glacier_backend from moto.iam import iam_backend -from moto.opsworks import opsworks_backend from moto.kinesis import kinesis_backend from moto.kms import kms_backend +from moto.opsworks import opsworks_backend from moto.rds import rds_backend from moto.redshift import redshift_backend +from moto.route53 import route53_backend from moto.s3 import s3_backend from moto.ses import ses_backend from moto.sns import sns_backend from moto.sqs import sqs_backend from moto.sts import sts_backend -from moto.route53 import route53_backend BACKENDS = { 'apigateway': apigateway_backend, @@ -34,6 +36,7 @@ BACKENDS = { 'dynamodb2': dynamodb_backend2, 'ec2': ec2_backend, 'elb': elb_backend, + 'events': events_backend, 'emr': emr_backend, 'glacier': glacier_backend, 'iam': iam_backend, diff --git a/moto/events/models.py b/moto/events/models.py index 5a10c1658..6063a8b7d 100644 --- 
a/moto/events/models.py +++ b/moto/events/models.py @@ -1,4 +1,3 @@ -import binascii import os import re from collections import OrderedDict @@ -13,12 +12,12 @@ class Rule(object): def __init__(self, name, **kwargs): self.name = name - self.arn = kwargs['arn'] if 'arn' in kwargs else self._generate_arn(name) - self.event_pattern = kwargs['event_pattern'] if 'event_pattern' in kwargs else None - self.schedule_exp = kwargs['schedule_exp'] if 'schedule_exp' in kwargs else None - self.state = kwargs['state'] if 'state' in kwargs else 'ENABLED' - self.description = kwargs['description'] if 'description' in kwargs else None - self.role_arn = kwargs['role_arn'] if 'role_arn' in kwargs else None + self.arn = kwargs.get('Arn') or self._generate_arn(name) + self.event_pattern = kwargs.get('EventPattern') + self.schedule_exp = kwargs.get('ScheduleExpression') + self.state = kwargs.get('State') or 'ENABLED' + self.description = kwargs.get('Description') + self.role_arn = kwargs.get('RoleArn') self.targets = {} def enable(self): @@ -28,9 +27,9 @@ class Rule(object): self.state = 'DISABLED' def put_targets(self, targets): - # TODO: Will need to test for valid ARNs. + # Not testing for valid ARNs. 
for target in targets: - self.targets[target['TargetId']] = target + self.targets[target['Id']] = target def remove_targets(self, ids): for target in ids: @@ -45,7 +44,7 @@ class EventsBackend(BaseBackend): self.next_tokens = {} def _gen_next_token(self, index): - token = binascii.hexlify(os.urandom(16)) + token = os.urandom(128).encode('base64') self.next_tokens[token] = index return token @@ -54,15 +53,14 @@ class EventsBackend(BaseBackend): end_index = array_len new_next_token = None - if next_token is not None: - if next_token in self.next_tokens: - start_index = self.next_tokens[next_token] + if next_token: + start_index = self.next_tokens.pop(next_token, 0) if limit is not None: new_end_index = start_index + int(limit) if new_end_index < end_index: end_index = new_end_index - new_next_token = self._gen_next_token(end_index - 1) + new_next_token = self._gen_next_token(end_index) return start_index, end_index, new_next_token @@ -70,10 +68,7 @@ class EventsBackend(BaseBackend): return self.rules.pop(name) is not None def describe_rule(self, name): - if name in self.rules: - return self.rules[name] - - return None + return self.rules.get(name) def disable_rule(self, name): if name in self.rules: @@ -102,8 +97,9 @@ class EventsBackend(BaseBackend): for i in range(start_index, end_index): rule = rules_array[i] - if target_arn in rule.targets: - matching_rules.append(rule.name) + for target in rule.targets: + if rule.targets[target]['Arn'] == target_arn: + matching_rules.append(rule.name) return_obj['RuleNames'] = matching_rules if new_next_token is not None: @@ -165,10 +161,22 @@ class EventsBackend(BaseBackend): return rule.arn def put_targets(self, name, targets): - self.rules[name].put_targets(targets) + rule = self.rules.get(name) + + if rule: + rule.put_targets(targets) + return True + + return False def remove_targets(self, name, ids): - self.rules[name].remove_targets(ids) + rule = self.rules.get(name) + + if rule: + rule.remove_targets(ids) + return True + 
+ return False def test_event_pattern(self): raise NotImplementedError() diff --git a/moto/events/responses.py b/moto/events/responses.py index 8099f5c50..7d63388b7 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -1,59 +1,195 @@ import json +import re from moto.core.responses import BaseResponse +from moto.events import events_backend class EventsHandler(BaseResponse): - def error(self, type_, message='', status=400): - return status, self.response_headers, json.dumps({'__type': type_, 'message': message}) + def _generate_rule_dict(self, rule): + return { + 'Name': rule.name, + 'Arn': rule.arn, + 'EventPattern': rule.event_pattern, + 'State': rule.state, + 'Description': rule.description, + 'ScheduleExpression': rule.schedule_exp, + 'RoleArn': rule.role_arn + } - def can_paginate(self): - pass + def load_body(self): + decoded_body = self.body.decode('utf-8') + return json.loads(decoded_body or '{}') + + def error(self, type_, message='', status=400): + headers = self.response_headers + headers['status'] = status + return json.dumps({'__type': type_, 'message': message}), headers, def delete_rule(self): - pass + body = self.load_body() + name = body.get('NamePrefix') + + if not name: + return self.error('ValidationException', 'Parameter Name is required.') + + return '', self.response_headers def describe_rule(self): - pass + body = self.load_body() + name = body.get('Name') + + if not name: + return self.error('ValidationException', 'Parameter Name is required.') + + rule = events_backend.describe_rule(name) + + if not rule: + return self.error('ResourceNotFoundException', 'Rule test does not exist.') + + rule_dict = self._generate_rule_dict(rule) + return json.dumps(rule_dict), self.response_headers def disable_rule(self): - pass + body = self.load_body() + name = body.get('Name') + + if not name: + return self.error('ValidationException', 'Parameter Name is required.') + + if not events_backend.disable_rule(name): + return 
self.error('ResourceNotFoundException', 'Rule ' + name + ' does not exist.') + + return '', self.response_headers def enable_rule(self): - pass + body = self.load_body() + name = body.get('Name') + + if not name: + return self.error('ValidationException', 'Parameter Name is required.') + + if not events_backend.enable_rule(name): + return self.error('ResourceNotFoundException', 'Rule ' + name + ' does not exist.') + + return '', self.response_headers def generate_presigned_url(self): pass - def get_paginator(self): - pass - - def get_waiter(self): - pass - def list_rule_names_by_target(self): - pass + body = self.load_body() + target_arn = body.get('TargetArn') + next_token = body.get('NextToken') + limit = body.get('Limit') + + if not target_arn: + return self.error('ValidationException', 'Parameter TargetArn is required.') + + rule_names = events_backend.list_rule_names_by_target(target_arn, next_token, limit) + + return json.dumps(rule_names), self.response_headers def list_rules(self): - pass + body = self.load_body() + prefix = body.get('NamePrefix') + next_token = body.get('NextToken') + limit = body.get('Limit') + + rules = events_backend.list_rules(prefix, next_token, limit) + rules_obj = {'Rules': []} + + for rule in rules['Rules']: + rules_obj['Rules'].append(self._generate_rule_dict(rule)) + + if rules.get('NextToken'): + rules_obj['NextToken'] = rules['NextToken'] + + return json.dumps(rules_obj), self.response_headers def list_targets_by_rule(self): - pass + body = self.load_body() + rule_name = body.get('Rule') + next_token = body.get('NextToken') + limit = body.get('Limit') + + if not rule_name: + return self.error('ValidationException', 'Parameter Rule is required.') + + try: + targets = events_backend.list_targets_by_rule(rule_name, next_token, limit) + except KeyError: + return self.error('ResourceNotFoundException', 'Rule ' + rule_name + ' does not exist.') + + return json.dumps(targets), self.response_headers def put_events(self): - pass + 
return '', self.response_headers def put_rule(self): - if 'Name' not in self.body: + body = self.load_body() + name = body.get('Name') + event_pattern = body.get('EventPattern') + sched_exp = body.get('ScheduleExpression') + + if not name: return self.error('ValidationException', 'Parameter Name is required.') - pass + + if event_pattern: + try: + json.loads(event_pattern) + except ValueError: + # Not quite as informative as the real error, but it'll work for now. + return self.error('InvalidEventPatternException', 'Event pattern is not valid.') + + if sched_exp: + if not (re.match('^cron\(.*\)', sched_exp) or + re.match('^rate\(\d*\s(minute|minutes|hour|hours|day|days)\)', sched_exp)): + return self.error('ValidationException', 'Parameter ScheduleExpression is not valid.') + + rule_arn = events_backend.put_rule( + name, + ScheduleExpression=sched_exp, + EventPattern=event_pattern, + State=body.get('State'), + Description=body.get('Description'), + RoleArn=body.get('RoleArn') + ) + + return json.dumps({'RuleArn': rule_arn}), self.response_headers def put_targets(self): - pass + body = self.load_body() + rule_name = body.get('Rule') + targets = body.get('Targets') + + if not rule_name: + return self.error('ValidationException', 'Parameter Rule is required.') + + if not targets: + return self.error('ValidationException', 'Parameter Targets is required.') + + if not events_backend.put_targets(rule_name, targets): + return self.error('ResourceNotFoundException', 'Rule ' + rule_name + ' does not exist.') + + return '', self.response_headers def remove_targets(self): - pass + body = self.load_body() + rule_name = body.get('Rule') + ids = body.get('Ids') + + if not rule_name: + return self.error('ValidationException', 'Parameter Rule is required.') + + if not ids: + return self.error('ValidationException', 'Parameter Ids is required.') + + if not events_backend.remove_targets(rule_name, ids): + return self.error('ResourceNotFoundException', 'Rule ' + rule_name + ' does 
not exist.') + + return '', self.response_headers def test_event_pattern(self): pass diff --git a/moto/events/urls.py b/moto/events/urls.py index 9484e385e..c1ad554ff 100644 --- a/moto/events/urls.py +++ b/moto/events/urls.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals from .responses import EventsHandler url_bases = [ - "https?://events.(.+).amazonaws.com" + "https://events.(.+).amazonaws.com" ] url_paths = { diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py new file mode 100644 index 000000000..96d55bd0d --- /dev/null +++ b/tests/test_events/test_events.py @@ -0,0 +1,173 @@ +import random + +import boto3 + +from moto.events import mock_events + + +RULES = [ + {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'}, + {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'}, + {'Name': 'test3', 'EventPattern': '{"source": ["test-source"]}'} +] + +TARGETS = { + 'test-target-1': { + 'Id': 'test-target-1', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-1', + 'Rules': ['test1', 'test2'] + }, + 'test-target-2': { + 'Id': 'test-target-2', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-2', + 'Rules': ['test1', 'test3'] + }, + 'test-target-3': { + 'Id': 'test-target-3', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-3', + 'Rules': ['test1', 'test2'] + }, + 'test-target-4': { + 'Id': 'test-target-4', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-4', + 'Rules': ['test1', 'test3'] + }, + 'test-target-5': { + 'Id': 'test-target-5', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-5', + 'Rules': ['test1', 'test2'] + }, + 'test-target-6': { + 'Id': 'test-target-6', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-6', + 'Rules': ['test1', 'test3'] + } +} + + +def get_random_rule(): + return RULES[random.randint(0, len(RULES) - 1)] + + +@mock_events +def generate_environment(): + client = 
boto3.client('events', 'us-west-2') + + for rule in RULES: + client.put_rule( + Name=rule['Name'], + ScheduleExpression=rule.get('ScheduleExpression', ''), + EventPattern=rule.get('EventPattern', '') + ) + + targets = [] + for target, target_attr in TARGETS.iteritems(): + if rule['Name'] in target_attr.get('Rules'): + targets.append({'Id': target, 'Arn': target_attr['Arn']}) + + client.put_targets(Rule=rule['Name'], Targets=targets) + + return client + + +@mock_events +def test_list_rules(): + client = generate_environment() + response = client.list_rules() + + assert(response is not None) + assert(len(response['Rules']) > 0) + + +@mock_events +def test_describe_rule(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + response = client.describe_rule(Name=rule_name) + + assert(response is not None) + assert(response.get('Name') == rule_name) + assert(response.get('Arn') is not None) + + +@mock_events +def test_enable_disable_rule(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + + # Rules should start out enabled in these tests. 
+ rule = client.describe_rule(Name=rule_name) + assert(rule['State'] == 'ENABLED') + + client.disable_rule(Name=rule_name) + rule = client.describe_rule(Name=rule_name) + assert(rule['State'] == 'DISABLED') + + client.enable_rule(Name=rule_name) + rule = client.describe_rule(Name=rule_name) + assert(rule['State'] == 'ENABLED') + + +@mock_events +def test_list_rule_names_by_target(): + test_1_target = TARGETS['test-target-1'] + test_2_target = TARGETS['test-target-2'] + client = generate_environment() + + rules = client.list_rule_names_by_target(TargetArn=test_1_target['Arn']) + assert(len(rules) == len(test_1_target['Rules'])) + for rule in rules['RuleNames']: + assert(rule in test_1_target['Rules']) + + rules = client.list_rule_names_by_target(TargetArn=test_2_target['Arn']) + assert(len(rules) == len(test_2_target['Rules'])) + for rule in rules['RuleNames']: + assert(rule in test_2_target['Rules']) + + +@mock_events +def test_list_rules(): + client = generate_environment() + + rules = client.list_rules() + assert(len(rules['Rules']) == len(RULES)) + + +@mock_events +def test_list_targets_by_rule(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + targets = client.list_targets_by_rule(Rule=rule_name) + + expected_targets = [] + for target, attrs in TARGETS.iteritems(): + if rule_name in attrs.get('Rules'): + expected_targets.append(target) + + assert(len(targets['Targets']) == len(expected_targets)) + + +@mock_events +def test_remove_targets(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + + targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] + targets_before = len(targets) + assert(targets_before > 0) + + client.remove_targets(Rule=rule_name, Ids=[targets[0]['Id']]) + + targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] + targets_after = len(targets) + assert(targets_before - 1 == targets_after) + + +if __name__ == '__main__': + test_list_rules() + test_describe_rule() + 
test_enable_disable_rule() + test_list_rule_names_by_target() + test_list_rules() + test_list_targets_by_rule() + test_remove_targets() From 6c85a85e0d90c5e7ad0fdc1f7723c9fe2548466f Mon Sep 17 00:00:00 2001 From: Justin Wiley Date: Thu, 1 Dec 2016 19:10:59 -0800 Subject: [PATCH 004/213] Removed OrderedDicts for 2.6 and dict.iteritems() calls for 3.3+ compatibility. --- moto/events/models.py | 29 ++++++++++++----------------- tests/test_events/test_events.py | 10 +++++----- 2 files changed, 17 insertions(+), 22 deletions(-) diff --git a/moto/events/models.py b/moto/events/models.py index 6063a8b7d..12ee7ef02 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -40,9 +40,14 @@ class Rule(object): class EventsBackend(BaseBackend): def __init__(self): - self.rules = OrderedDict() + self.rules = {} + # This array tracks the order in which the rules have been added, since 2.6 doesn't have OrderedDicts + self.rules_order = [] self.next_tokens = {} + def _get_rule_by_index(self, i): + return self.rules.get(self.rules_order[i]) + def _gen_next_token(self, index): token = os.urandom(128).encode('base64') self.next_tokens[token] = index @@ -65,6 +70,7 @@ class EventsBackend(BaseBackend): return start_index, end_index, new_next_token def delete_rule(self, name): + self.rules_order.pop(self.rules_order.index(name)) return self.rules.pop(name) is not None def describe_rule(self, name): @@ -84,19 +90,14 @@ class EventsBackend(BaseBackend): return False - def generate_presigned_url(self): - pass - def list_rule_names_by_target(self, target_arn, next_token=None, limit=None): - rules_array = self.rules.values() - matching_rules = [] return_obj = {} - start_index, end_index, new_next_token = self._process_token_and_limits(len(rules_array), next_token, limit) + start_index, end_index, new_next_token = self._process_token_and_limits(len(self.rules), next_token, limit) for i in range(start_index, end_index): - rule = rules_array[i] + rule = self._get_rule_by_index(i) for 
target in rule.targets: if rule.targets[target]['Arn'] == target_arn: matching_rules.append(rule.name) @@ -108,8 +109,6 @@ class EventsBackend(BaseBackend): return return_obj def list_rules(self, prefix=None, next_token=None, limit=None): - rules_array = self.rules.values() - match_string = '.*' if prefix is not None: match_string = '^' + prefix + match_string @@ -119,10 +118,10 @@ class EventsBackend(BaseBackend): matching_rules = [] return_obj = {} - start_index, end_index, new_next_token = self._process_token_and_limits(len(rules_array), next_token, limit) + start_index, end_index, new_next_token = self._process_token_and_limits(len(self.rules), next_token, limit) for i in range(start_index, end_index): - rule = rules_array[i] + rule = self._get_rule_by_index(i) if match_regex.match(rule.name): matching_rules.append(rule) @@ -150,14 +149,10 @@ class EventsBackend(BaseBackend): return return_obj - def put_events(self): - # For the purposes of this mock, there is no backend action for putting an event. - # Response module will deal with replying. 
- pass - def put_rule(self, name, **kwargs): rule = Rule(name, **kwargs) self.rules[rule.name] = rule + self.rules_order.append(rule.name) return rule.arn def put_targets(self, name, targets): diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 96d55bd0d..a2d5a5d47 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -61,9 +61,9 @@ def generate_environment(): ) targets = [] - for target, target_attr in TARGETS.iteritems(): - if rule['Name'] in target_attr.get('Rules'): - targets.append({'Id': target, 'Arn': target_attr['Arn']}) + for target in TARGETS: + if rule['Name'] in TARGETS[target].get('Rules'): + targets.append({'Id': target, 'Arn': TARGETS[target]['Arn']}) client.put_targets(Rule=rule['Name'], Targets=targets) @@ -140,8 +140,8 @@ def test_list_targets_by_rule(): targets = client.list_targets_by_rule(Rule=rule_name) expected_targets = [] - for target, attrs in TARGETS.iteritems(): - if rule_name in attrs.get('Rules'): + for target in TARGETS: + if rule_name in TARGETS[target].get('Rules'): expected_targets.append(target) assert(len(targets['Targets']) == len(expected_targets)) From 5d7a102e4c453a2b4eb64cb4ad617798017b9605 Mon Sep 17 00:00:00 2001 From: Justin Wiley Date: Thu, 1 Dec 2016 19:24:40 -0800 Subject: [PATCH 005/213] Stoopid import was still hanging around. 
--- moto/events/models.py | 1 - moto/events/urls.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/moto/events/models.py b/moto/events/models.py index 12ee7ef02..83386cf87 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -1,6 +1,5 @@ import os import re -from collections import OrderedDict from moto.core import BaseBackend diff --git a/moto/events/urls.py b/moto/events/urls.py index c1ad554ff..bff05da3f 100644 --- a/moto/events/urls.py +++ b/moto/events/urls.py @@ -8,4 +8,4 @@ url_bases = [ url_paths = { "{0}/": EventsHandler.dispatch, -} \ No newline at end of file +} From c7757f953cbc90633fd0f54518c2bb70f9142cbc Mon Sep 17 00:00:00 2001 From: Justin Wiley Date: Thu, 1 Dec 2016 19:52:00 -0800 Subject: [PATCH 006/213] Can't iterate over dict values in Python 3.3+. Changed Rule.targets from a dict to an array, which is probably better anyway since the dict doesn't maintain order, making API calls with the Limit parameter specified unreliable. --- moto/events/models.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/moto/events/models.py b/moto/events/models.py index 83386cf87..94cca5ee7 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -17,7 +17,7 @@ class Rule(object): self.state = kwargs.get('State') or 'ENABLED' self.description = kwargs.get('Description') self.role_arn = kwargs.get('RoleArn') - self.targets = {} + self.targets = [] def enable(self): self.state = 'ENABLED' @@ -25,22 +25,35 @@ class Rule(object): def disable(self): self.state = 'DISABLED' + # This song and dance for targets is because we need order for Limits and NextTokens, but can't use OrderedDicts + # with Python 2.6, so tracking it with an array it is. + def _check_target_exists(self, target_id): + for i in range(0, len(self.targets)): + if target_id == self.targets[i]['Id']: + return i + return None + def put_targets(self, targets): # Not testing for valid ARNs. 
for target in targets: - self.targets[target['Id']] = target + index = self._check_target_exists(target['Id']) + if index is not None: + self.targets[index] = target + else: + self.targets.append(target) def remove_targets(self, ids): - for target in ids: - if target in self.targets: - self.targets.pop(target) + for target_id in ids: + index = self._check_target_exists(target_id) + if index is not None: + self.targets.pop(index) class EventsBackend(BaseBackend): def __init__(self): self.rules = {} - # This array tracks the order in which the rules have been added, since 2.6 doesn't have OrderedDicts + # This array tracks the order in which the rules have been added, since 2.6 doesn't have OrderedDicts. self.rules_order = [] self.next_tokens = {} @@ -98,7 +111,7 @@ class EventsBackend(BaseBackend): for i in range(start_index, end_index): rule = self._get_rule_by_index(i) for target in rule.targets: - if rule.targets[target]['Arn'] == target_arn: + if target['Arn'] == target_arn: matching_rules.append(rule.name) return_obj['RuleNames'] = matching_rules @@ -132,15 +145,15 @@ class EventsBackend(BaseBackend): def list_targets_by_rule(self, rule, next_token=None, limit=None): # We'll let a KeyError exception be thrown for response to handle if rule doesn't exist. - targets = self.rules[rule].targets.values() + rule = self.rules[rule] - start_index, end_index, new_next_token = self._process_token_and_limits(len(targets), next_token, limit) + start_index, end_index, new_next_token = self._process_token_and_limits(len(rule.targets), next_token, limit) returned_targets = [] return_obj = {} for i in range(start_index, end_index): - returned_targets.append(targets[i]) + returned_targets.append(rule.targets[i]) return_obj['Targets'] = returned_targets if new_next_token is not None: From 24fdf5b6fec8c431d564e9fb4b1bf68ddc1b913b Mon Sep 17 00:00:00 2001 From: Justin Wiley Date: Thu, 1 Dec 2016 20:02:54 -0800 Subject: [PATCH 007/213] Added myself as a contributor. 
:P --- AUTHORS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS.md b/AUTHORS.md index be500fae8..356f8826a 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -42,3 +42,4 @@ Moto is written by Steve Pulec with contributions from: * [Pior Bastida](https://github.com/pior) * [Dustin J. Mitchell](https://github.com/djmitche) * [Jean-Baptiste Barth](https://github.com/jbbarth) +* [Justin Wiley](https://github.com/SectorNine50) From ee8e72766a087e97f9bd3bd0c1b0b491f67df6f0 Mon Sep 17 00:00:00 2001 From: Paul Cieslar Date: Sat, 3 Dec 2016 23:12:22 +0000 Subject: [PATCH 008/213] Support for ecs describe_task_definition (#777) * describe_task_definition support * Refactor fetch_task_definition * Add ECS Descriptors (#772) * Add support for "DescribeServices" in ecs mock * Add support for "DescribeTaskDefinition" in ecs * Let ecs responses handle baseobject for services * Update Cloudformation/parsing#load_parameters to split commadelimitedlists into lists (#774) * Fix JSON dump error in ecs.update_service with task_definition (#775) * Fix s3bucketpath handling for IP based requests (#765) * check HTTP header for IPv4 or IPv6 addresses and default to path based S3 * improved IPv4 and IPv6 checking with optional ports * typo * Freezetime. * Add S3 ACL for aws-exec-read. Closes #740. * Fixed time formatting in ec2/models.py (#778) * Fixed time formatting in ec2/models.py * Used freezegun on test that was failing due to time progression causing timestamp differences. * rename duplicate rds/models db_instance_identifier to physical_resource_id (#776) * rename duplicate db_instance_identifier to physical_resource_id * Update create_from_cloudformation_json to use db_source_identifier str * Update code to be more conventional. 
* describe_task_definition support * Refactor fetch_task_definition --- moto/ecs/models.py | 10 +++++----- moto/ecs/responses.py | 8 ++++++++ tests/test_ecs/test_ecs_boto3.py | 25 +++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 5 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 65f44c8bf..0b3b3ec49 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -150,7 +150,7 @@ class EC2ContainerServiceBackend(BaseBackend): self.services = {} self.container_instances = {} - def fetch_task_definition(self, task_definition_str): + def describe_task_definition(self, task_definition_str): task_definition_components = task_definition_str.split(':') if len(task_definition_components) == 2: family, revision = task_definition_components @@ -246,7 +246,7 @@ class EC2ContainerServiceBackend(BaseBackend): cluster = self.clusters[cluster_name] else: raise Exception("{0} is not a cluster".format(cluster_name)) - task_definition = self.fetch_task_definition(task_definition_str) + task_definition = self.describe_task_definition(task_definition_str) if cluster_name not in self.tasks: self.tasks[cluster_name] = {} tasks = [] @@ -268,7 +268,7 @@ class EC2ContainerServiceBackend(BaseBackend): cluster = self.clusters[cluster_name] else: raise Exception("{0} is not a cluster".format(cluster_name)) - task_definition = self.fetch_task_definition(task_definition_str) + task_definition = self.describe_task_definition(task_definition_str) if cluster_name not in self.tasks: self.tasks[cluster_name] = {} tasks = [] @@ -346,7 +346,7 @@ class EC2ContainerServiceBackend(BaseBackend): cluster = self.clusters[cluster_name] else: raise Exception("{0} is not a cluster".format(cluster_name)) - task_definition = self.fetch_task_definition(task_definition_str) + task_definition = self.describe_task_definition(task_definition_str) desired_count = desired_count if desired_count is not None else 0 service = Service(cluster, service_name, task_definition, desired_count) 
cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) @@ -375,7 +375,7 @@ class EC2ContainerServiceBackend(BaseBackend): cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) if cluster_service_pair in self.services: if task_definition_str is not None: - self.fetch_task_definition(task_definition_str) + task_definition = self.describe_task_definition(task_definition_str) self.services[cluster_service_pair].task_definition = task_definition_str if desired_count is not None: self.services[cluster_service_pair].desired_count = desired_count diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index 9be5e0b63..ce90de379 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -105,6 +105,14 @@ class EC2ContainerServiceResponse(BaseResponse): 'failures': [] }) + def describe_task_definition(self): + task_definition_str = self._get_param('taskDefinition') + data = self.ecs_backend.describe_task_definition(task_definition_str) + return json.dumps({ + 'taskDefinition': data.response_object, + 'failures': [] + }) + def start_task(self): cluster_str = self._get_param('cluster') overrides = self._get_param('overrides') diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 271545efc..baf236ece 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -832,6 +832,31 @@ def test_describe_tasks(): set([response['tasks'][0]['taskArn'], response['tasks'][1]['taskArn']]).should.equal(set(tasks_arns)) +@mock_ecs +def describe_task_definition(): + client = boto3.client('ecs', region_name='us-east-1') + container_definition = { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + task_definition = client.register_task_definition( + family='test_ecs_task', + 
containerDefinitions=[container_definition] + ) + family = task_definition['family'] + task = client.describe_task_definition(taskDefinition=family) + task['containerDefinitions'][0].should.equal(container_definition) + task['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task2:1') + task['volumes'].should.equal([]) + @mock_ec2 @mock_ecs def test_stop_task(): From c54985a39fff340873538ee668c01e103c9dac5d Mon Sep 17 00:00:00 2001 From: Tom V Date: Sat, 3 Dec 2016 23:13:24 +0000 Subject: [PATCH 009/213] Fix for #748. Turn on autoescape for S3 templates. (#779) --- AUTHORS.md | 1 + CHANGELOG.md | 2 ++ moto/s3/responses.py | 4 ++++ tests/test_s3/test_s3.py | 8 ++++++++ 4 files changed, 15 insertions(+) diff --git a/AUTHORS.md b/AUTHORS.md index be500fae8..28c69cbde 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -42,3 +42,4 @@ Moto is written by Steve Pulec with contributions from: * [Pior Bastida](https://github.com/pior) * [Dustin J. Mitchell](https://github.com/djmitche) * [Jean-Baptiste Barth](https://github.com/jbbarth) +* [Tom Viner](https://github.com/tomviner) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cdc8b56c..0cddbe645 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ Moto Changelog Latest ------ + * Turn on variable escaping in templates for S3 XML documents + 0.4.30 ------ diff --git a/moto/s3/responses.py b/moto/s3/responses.py index b441b9ac5..5ae776e68 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -38,6 +38,10 @@ class ResponseObject(_TemplateEnvironmentMixin): super(ResponseObject, self).__init__() self.backend = backend + @property + def should_autoescape(self): + return True + def all_buckets(self): # No bucket specified. 
Listing all buckets all_buckets = self.backend.get_all_buckets() diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 9a5181074..0d8f7cb49 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -989,6 +989,14 @@ def test_boto3_key_etag(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"') +@mock_s3 +def test_boto3_list_keys_xml_escaped(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + key_name = 'Q&A.txt' + s3.put_object(Bucket='mybucket', Key=key_name, Body=b'is awesome') + resp = s3.list_objects_v2(Bucket='mybucket', Prefix=key_name) + assert resp['Contents'][0]['Key'] == key_name @mock_s3 def test_boto3_bucket_create(): From 5dc8e59fab9bd2009437914cb574b1c479239d8b Mon Sep 17 00:00:00 2001 From: mfranke <2mf@users.noreply.github.com> Date: Sun, 4 Dec 2016 00:15:24 +0100 Subject: [PATCH 010/213] Fix s3bucket_path (#784) * check HTTP header for IPv4 or IPv6 addresses and default to path based S3 * improved IPv4 and IPv6 checking with optional ports * typo * subdomain bucket creation with trailing '/' did not work * Use regex for Host field check to determine IPv4/IPv6 * add testcases for trailing slash, IPv4 and IPv6 --- moto/s3/responses.py | 17 +++---- moto/s3/urls.py | 14 +++++- .../test_bucket_path_server.py | 44 +++++++++++++++++++ 3 files changed, 62 insertions(+), 13 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 5ae776e68..ac1533eb0 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -5,7 +5,6 @@ import re import six from six.moves.urllib.parse import parse_qs, urlparse -import socket import xmltodict from moto.core.responses import _TemplateEnvironmentMixin @@ -57,21 +56,17 @@ class ResponseObject(_TemplateEnvironmentMixin): match = re.match(r'^([^\[\]:]+)(:\d+)?$', host) if match: - try: - socket.inet_pton(socket.AF_INET, match.groups()[0]) - # For IPv4, default to 
path-based buckets + match = re.match(r'((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}', + match.groups()[0]) + if match: return False - except socket.error: - pass match = re.match(r'^\[(.+)\](:\d+)?$', host) if match: - try: - socket.inet_pton(socket.AF_INET6, match.groups()[0]) - # For IPv6, default to path-based buckets + match = re.match(r'^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z', + match.groups()[0], re.IGNORECASE) + if match: return False - except socket.error: - pass path_based = (host == 's3.amazonaws.com' or re.match(r"s3[\.\-]([^.]*)\.amazonaws\.com", host)) return not path_based diff --git a/moto/s3/urls.py b/moto/s3/urls.py index 1f0677956..98686e495 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -7,13 +7,23 @@ url_bases = [ "https?://(?P[a-zA-Z0-9\-_.]*)\.?s3(.*).amazonaws.com" ] + +def ambiguous_response1(*args, **kwargs): + return S3ResponseInstance.ambiguous_response(*args, **kwargs) + + +def ambiguous_response2(*args, **kwargs): + return S3ResponseInstance.ambiguous_response(*args, **kwargs) + + url_paths = { # subdomain bucket '{0}/$': S3ResponseInstance.bucket_response, # subdomain key of path-based bucket - '{0}/(?P[^/]+)/?$': S3ResponseInstance.ambiguous_response, - + '{0}/(?P[^/]+)$': ambiguous_response1, + # subdomain key of path-based bucket + '{0}/(?P[^/]+)/$': ambiguous_response2, # path-based bucket + key '{0}/(?P[^/]+)/(?P.+)': S3ResponseInstance.key_response, } diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_bucket_path_server.py index 4434c02fd..adc5de532 100644 --- a/tests/test_s3bucket_path/test_bucket_path_server.py +++ b/tests/test_s3bucket_path/test_bucket_path_server.py @@ -31,6 +31,16 @@ def test_s3_server_bucket_create(): res.status_code.should.equal(200) res.data.should.contain(b"ListBucketResult") + res = test_client.put('/foobar2/', 'http://localhost:5000') + 
res.status_code.should.equal(200) + + res = test_client.get('/') + res.data.should.contain(b'foobar2') + + res = test_client.get('/foobar2/', 'http://localhost:5000') + res.status_code.should.equal(200) + res.data.should.contain(b"ListBucketResult") + res = test_client.get('/missing-bucket', 'http://localhost:5000') res.status_code.should.equal(404) @@ -57,3 +67,37 @@ def test_s3_server_post_to_bucket(): res = test_client.get('/foobar2/the-key', 'http://localhost:5000/') res.status_code.should.equal(200) res.data.should.equal(b"nothing") + + +def test_s3_server_put_ipv6(): + backend = server.create_backend_app("s3bucket_path") + test_client = backend.test_client() + + res = test_client.put('/foobar2', 'http://[::]:5000/') + res.status_code.should.equal(200) + + test_client.post('/foobar2', "https://[::]:5000/", data={ + 'key': 'the-key', + 'file': 'nothing' + }) + + res = test_client.get('/foobar2/the-key', 'http://[::]:5000/') + res.status_code.should.equal(200) + res.data.should.equal(b"nothing") + + +def test_s3_server_put_ipv4(): + backend = server.create_backend_app("s3bucket_path") + test_client = backend.test_client() + + res = test_client.put('/foobar2', 'http://127.0.0.1:5000/') + res.status_code.should.equal(200) + + test_client.post('/foobar2', "https://127.0.0.1:5000/", data={ + 'key': 'the-key', + 'file': 'nothing' + }) + + res = test_client.get('/foobar2/the-key', 'http://127.0.0.1:5000/') + res.status_code.should.equal(200) + res.data.should.equal(b"nothing") From ed0e81fc611250f4af692b5bc22f4ef07106d5ce Mon Sep 17 00:00:00 2001 From: Rob Walker Date: Sun, 4 Dec 2016 09:17:15 +1000 Subject: [PATCH 011/213] Fixup lambda for ResponseRequest (#781) * Fixup lambda for ResponseRequest * one day will get this right --- moto/awslambda/models.py | 14 +++-- tests/test_awslambda/test_lambda.py | 84 +++++++++++++++-------------- 2 files changed, 54 insertions(+), 44 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 
260d2d338..069717ca4 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -112,7 +112,7 @@ class LambdaFunction(object): def convert(self, s): try: - return str(s, encoding='utf8') + return str(s, encoding='utf-8') except: return s @@ -128,7 +128,8 @@ class LambdaFunction(object): try: mycode = "\n".join(['import json', self.convert(self.code), - self.convert('print(lambda_handler(%s, %s))' % (self.is_json(self.convert(event)), context))]) + self.convert('print(json.dumps(lambda_handler(%s, %s)))' % (self.is_json(self.convert(event)), context))]) + #print("moto_lambda_debug: ", mycode) except Exception as ex: print("Exception %s", ex) @@ -141,7 +142,9 @@ class LambdaFunction(object): exec(mycode) exec_err = codeErr.getvalue() exec_out = codeOut.getvalue() - result = "\n".join([exec_out, self.convert(exec_err)]) + result = self.convert(exec_out.strip()) + if exec_err: + result = "\n".join([exec_out.strip(), self.convert(exec_err)]) except Exception as ex: result = '%s\n\n\nException %s' % (mycode, ex) finally: @@ -160,8 +163,11 @@ class LambdaFunction(object): encoded = base64.b64encode(r.encode('utf-8')) headers["x-amz-log-result"] = encoded.decode('utf-8') payload['result'] = headers["x-amz-log-result"] + result = r.encode('utf-8') + else: + result = json.dumps(payload) - return json.dumps(payload, indent=4) + return result @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 6b0655d98..ce8892dc9 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -25,7 +25,7 @@ def _process_lamda(pfunc): def get_test_zip_file1(): pfunc = """ def lambda_handler(event, context): - return (event, context) + return event """ return _process_lamda(pfunc) @@ -39,6 +39,7 @@ def lambda_handler(event, context): ec2 = boto3.resource('ec2', region_name='us-west-2') vol = 
ec2.Volume(volume_id) print('Volume - %s state=%s, size=%s' % (volume_id, vol.state, vol.size)) + return event """ return _process_lamda(pfunc) @@ -50,33 +51,6 @@ def test_list_functions(): result = conn.list_functions() result['Functions'].should.have.length_of(0) -@mock_lambda -@freeze_time('2015-01-01 00:00:00') -def test_invoke_event_function(): - conn = boto3.client('lambda', 'us-west-2') - conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', - Role='test-iam-role', - Handler='lambda_function.handler', - Code={ - 'ZipFile': get_test_zip_file1(), - }, - Description='test lambda function', - Timeout=3, - MemorySize=128, - Publish=True, - ) - - success_result = conn.invoke(FunctionName='testFunction', InvocationType='Event', Payload=json.dumps({'msg': 'Mostly Harmless'})) - success_result["StatusCode"].should.equal(202) - - conn.invoke.when.called_with( - FunctionName='notAFunction', - InvocationType='Event', - Payload='{}' - ).should.throw(botocore.client.ClientError) - @mock_lambda @freeze_time('2015-01-01 00:00:00') @@ -96,12 +70,44 @@ def test_invoke_requestresponse_function(): Publish=True, ) + in_data = {'msg': 'So long and thanks for all the fish'} success_result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse', - Payload=json.dumps({'msg': 'So long and thanks for all the fish'})) - success_result["StatusCode"].should.equal(202) + Payload=json.dumps(in_data)) + + success_result["StatusCode"].should.equal(202) + base64.b64decode(success_result["LogResult"]).decode('utf-8').should.equal(json.dumps(in_data)) + json.loads(success_result["Payload"].read().decode('utf-8')).should.equal(in_data) + + +@mock_lambda +@freeze_time('2015-01-01 00:00:00') +def test_invoke_event_function(): + conn = boto3.client('lambda', 'us-west-2') + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.handler', + Code={ + 'ZipFile': get_test_zip_file1(), + }, + 
Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + conn.invoke.when.called_with( + FunctionName='notAFunction', + InvocationType='Event', + Payload='{}' + ).should.throw(botocore.client.ClientError) + + in_data = {'msg': 'So long and thanks for all the fish'} + success_result = conn.invoke(FunctionName='testFunction', InvocationType='Event', Payload=json.dumps(in_data)) + success_result["StatusCode"].should.equal(202) + json.loads(success_result['Payload'].read().decode('utf-8')).should.equal({}) - #nasty hack - hope someone has better solution dealing with unicode tests working for Py2 and Py3. - base64.b64decode(success_result["LogResult"]).decode('utf-8').replace("u'", "'").should.equal("({'msg': 'So long and thanks for all the fish'}, {})\n\n") @mock_ec2 @mock_lambda @@ -126,14 +132,12 @@ def test_invoke_function_get_ec2_volume(): Publish=True, ) - import json - success_result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse', Payload=json.dumps({'volume_id': vol.id})) - success_result["StatusCode"].should.equal(202) - - import base64 - msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\nNone\n\n' % (vol.id, vol.id, vol.state, vol.size) - # yet again hacky solution to allow code to run tests for python2 and python3 - pls someone fix :( - base64.b64decode(success_result["LogResult"]).decode('utf-8').replace("u'", "'").should.equal(msg) + in_data = {'volume_id': vol.id} + result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse', Payload=json.dumps(in_data)) + result["StatusCode"].should.equal(202) + msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % (vol.id, vol.id, vol.state, vol.size, json.dumps(in_data)) + base64.b64decode(result["LogResult"]).decode('utf-8').should.equal(msg) + result['Payload'].read().decode('utf-8').should.equal(msg) @mock_lambda From 2c6a967f45a94cce04d625fa182c41501f62cc84 Mon Sep 17 00:00:00 2001 From: Steve Pulec 
Date: Sat, 3 Dec 2016 18:59:28 -0500 Subject: [PATCH 012/213] Disable strict_slashes for Flask so that S3 works in server mode. --- moto/s3/urls.py | 4 +--- moto/server.py | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/moto/s3/urls.py b/moto/s3/urls.py index 98686e495..8faad6282 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -21,9 +21,7 @@ url_paths = { '{0}/$': S3ResponseInstance.bucket_response, # subdomain key of path-based bucket - '{0}/(?P[^/]+)$': ambiguous_response1, - # subdomain key of path-based bucket - '{0}/(?P[^/]+)/$': ambiguous_response2, + '{0}/(?P[^/]+)/?$': S3ResponseInstance.ambiguous_response, # path-based bucket + key '{0}/(?P[^/]+)/(?P.+)': S3ResponseInstance.key_response, } diff --git a/moto/server.py b/moto/server.py index 6e43c47ac..5ee12362e 100644 --- a/moto/server.py +++ b/moto/server.py @@ -113,6 +113,7 @@ def create_backend_app(service): endpoint=endpoint, methods=HTTP_METHODS, view_func=convert_flask_to_httpretty_response(handler), + strict_slashes=False, ) backend_app.test_client_class = AWSTestHelper From 0115267f2ace863715f2a94aa4767144e85a2add Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Tue, 20 Dec 2016 10:37:18 -0500 Subject: [PATCH 013/213] Add ECS CloudFormation support (#795) * Add cloudformation support to AWS::ECS::Cluster * Add CloudFormation support to AWS::ECS::TaskDefinition * Add CloudFormation support to AWS::ECS::Service * Add support to update AWS::ECS::Cluster through CloudFormation * Fix Cluster.update_from_cloudformation_json to return original_resource if nothing changed * Implement TaskDefinition.update_from_cloudformation_json * Implement Service.update_from_cloudformation_json --- moto/cloudformation/parsing.py | 7 + moto/ecs/models.py | 106 ++++++++++++- tests/test_ecs/test_ecs_boto3.py | 255 +++++++++++++++++++++++++++++++ 3 files changed, 366 insertions(+), 2 deletions(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 
971adbbf6..3e348ac37 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -3,11 +3,13 @@ import collections import functools import logging import copy +import warnings from moto.autoscaling import models as autoscaling_models from moto.awslambda import models as lambda_models from moto.datapipeline import models as datapipeline_models from moto.ec2 import models as ec2_models +from moto.ecs import models as ecs_models from moto.elb import models as elb_models from moto.iam import models as iam_models from moto.kms import models as kms_models @@ -43,6 +45,9 @@ MODEL_MAP = { "AWS::EC2::VPC": ec2_models.VPC, "AWS::EC2::VPCGatewayAttachment": ec2_models.VPCGatewayAttachment, "AWS::EC2::VPCPeeringConnection": ec2_models.VPCPeeringConnection, + "AWS::ECS::Cluster": ecs_models.Cluster, + "AWS::ECS::TaskDefinition": ecs_models.TaskDefinition, + "AWS::ECS::Service": ecs_models.Service, "AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer, "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline, "AWS::IAM::InstanceProfile": iam_models.InstanceProfile, @@ -175,6 +180,8 @@ def parse_resource(logical_id, resource_json, resources_map): resource_type = resource_json['Type'] resource_class = resource_class_from_type(resource_type) if not resource_class: + warnings.warn( + "Tried to parse {0} but it's not supported by moto's CloudFormation implementation".format(resource_type)) return None resource_json = clean_json(resource_json, resources_map) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 0b3b3ec49..cdb04fcd6 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals import uuid -from random import randint +from random import randint, random from moto.core import BaseBackend from moto.ec2 import ec2_backends @@ -48,13 +48,39 @@ class Cluster(BaseObject): del response_object['arn'], response_object['name'] return response_object + @classmethod + def 
create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + ecs_backend = ecs_backends[region_name] + return ecs_backend.create_cluster( + # ClusterName is optional in CloudFormation, thus create a random name if necessary + cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))), + ) + @classmethod + def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + if original_resource.name != properties['ClusterName']: + ecs_backend = ecs_backends[region_name] + ecs_backend.delete_cluster(original_resource.arn) + return ecs_backend.create_cluster( + # ClusterName is optional in CloudFormation, thus create a random name if necessary + cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))), + ) + else: + # no-op when nothing changed between old and new resources + return original_resource + class TaskDefinition(BaseObject): def __init__(self, family, revision, container_definitions, volumes=None): self.family = family self.arn = 'arn:aws:ecs:us-east-1:012345678910:task-definition/{0}:{1}'.format(family, revision) self.container_definitions = container_definitions - if volumes is not None: + if volumes is None: + self.volumes = [] + else: self.volumes = volumes @property @@ -64,6 +90,37 @@ class TaskDefinition(BaseObject): del response_object['arn'] return response_object + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6))) + container_definitions = properties['ContainerDefinitions'] + volumes = properties['Volumes'] + + ecs_backend = ecs_backends[region_name] + return ecs_backend.register_task_definition( + family=family, 
container_definitions=container_definitions, volumes=volumes) + + @classmethod + def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6))) + container_definitions = properties['ContainerDefinitions'] + volumes = properties['Volumes'] + if (original_resource.family != family or + original_resource.container_definitions != container_definitions or + original_resource.volumes != volumes + # currently TaskRoleArn isn't stored at TaskDefinition instances + ): + ecs_backend = ecs_backends[region_name] + ecs_backend.deregister_task_definition(original_resource.arn) + return ecs_backend.register_task_definition( + family=family, container_definitions=container_definitions, volumes=volumes) + else: + # no-op when nothing changed between old and new resources + return original_resource class Task(BaseObject): def __init__(self, cluster, task_definition, container_instance_arn, overrides={}, started_by=''): @@ -105,6 +162,51 @@ class Service(BaseObject): response_object['serviceArn'] = self.arn return response_object + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + if isinstance(properties['Cluster'], Cluster): + cluster = properties['Cluster'].name + else: + cluster = properties['Cluster'] + if isinstance(properties['TaskDefinition'], TaskDefinition): + task_definition = properties['TaskDefinition'].family + else: + task_definition = properties['TaskDefinition'] + service_name = '{0}Service{1}'.format(cluster, int(random() * 10 ** 6)) + desired_count = properties['DesiredCount'] + # TODO: LoadBalancers + # TODO: Role + + ecs_backend = ecs_backends[region_name] + return ecs_backend.create_service( + cluster, service_name, task_definition, desired_count) + + @classmethod + 
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + if isinstance(properties['Cluster'], Cluster): + cluster_name = properties['Cluster'].name + else: + cluster_name = properties['Cluster'] + if isinstance(properties['TaskDefinition'], TaskDefinition): + task_definition = properties['TaskDefinition'].family + else: + task_definition = properties['TaskDefinition'] + desired_count = properties['DesiredCount'] + + ecs_backend = ecs_backends[region_name] + service_name = original_resource.name + if original_resource.cluster_arn != Cluster(cluster_name).arn: + # TODO: LoadBalancers + # TODO: Role + ecs_backend.delete_service(cluster_name, service_name) + new_service_name = '{0}Service{1}'.format(cluster_name, int(random() * 10 ** 6)) + return ecs_backend.create_service( + cluster_name, new_service_name, task_definition, desired_count) + else: + return ecs_backend.update_service(cluster_name, service_name, task_definition, desired_count) + class ContainerInstance(BaseObject): def __init__(self, ec2_instance_id): diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index baf236ece..5cd85549d 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -1,10 +1,14 @@ from __future__ import unicode_literals + +from copy import deepcopy + import boto3 import sure # noqa import json from moto.ec2 import utils as ec2_utils from uuid import UUID +from moto import mock_cloudformation from moto import mock_ecs from moto import mock_ec2 @@ -918,3 +922,254 @@ def test_stop_task(): stop_response['task']['lastStatus'].should.equal('STOPPED') stop_response['task']['desiredStatus'].should.equal('STOPPED') stop_response['task']['stoppedReason'].should.equal('moto testing') + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + 
"Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster" + } + } + } + } + template_json = json.dumps(template) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster1" + } + } + } + } + template2 = deepcopy(template1) + template2['Resources']['testCluster']['Properties']['ClusterName'] = 'testcluster2' + template1_json = json.dumps(template1) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + stack_resp = cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template1_json, + ) + + template2_json = json.dumps(template2) + cfn_conn.update_stack( + StackName=stack_resp['StackId'], + TemplateBody=template2_json + ) + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + resp['clusterArns'][0].endswith('testcluster2').should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_task_definition_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type" : "AWS::ECS::TaskDefinition", + "Properties" : { + "ContainerDefinitions" : [ + { + "Name": "ecs-sample", + "Image":"amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": 
"true" + } + ], + "Volumes" : [], + } + } + } + } + template_json = json.dumps(template) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_task_definitions() + len(resp['taskDefinitionArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type" : "AWS::ECS::TaskDefinition", + "Properties" : { + "Family": "testTaskDefinition1", + "ContainerDefinitions" : [ + { + "Name": "ecs-sample", + "Image":"amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes" : [], + } + } + } + } + template1_json = json.dumps(template1) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template1_json, + ) + + template2 = deepcopy(template1) + template2['Resources']['testTaskDefinition']['Properties']['Family'] = 'testTaskDefinition2' + template2_json = json.dumps(template2) + cfn_conn.update_stack( + StackName="test_stack", + TemplateBody=template2_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_task_definitions(familyPrefix='testTaskDefinition') + len(resp['taskDefinitionArns']).should.equal(1) + resp['taskDefinitionArns'][0].endswith('testTaskDefinition2:1').should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_service_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": 
"testcluster" + } + }, + "testTaskDefinition": { + "Type" : "AWS::ECS::TaskDefinition", + "Properties" : { + "ContainerDefinitions" : [ + { + "Name": "ecs-sample", + "Image":"amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes" : [], + } + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "DesiredCount": 10, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + } + } + } + } + template_json = json.dumps(template) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_services(cluster='testcluster') + len(resp['serviceArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_service_through_cloudformation_should_trigger_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster" + } + }, + "testTaskDefinition": { + "Type" : "AWS::ECS::TaskDefinition", + "Properties" : { + "ContainerDefinitions" : [ + { + "Name": "ecs-sample", + "Image":"amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes" : [], + } + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + "DesiredCount": 10, + } + } + } + } + template_json1 = json.dumps(template1) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json1, + ) + template2 = deepcopy(template1) + template2['Resources']['testService']['Properties']['DesiredCount'] = 5 + template2_json = 
json.dumps(template2) + cfn_conn.update_stack( + StackName="test_stack", + TemplateBody=template2_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_services(cluster='testcluster') + len(resp['serviceArns']).should.equal(1) From b5ff3345bee181f136aaf235428078e2aaa910a1 Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Tue, 20 Dec 2016 10:37:49 -0500 Subject: [PATCH 014/213] Add service ARNs support to the `DescribeServices` ECS API (#796) --- moto/ecs/models.py | 15 ++++++++------- tests/test_ecs/test_ecs_boto3.py | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index cdb04fcd6..e020e3208 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -463,14 +463,15 @@ class EC2ContainerServiceBackend(BaseBackend): service_arns.append(self.services[key].arn) return sorted(service_arns) - def describe_services(self, cluster_str, service_names): + def describe_services(self, cluster_str, service_names_or_arns): cluster_name = cluster_str.split('/')[-1] - services = [] - for service_name in service_names: - cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) - if cluster_service_pair in self.services: - services.append(self.services[cluster_service_pair]) - return services + result = [] + for existing_service_name, existing_service_obj in sorted(self.services.items()): + for requested_name_or_arn in service_names_or_arns: + cluster_service_pair = '{0}:{1}'.format(cluster_name, requested_name_or_arn) + if cluster_service_pair == existing_service_name or existing_service_obj.arn == requested_name_or_arn: + result.append(existing_service_obj) + return result def update_service(self, cluster_str, service_name, task_definition_str, desired_count): cluster_name = cluster_str.split('/')[-1] diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 5cd85549d..f073628a9 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ 
b/tests/test_ecs/test_ecs_boto3.py @@ -359,7 +359,7 @@ def test_describe_services(): ) response = client.describe_services( cluster='test_ecs_cluster', - services=['test_ecs_service1', 'test_ecs_service2'] + services=['test_ecs_service1', 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2'] ) len(response['services']).should.equal(2) response['services'][0]['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') From d07c646032a610021aebaf52ce38244d5bf0579c Mon Sep 17 00:00:00 2001 From: Raghavendra D Prabhu Date: Thu, 12 Jan 2017 07:05:56 +0530 Subject: [PATCH 015/213] sqs: Use unix_time in place of time.time() (#787) unix_time() from moto.core.utils is used as the time source through moto, and it is identical to time.time() in output. Hence, using unix_time() since it makes mocking easier during testing (when time is mocked out). --- moto/sqs/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 0afda61af..90a3f30f9 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -8,7 +8,7 @@ from xml.sax.saxutils import escape import boto.sqs from moto.core import BaseBackend -from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time_millis +from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis from .utils import generate_receipt_handle from .exceptions import ( ReceiptHandleIsInvalid, @@ -115,7 +115,7 @@ class Queue(object): self.wait_time_seconds = wait_time_seconds or 0 self._messages = [] - now = time.time() + now = unix_time() self.created_timestamp = now self.delay_seconds = 0 @@ -281,7 +281,7 @@ class SQSBackend(BaseBackend): queue = self.get_queue(queue_name) result = [] - polling_end = time.time() + wait_seconds_timeout + polling_end = unix_time() + wait_seconds_timeout # queue.messages only contains visible messages while True: @@ -295,7 +295,7 @@ class 
SQSBackend(BaseBackend): if len(result) >= count: break - if result or time.time() > polling_end: + if result or unix_time() > polling_end: break return result From 231d3cadcbdcdd15dd7bfe4c97e46e12e0d5444b Mon Sep 17 00:00:00 2001 From: Andrew Garrett Date: Wed, 11 Jan 2017 19:36:45 -0600 Subject: [PATCH 016/213] Fix the CloudFormation ValidationError message (#788) It should be a string, not a tuple, and it has a different form. I'm not sure if it used to be different, but in the most recent boto3/botocore, the message is "Stack with id {id} does not exist" ```python >>> cf = boto3.client('cloudformation', region_name='us-west-2') >>> try: ... cf.describe_stacks(StackName='adfgfhghg') ... except botocore.exceptions.ClientError as e: ... print e.response['Error']['Message'] ... Stack with id adfgfhghg does not exist ``` I am on boto3 1.4.2 and botocore 1.4.82 as of this commit message. --- moto/cloudformation/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/cloudformation/exceptions.py b/moto/cloudformation/exceptions.py index 4bbbbbcff..ed2856826 100644 --- a/moto/cloudformation/exceptions.py +++ b/moto/cloudformation/exceptions.py @@ -11,7 +11,7 @@ class UnformattedGetAttTemplateException(Exception): class ValidationError(BadRequest): def __init__(self, name_or_id, message=None): if message is None: - message="Stack:{0} does not exist".format(name_or_id), + message="Stack with id {0} does not exist".format(name_or_id) template = Template(ERROR_RESPONSE) super(ValidationError, self).__init__() From 02324ad70844a95ec85f526466b9277ec0cc6b8b Mon Sep 17 00:00:00 2001 From: Taro Sato Date: Wed, 11 Jan 2017 17:37:57 -0800 Subject: [PATCH 017/213] Add more availability regions and implement default VPC (#773) Fix filter name for availability zone Fix bug assuming dict keys are ordered Fix tests Fix tests Fix bug --- moto/__init__.py | 2 +- moto/ec2/models.py | 119 +++++++++++------- moto/ec2/responses/subnets.py | 22 +++- 
moto/ec2/urls.py | 2 +- moto/elb/models.py | 5 +- .../test_cloudformation_stack_integration.py | 7 +- .../test_availability_zones_and_regions.py | 41 ++++-- tests/test_ec2/test_key_pairs.py | 13 +- tests/test_ec2/test_network_acls.py | 44 +++---- tests/test_ec2/test_route_tables.py | 24 ++-- tests/test_ec2/test_security_groups.py | 17 +-- tests/test_ec2/test_subnets.py | 48 ++++--- tests/test_ec2/test_vpcs.py | 28 ++--- 13 files changed, 224 insertions(+), 148 deletions(-) diff --git a/moto/__init__.py b/moto/__init__.py index 5636ffb2b..c57586362 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals import logging -logging.getLogger('boto').setLevel(logging.CRITICAL) +#logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' __version__ = '0.4.30' diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 496fc5b8f..9146b283d 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1086,33 +1086,34 @@ class Zone(object): class RegionsAndZonesBackend(object): regions = [ + Region("ap-northeast-1", "ec2.ap-northeast-1.amazonaws.com"), + Region("ap-northeast-2", "ec2.ap-northeast-2.amazonaws.com"), + Region("ap-south-1", "ec2.ap-south-1.amazonaws.com"), + Region("ap-southeast-1", "ec2.ap-southeast-1.amazonaws.com"), + Region("ap-southeast-2", "ec2.ap-southeast-2.amazonaws.com"), + Region("cn-north-1", "ec2.cn-north-1.amazonaws.com.cn"), + Region("eu-central-1", "ec2.eu-central-1.amazonaws.com"), Region("eu-west-1", "ec2.eu-west-1.amazonaws.com"), Region("sa-east-1", "ec2.sa-east-1.amazonaws.com"), Region("us-east-1", "ec2.us-east-1.amazonaws.com"), - Region("ap-northeast-1", "ec2.ap-northeast-1.amazonaws.com"), - Region("us-west-2", "ec2.us-west-2.amazonaws.com"), + Region("us-east-2", "ec2.us-east-2.amazonaws.com"), + Region("us-gov-west-1", "ec2.us-gov-west-1.amazonaws.com"), Region("us-west-1", "ec2.us-west-1.amazonaws.com"), - Region("ap-southeast-1", "ec2.ap-southeast-1.amazonaws.com"), - 
Region("ap-southeast-2", "ec2.ap-southeast-2.amazonaws.com"), + Region("us-west-2", "ec2.us-west-2.amazonaws.com"), ] - # TODO: cleanup. For now, pretend everything is us-east-1. 'merica. - zones = [ - Zone("us-east-1a", "us-east-1"), - Zone("us-east-1b", "us-east-1"), - Zone("us-east-1c", "us-east-1"), - Zone("us-east-1d", "us-east-1"), - Zone("us-east-1e", "us-east-1"), - ] + zones = dict( + (region, [Zone(region + c, region) for c in 'abc']) + for region in [r.name for r in regions]) def describe_regions(self): return self.regions def describe_availability_zones(self): - return self.zones + return self.zones[self.region_name] def get_zone_by_name(self, name): - for zone in self.zones: + for zone in self.zones[self.region_name]: if zone.name == name: return zone @@ -1794,15 +1795,15 @@ class VPC(TaggedEC2Resource): return self.id def get_filter_value(self, filter_name): - if filter_name == 'vpc-id': + if filter_name in ('vpc-id', 'vpcId'): return self.id - elif filter_name == 'cidr': + elif filter_name in ('cidr', 'cidr-block', 'cidrBlock'): return self.cidr_block - elif filter_name == 'isDefault': + elif filter_name in ('is-default', 'isDefault'): return self.is_default elif filter_name == 'state': return self.state - elif filter_name == 'dhcp-options-id': + elif filter_name in ('dhcp-options-id', 'dhcpOptionsId'): if not self.dhcp_options: return None @@ -2013,12 +2014,7 @@ class Subnet(TaggedEC2Resource): @property def availability_zone(self): - if self._availability_zone is None: - # This could probably be smarter, but there doesn't appear to be a - # way to pull AZs for a region in boto - return self.ec2_backend.region_name + "a" - else: - return self._availability_zone + return self._availability_zone @property def physical_resource_id(self): @@ -2043,11 +2039,11 @@ class Subnet(TaggedEC2Resource): """ if filter_name in ('cidr', 'cidrBlock', 'cidr-block'): return self.cidr_block - elif filter_name == 'vpc-id': + elif filter_name in ('vpc-id', 'vpcId'): 
return self.vpc_id elif filter_name == 'subnet-id': return self.id - elif filter_name == 'availabilityZone': + elif filter_name in ('availabilityZone', 'availability-zone'): return self.availability_zone elif filter_name in ('defaultForAz', 'default-for-az'): return self.default_for_az @@ -2068,37 +2064,49 @@ class Subnet(TaggedEC2Resource): class SubnetBackend(object): def __init__(self): - self.subnets = {} + # maps availability zone to dict of (subnet_id, subnet) + self.subnets = defaultdict(dict) super(SubnetBackend, self).__init__() def get_subnet(self, subnet_id): - subnet = self.subnets.get(subnet_id, None) - if not subnet: - raise InvalidSubnetIdError(subnet_id) - return subnet + for subnets in self.subnets.values(): + if subnet_id in subnets: + return subnets[subnet_id] + raise InvalidSubnetIdError(subnet_id) - def create_subnet(self, vpc_id, cidr_block, availability_zone=None): + def create_subnet(self, vpc_id, cidr_block, availability_zone): subnet_id = random_subnet_id() vpc = self.get_vpc(vpc_id) # Validate VPC exists - default_for_az = vpc.is_default - map_public_ip_on_launch = vpc.is_default - subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone, default_for_az, map_public_ip_on_launch) + + # if this is the first subnet for an availability zone, + # consider it the default + default_for_az = str(availability_zone not in self.subnets).lower() + map_public_ip_on_launch = default_for_az + subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone, + default_for_az, map_public_ip_on_launch) # AWS associates a new subnet with the default Network ACL self.associate_default_network_acl_with_subnet(subnet_id) - self.subnets[subnet_id] = subnet + self.subnets[availability_zone][subnet_id] = subnet return subnet - def get_all_subnets(self, filters=None): - subnets = self.subnets.values() - + def get_all_subnets(self, subnet_ids=None, filters=None): + subnets = [] + if subnet_ids: + for subnet_id in subnet_ids: + for items in 
self.subnets.values(): + if subnet_id in items: + subnets.append(items[subnet_id]) + else: + for items in self.subnets.values(): + subnets.extend(items.values()) return generic_filter(filters, subnets) def delete_subnet(self, subnet_id): - deleted = self.subnets.pop(subnet_id, None) - if not deleted: - raise InvalidSubnetIdError(subnet_id) - return deleted + for subnets in self.subnets.values(): + if subnet_id in subnets: + return subnets.pop(subnet_id, None) + raise InvalidSubnetIdError(subnet_id) def modify_subnet_attribute(self, subnet_id, map_public_ip): subnet = self.get_subnet(subnet_id) @@ -3377,6 +3385,29 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, super(EC2Backend, self).__init__() self.region_name = region_name + # Default VPC exists by default, which is the current behavior + # of EC2-VPC. See for detail: + # + # docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html + # + if not self.vpcs: + vpc = self.create_vpc('172.31.0.0/16') + else: + # For now this is included for potential + # backward-compatibility issues + vpc = self.vpcs.values()[0] + + # Create default subnet for each availability zone + ip, _ = vpc.cidr_block.split('/') + ip = ip.split('.') + ip[2] = 0 + + for zone in self.describe_availability_zones(): + az_name = zone.name + cidr_block = '.'.join(str(i) for i in ip) + '/20' + self.create_subnet(vpc.id, cidr_block, availability_zone=az_name) + ip[2] += 16 + def reset(self): region_name = self.region_name self.__dict__ = {} @@ -3434,5 +3465,5 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, return True ec2_backends = {} -for region in boto.ec2.regions(): +for region in RegionsAndZonesBackend.regions: ec2_backends[region.name] = EC2Backend(region.name) diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py index af5ec7605..9486a3ca1 100644 --- a/moto/ec2/responses/subnets.py +++ b/moto/ec2/responses/subnets.py @@ -1,4 +1,5 @@ from __future__ import 
unicode_literals +import random from moto.core.responses import BaseResponse from moto.ec2.utils import filters_from_querystring @@ -10,11 +11,12 @@ class Subnets(BaseResponse): if 'AvailabilityZone' in self.querystring: availability_zone = self.querystring['AvailabilityZone'][0] else: - availability_zone = None + zone = random.choice(self.ec2_backend.describe_availability_zones()) + availability_zone = zone.name subnet = self.ec2_backend.create_subnet( - vpc_id, - cidr_block, - availability_zone, + vpc_id, + cidr_block, + availability_zone, ) template = self.response_template(CREATE_SUBNET_RESPONSE) return template.render(subnet=subnet) @@ -27,7 +29,17 @@ class Subnets(BaseResponse): def describe_subnets(self): filters = filters_from_querystring(self.querystring) - subnets = self.ec2_backend.get_all_subnets(filters) + + subnet_ids = [] + idx = 1 + key = 'SubnetId.{0}'.format(idx) + while key in self.querystring: + v = self.querystring[key] + subnet_ids.append(v[0]) + idx += 1 + key = 'SubnetId.{0}'.format(idx) + + subnets = self.ec2_backend.get_all_subnets(subnet_ids, filters) template = self.response_template(DESCRIBE_SUBNETS_RESPONSE) return template.render(subnets=subnets) diff --git a/moto/ec2/urls.py b/moto/ec2/urls.py index 768a8cd3b..241ab7133 100644 --- a/moto/ec2/urls.py +++ b/moto/ec2/urls.py @@ -3,7 +3,7 @@ from .responses import EC2Response url_bases = [ - "https?://ec2.(.+).amazonaws.com", + "https?://ec2.(.+).amazonaws.com(|.cn)", ] url_paths = { diff --git a/moto/elb/models.py b/moto/elb/models.py index 79c8551e9..28a791c2d 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -19,6 +19,7 @@ from .exceptions import ( ) + class FakeHealthCheck(object): def __init__(self, timeout, healthy_threshold, unhealthy_threshold, interval, target): @@ -337,5 +338,5 @@ class ELBBackend(BaseBackend): elb_backends = {} -for region in boto.ec2.elb.regions(): - elb_backends[region.name] = ELBBackend(region.name) +for region in ec2_backends.keys(): + 
elb_backends[region] = ELBBackend(region) diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 684b7e420..0fb74bef9 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -661,13 +661,14 @@ def test_vpc_single_instance_in_subnet(): ) vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc = vpc_conn.get_all_vpcs()[0] + + vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0] vpc.cidr_block.should.equal("10.0.0.0/16") # Add this once we implement the endpoint # vpc_conn.get_all_internet_gateways().should.have.length_of(1) - subnet = vpc_conn.get_all_subnets()[0] + subnet = vpc_conn.get_all_subnets(filters={'vpcId': vpc.id})[0] subnet.vpc_id.should.equal(vpc.id) ec2_conn = boto.ec2.connect_to_region("us-west-1") @@ -1355,7 +1356,7 @@ def test_vpc_gateway_attachment_creation_should_attach_itself_to_vpc(): ) vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc = vpc_conn.get_all_vpcs()[0] + vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0] igws = vpc_conn.get_all_internet_gateways( filters={'attachment.vpc-id': vpc.id} ) diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index f59bc20fe..2ab8b9994 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals import boto +import boto.ec2 +import boto3 import sure # noqa from moto import mock_ec2 @@ -9,16 +11,39 @@ from moto import mock_ec2 def test_describe_regions(): conn = boto.connect_ec2('the_key', 'the_secret') regions = conn.get_all_regions() - regions.should.have.length_of(8) - regions[0].name.should.equal('eu-west-1') - regions[0].endpoint.should.equal('ec2.eu-west-1.amazonaws.com') + 
regions.should.have.length_of(14) + for region in regions: + region.endpoint.should.contain(region.name) @mock_ec2 def test_availability_zones(): - # Just testing us-east-1 for now conn = boto.connect_ec2('the_key', 'the_secret') - zones = conn.get_all_zones() - zones.should.have.length_of(5) - zones[0].name.should.equal('us-east-1a') - zones[0].region_name.should.equal('us-east-1') + regions = conn.get_all_regions() + for region in regions: + conn = boto.ec2.connect_to_region(region.name) + if conn is None: + continue + for zone in conn.get_all_zones(): + zone.name.should.contain(region.name) + + +@mock_ec2 +def test_boto3_describe_regions(): + ec2 = boto3.client('ec2', 'us-east-1') + resp = ec2.describe_regions() + resp['Regions'].should.have.length_of(14) + for rec in resp['Regions']: + rec['Endpoint'].should.contain(rec['RegionName']) + + +@mock_ec2 +def test_boto3_availability_zones(): + ec2 = boto3.client('ec2', 'us-east-1') + resp = ec2.describe_regions() + regions = [r['RegionName'] for r in resp['Regions']] + for region in regions: + conn = boto3.client('ec2', region) + resp = conn.describe_availability_zones() + for rec in resp['AvailabilityZones']: + rec['ZoneName'].should.contain(region) diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index e661f5b3a..7d45e79db 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -52,15 +52,12 @@ def test_key_pairs_create_two(): kp = conn.create_key_pair('bar') assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') kps = conn.get_all_key_pairs() - assert len(kps) == 2 - # on Python 3, these are reversed for some reason - if six.PY3: - return - assert kps[0].name == 'foo' - assert kps[1].name == 'bar' + kps.should.have.length_of(2) + [i.name for i in kps].should.contain('foo') + [i.name for i in kps].should.contain('bar') kps = conn.get_all_key_pairs('foo') - assert len(kps) == 1 - assert kps[0].name == 'foo' + kps.should.have.length_of(1) + 
kps[0].name.should.equal('foo') @mock_ec2 diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index 6afa18c2b..5ab16b51b 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -7,41 +7,37 @@ from moto import mock_ec2 @mock_ec2 def test_default_network_acl_created_with_vpc(): - conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") - - all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(1) - -@mock_ec2 -def test_network_acls(): - - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - - network_acl = conn.create_network_acl(vpc.id) - all_network_acls = conn.get_all_network_acls() all_network_acls.should.have.length_of(2) -@mock_ec2 -def test_new_subnet_associates_with_default_network_acl(): +@mock_ec2 +def test_network_acls(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") + network_acl = conn.create_network_acl(vpc.id) + all_network_acls = conn.get_all_network_acls() + all_network_acls.should.have.length_of(3) + + +@mock_ec2 +def test_new_subnet_associates_with_default_network_acl(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.get_all_vpcs()[0] subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") all_network_acls = conn.get_all_network_acls() all_network_acls.should.have.length_of(1) acl = all_network_acls[0] - acl.associations.should.have.length_of(1) - acl.associations[0].subnet_id.should.equal(subnet.id) + acl.associations.should.have.length_of(4) + [a.subnet_id for a in acl.associations].should.contain(subnet.id) + @mock_ec2 def test_network_acl_entries(): - conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -55,7 +51,7 @@ def test_network_acl_entries(): ) all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(2) + all_network_acls.should.have.length_of(3) 
test_network_acl = next(na for na in all_network_acls if na.id == network_acl.id) @@ -68,7 +64,6 @@ def test_network_acl_entries(): @mock_ec2 def test_associate_new_network_acl_with_subnet(): - conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") @@ -77,7 +72,7 @@ def test_associate_new_network_acl_with_subnet(): conn.associate_network_acl(network_acl.id, subnet.id) all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(2) + all_network_acls.should.have.length_of(3) test_network_acl = next(na for na in all_network_acls if na.id == network_acl.id) @@ -88,28 +83,26 @@ def test_associate_new_network_acl_with_subnet(): @mock_ec2 def test_delete_network_acl(): - conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") network_acl = conn.create_network_acl(vpc.id) all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(2) + all_network_acls.should.have.length_of(3) any(acl.id == network_acl.id for acl in all_network_acls).should.be.ok conn.delete_network_acl(network_acl.id) updated_network_acls = conn.get_all_network_acls() - updated_network_acls.should.have.length_of(1) + updated_network_acls.should.have.length_of(2) any(acl.id == network_acl.id for acl in updated_network_acls).shouldnt.be.ok @mock_ec2 def test_network_acl_tagging(): - conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") network_acl = conn.create_network_acl(vpc.id) @@ -125,4 +118,3 @@ def test_network_acl_tagging(): if na.id == network_acl.id) test_network_acl.tags.should.have.length_of(1) test_network_acl.tags["a key"].should.equal("some value") - diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 2adec61a6..41e5786e6 100644 --- a/tests/test_ec2/test_route_tables.py +++ 
b/tests/test_ec2/test_route_tables.py @@ -17,7 +17,7 @@ def test_route_tables_defaults(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") - all_route_tables = conn.get_all_route_tables() + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) all_route_tables.should.have.length_of(1) main_route_table = all_route_tables[0] @@ -33,7 +33,7 @@ def test_route_tables_defaults(): vpc.delete() - all_route_tables = conn.get_all_route_tables() + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) all_route_tables.should.have.length_of(0) @@ -43,7 +43,7 @@ def test_route_tables_additional(): vpc = conn.create_vpc("10.0.0.0/16") route_table = conn.create_route_table(vpc.id) - all_route_tables = conn.get_all_route_tables() + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) all_route_tables.should.have.length_of(2) all_route_tables[0].vpc_id.should.equal(vpc.id) all_route_tables[1].vpc_id.should.equal(vpc.id) @@ -67,7 +67,7 @@ def test_route_tables_additional(): conn.delete_route_table(route_table.id) - all_route_tables = conn.get_all_route_tables() + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) all_route_tables.should.have.length_of(1) with assert_raises(EC2ResponseError) as cm: @@ -88,11 +88,11 @@ def test_route_tables_filters_standard(): route_table2 = conn.create_route_table(vpc2.id) all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(4) + all_route_tables.should.have.length_of(5) # Filter by main route table main_route_tables = conn.get_all_route_tables(filters={'association.main':'true'}) - main_route_tables.should.have.length_of(2) + main_route_tables.should.have.length_of(3) main_route_table_ids = [route_table.id for route_table in main_route_tables] main_route_table_ids.should_not.contain(route_table1.id) main_route_table_ids.should_not.contain(route_table2.id) @@ -131,7 +131,7 @@ def 
test_route_tables_filters_associations(): association_id3 = conn.associate_route_table(route_table2.id, subnet3.id) all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(3) + all_route_tables.should.have.length_of(4) # Filter by association ID association1_route_tables = conn.get_all_route_tables(filters={'association.route-table-association-id':association_id1}) @@ -160,7 +160,7 @@ def test_route_table_associations(): route_table = conn.create_route_table(vpc.id) all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(2) + all_route_tables.should.have.length_of(3) # Refresh route_table = conn.get_all_route_tables(route_table.id)[0] @@ -232,7 +232,7 @@ def test_route_table_replace_route_table_association(): route_table2 = conn.create_route_table(vpc.id) all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(3) + all_route_tables.should.have.length_of(4) # Refresh route_table1 = conn.get_all_route_tables(route_table1.id)[0] @@ -330,14 +330,14 @@ def test_route_table_get_by_tag_boto3(): def test_routes_additional(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables()[0] + main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] local_route = main_route_table.routes[0] igw = conn.create_internet_gateway() ROUTE_CIDR = "10.0.0.4/24" conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - main_route_table = conn.get_all_route_tables()[0] # Refresh route table + main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] # Refresh route table main_route_table.routes.should.have.length_of(2) new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] @@ -351,7 +351,7 @@ def test_routes_additional(): conn.delete_route(main_route_table.id, ROUTE_CIDR) - main_route_table = 
conn.get_all_route_tables()[0] # Refresh route table + main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] # Refresh route table main_route_table.routes.should.have.length_of(1) new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 3ccd1e4ab..585f97eeb 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -37,7 +37,7 @@ def test_create_and_describe_security_group(): cm.exception.request_id.should_not.be.none all_groups = conn.get_all_security_groups() - all_groups.should.have.length_of(2) # The default group gets created automatically + all_groups.should.have.length_of(3) # The default group gets created automatically group_names = [group.name for group in all_groups] set(group_names).should.equal(set(["default", "test security group"])) @@ -57,7 +57,7 @@ def test_create_security_group_without_description_raises_error(): def test_default_security_group(): conn = boto.ec2.connect_to_region('us-east-1') groups = conn.get_all_security_groups() - groups.should.have.length_of(1) + groups.should.have.length_of(2) groups[0].name.should.equal("default") @@ -98,7 +98,7 @@ def test_create_two_security_groups_with_same_name_in_different_vpc(): all_groups = conn.get_all_security_groups() - all_groups.should.have.length_of(3) + all_groups.should.have.length_of(4) group_names = [group.name for group in all_groups] # The default group is created automatically set(group_names).should.equal(set(["default", "test security group"])) @@ -110,7 +110,7 @@ def test_deleting_security_groups(): security_group1 = conn.create_security_group('test1', 'test1') conn.create_security_group('test2', 'test2') - conn.get_all_security_groups().should.have.length_of(3) # We need to include the default security group + conn.get_all_security_groups().should.have.length_of(4) # 
Deleting a group that doesn't exist should throw an error with assert_raises(EC2ResponseError) as cm: @@ -127,11 +127,11 @@ def test_deleting_security_groups(): ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') conn.delete_security_group('test2') - conn.get_all_security_groups().should.have.length_of(2) + conn.get_all_security_groups().should.have.length_of(3) # Delete by group id conn.delete_security_group(group_id=security_group1.id) - conn.get_all_security_groups().should.have.length_of(1) + conn.get_all_security_groups().should.have.length_of(2) @mock_ec2 @@ -267,6 +267,7 @@ def test_authorize_other_group_egress_and_revoke(): sg01.revoke_egress(IpPermissions=[ip_permission]) sg01.ip_permissions_egress.should.have.length_of(1) + @mock_ec2 def test_authorize_group_in_vpc(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -316,7 +317,7 @@ def test_get_all_security_groups(): resp[0].id.should.equal(sg1.id) resp = conn.get_all_security_groups() - resp.should.have.length_of(3) # We need to include the default group here + resp.should.have.length_of(4) @mock_ec2 @@ -376,7 +377,7 @@ def test_authorize_all_protocols_with_no_port_specification(): sg.rules[0].from_port.should.equal(None) sg.rules[0].to_port.should.equal(None) - + ''' Boto3 ''' diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 8ca17232d..8e6a2a4ea 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -16,17 +16,18 @@ from moto import mock_cloudformation, mock_ec2 @mock_ec2 def test_subnets(): + ec2 = boto.connect_ec2('the_key', 'the_secret') conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(1) + all_subnets.should.have.length_of(1 + len(ec2.get_all_zones())) 
conn.delete_subnet(subnet.id) all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(0) + all_subnets.should.have.length_of(0 + len(ec2.get_all_zones())) with assert_raises(EC2ResponseError) as cm: conn.delete_subnet(subnet.id) @@ -59,7 +60,7 @@ def test_subnet_tagging(): tag.value.should.equal("some value") # Refresh the subnet - subnet = conn.get_all_subnets()[0] + subnet = conn.get_all_subnets(subnet_ids=[subnet.id])[0] subnet.tags.should.have.length_of(1) subnet.tags["a key"].should.equal("some value") @@ -76,22 +77,32 @@ def test_subnet_should_have_proper_availability_zone_set(): def test_default_subnet(): ec2 = boto3.resource('ec2', region_name='us-west-1') - # Create the default VPC - default_vpc = ec2.create_vpc(CidrBlock='172.31.0.0/16') + default_vpc = list(ec2.vpcs.all())[0] + default_vpc.cidr_block.should.equal('172.31.0.0/16') default_vpc.reload() default_vpc.is_default.should.be.ok subnet = ec2.create_subnet(VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') subnet.reload() - subnet.map_public_ip_on_launch.should.be.ok + subnet.map_public_ip_on_launch.shouldnt.be.ok @mock_ec2 def test_non_default_subnet(): - ec2 = boto3.resource('ec2', region_name='us-west-1') + vpc_cli = boto.vpc.connect_to_region('us-west-1') - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') + # Create the non default VPC + vpc = vpc_cli.create_vpc("10.0.0.0/16") + vpc.is_default.shouldnt.be.ok + + subnet = vpc_cli.create_subnet(vpc.id, "10.0.0.0/24") + subnet = vpc_cli.get_all_subnets(subnet_ids=[subnet.id])[0] + subnet.mapPublicIpOnLaunch.should.equal('false') + + +@mock_ec2 +def test_boto3_non_default_subnet(): + ec2 = boto3.resource('ec2', region_name='us-west-1') # Create the non default VPC vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') @@ -108,10 +119,9 @@ def test_modify_subnet_attribute(): ec2 = boto3.resource('ec2', region_name='us-west-1') client = boto3.client('ec2', region_name='us-west-1') - # Create 
the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') + # Get the default VPC + vpc = list(ec2.vpcs.all())[0] - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action @@ -120,11 +130,15 @@ def test_modify_subnet_attribute(): # For non default subnet, attribute value should be 'False' subnet.map_public_ip_on_launch.shouldnt.be.ok + client.modify_subnet_attribute(SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False}) + subnet.reload() + subnet.map_public_ip_on_launch.shouldnt.be.ok + client.modify_subnet_attribute(SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True}) subnet.reload() - subnet.map_public_ip_on_launch.should.be.ok + @mock_ec2 def test_modify_subnet_attribute_validation(): ec2 = boto3.resource('ec2', region_name='us-west-1') @@ -135,8 +149,10 @@ def test_modify_subnet_attribute_validation(): with assert_raises(ParamValidationError): client.modify_subnet_attribute(SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) + @mock_ec2 def test_get_subnets_filtering(): + ec2 = boto.ec2.connect_to_region('us-west-1') conn = boto.vpc.connect_to_region('us-west-1') vpcA = conn.create_vpc("10.0.0.0/16") subnetA = conn.create_subnet(vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') @@ -145,7 +161,7 @@ def test_get_subnets_filtering(): subnetB2 = conn.create_subnet(vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(3) + all_subnets.should.have.length_of(3 + len(ec2.get_all_zones())) # Filter by VPC ID subnets_by_vpc = conn.get_all_subnets(filters={'vpc-id': vpcB.id}) @@ -181,9 +197,9 @@ def test_get_subnets_filtering(): set([subnet.id for subnet in subnets_by_az]).should.equal(set([subnetB1.id])) # Filter by defaultForAz + subnets_by_az = conn.get_all_subnets(filters={'defaultForAz': "true"}) - 
subnets_by_az.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_az]).should.equal(set([subnetA.id])) + subnets_by_az.should.have.length_of(len(conn.get_all_zones())) # Unsupported filter conn.get_all_subnets.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 8349b8f6f..def2700e3 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -21,12 +21,12 @@ def test_vpcs(): vpc.cidr_block.should.equal('10.0.0.0/16') all_vpcs = conn.get_all_vpcs() - all_vpcs.should.have.length_of(1) + all_vpcs.should.have.length_of(2) vpc.delete() all_vpcs = conn.get_all_vpcs() - all_vpcs.should.have.length_of(0) + all_vpcs.should.have.length_of(1) with assert_raises(EC2ResponseError) as cm: conn.delete_vpc("vpc-1234abcd") @@ -40,14 +40,14 @@ def test_vpc_defaults(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") - conn.get_all_vpcs().should.have.length_of(1) - conn.get_all_route_tables().should.have.length_of(1) + conn.get_all_vpcs().should.have.length_of(2) + conn.get_all_route_tables().should.have.length_of(2) conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(1) vpc.delete() - conn.get_all_vpcs().should.have.length_of(0) - conn.get_all_route_tables().should.have.length_of(0) + conn.get_all_vpcs().should.have.length_of(1) + conn.get_all_route_tables().should.have.length_of(1) conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(0) @mock_ec2 @@ -56,7 +56,7 @@ def test_vpc_isdefault_filter(): vpc = conn.create_vpc("10.0.0.0/16") conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) vpc.delete() - conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(0) + conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) @mock_ec2 @@ -65,10 +65,10 @@ def test_multiple_vpcs_default_filter(): 
conn.create_vpc("10.8.0.0/16") conn.create_vpc("10.0.0.0/16") conn.create_vpc("192.168.0.0/16") - conn.get_all_vpcs().should.have.length_of(3) + conn.get_all_vpcs().should.have.length_of(4) vpc = conn.get_all_vpcs(filters={'isDefault': 'true'}) vpc.should.have.length_of(1) - vpc[0].cidr_block.should.equal('10.8.0.0/16') + vpc[0].cidr_block.should.equal('172.31.0.0/16') @mock_ec2 @@ -76,9 +76,9 @@ def test_vpc_state_available_filter(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") conn.create_vpc("10.1.0.0/16") - conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) + conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(3) vpc.delete() - conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(1) + conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) @mock_ec2 def test_vpc_tagging(): @@ -86,13 +86,12 @@ def test_vpc_tagging(): vpc = conn.create_vpc("10.0.0.0/16") vpc.add_tag("a key", "some value") - tag = conn.get_all_tags()[0] tag.name.should.equal("a key") tag.value.should.equal("some value") # Refresh the vpc - vpc = conn.get_all_vpcs()[0] + vpc = conn.get_all_vpcs(vpc_ids=[vpc.id])[0] vpc.tags.should.have.length_of(1) vpc.tags["a key"].should.equal("some value") @@ -245,7 +244,8 @@ def test_default_vpc(): ec2 = boto3.resource('ec2', region_name='us-west-1') # Create the default VPC - default_vpc = ec2.create_vpc(CidrBlock='172.31.0.0/16') + default_vpc = list(ec2.vpcs.all())[0] + default_vpc.cidr_block.should.equal('172.31.0.0/16') default_vpc.reload() default_vpc.is_default.should.be.ok From f212d701043073d95eff021e988d9fa829972759 Mon Sep 17 00:00:00 2001 From: Brian Kruger Date: Wed, 11 Jan 2017 17:38:55 -0800 Subject: [PATCH 018/213] Use a sane aws sender_id from SQS. 
(#791) --- moto/sqs/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 90a3f30f9..13b8c34b6 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -16,7 +16,7 @@ from .exceptions import ( ) DEFAULT_ACCOUNT_ID = 123456789012 - +DEFAULT_SENDER_ID = "AIDAIT2UOQQY3AUEKVGXU" class Message(object): def __init__(self, message_id, body): @@ -24,7 +24,7 @@ class Message(object): self._body = body self.message_attributes = {} self.receipt_handle = None - self.sender_id = DEFAULT_ACCOUNT_ID + self.sender_id = DEFAULT_SENDER_ID self.sent_timestamp = None self.approximate_first_receive_timestamp = None self.approximate_receive_count = 0 From 2a02259a3c3f8a6b634af3f429ef5c5a7299c05f Mon Sep 17 00:00:00 2001 From: Jia Chen Date: Wed, 11 Jan 2017 20:40:57 -0500 Subject: [PATCH 019/213] Adding PolicyType to scaling policy and implementing filtering in describe_policies (#797) * Adding PolicyType to FakeScalingPolicy * Implement filtering for AutoScalingBackend.describe_policies(...) 
* Unit test for describe_policies fuction in autoscaling --- moto/autoscaling/models.py | 14 ++++-- moto/autoscaling/responses.py | 7 ++- tests/test_autoscaling/test_autoscaling.py | 54 ++++++++++++++++++++++ 3 files changed, 69 insertions(+), 6 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 2dbdc077d..53a0f62df 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -16,9 +16,10 @@ class InstanceState(object): class FakeScalingPolicy(object): - def __init__(self, name, adjustment_type, as_name, scaling_adjustment, + def __init__(self, name, policy_type, adjustment_type, as_name, scaling_adjustment, cooldown, autoscaling_backend): self.name = name + self.policy_type = policy_type self.adjustment_type = adjustment_type self.as_name = as_name self.scaling_adjustment = scaling_adjustment @@ -407,16 +408,19 @@ class AutoScalingBackend(BaseBackend): desired_capacity = int(desired_capacity) self.set_desired_capacity(group_name, desired_capacity) - def create_autoscaling_policy(self, name, adjustment_type, as_name, + def create_autoscaling_policy(self, name, policy_type, adjustment_type, as_name, scaling_adjustment, cooldown): - policy = FakeScalingPolicy(name, adjustment_type, as_name, + policy = FakeScalingPolicy(name, policy_type, adjustment_type, as_name, scaling_adjustment, cooldown, self) self.policies[name] = policy return policy - def describe_policies(self): - return list(self.policies.values()) + def describe_policies(self, autoscaling_group_name=None, policy_names=None, policy_types=None): + return [policy for policy in self.policies.values() + if (not autoscaling_group_name or policy.as_name == autoscaling_group_name) and + (not policy_names or policy.name in policy_names) and + (not policy_types or policy.policy_type in policy_types)] def delete_policy(self, group_name): self.policies.pop(group_name, None) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 
1840e029c..976199131 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -120,6 +120,7 @@ class AutoScalingResponse(BaseResponse): def put_scaling_policy(self): policy = self.autoscaling_backend.create_autoscaling_policy( name=self._get_param('PolicyName'), + policy_type=self._get_param('PolicyType'), adjustment_type=self._get_param('AdjustmentType'), as_name=self._get_param('AutoScalingGroupName'), scaling_adjustment=self._get_int_param('ScalingAdjustment'), @@ -129,7 +130,10 @@ class AutoScalingResponse(BaseResponse): return template.render(policy=policy) def describe_policies(self): - policies = self.autoscaling_backend.describe_policies() + policies = self.autoscaling_backend.describe_policies( + autoscaling_group_name=self._get_param('AutoScalingGroupName'), + policy_names=self._get_multi_param('PolicyNames.member'), + policy_types=self._get_multi_param('PolicyTypes.member')) template = self.response_template(DESCRIBE_SCALING_POLICIES_TEMPLATE) return template.render(policies=policies) @@ -373,6 +377,7 @@ DESCRIBE_SCALING_POLICIES_TEMPLATE = """ Date: Wed, 11 Jan 2017 20:54:37 -0500 Subject: [PATCH 020/213] Fix bug with listing IAM users. 
--- moto/iam/models.py | 8 ++++---- moto/iam/responses.py | 6 +++--- tests/test_iam/test_iam.py | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 4502d6da0..60b9b743d 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -176,15 +176,15 @@ class Group(object): class User(object): - def __init__(self, name, path='/'): + def __init__(self, name, path=None): self.name = name self.id = random_resource_id() - self.path = path + self.path = path if path else "/" self.created = datetime.strftime( datetime.utcnow(), "%Y-%m-%d-%H-%M-%S" ) - self.arn = 'arn:aws:iam::123456789012:user/{0}'.format(name) + self.arn = 'arn:aws:iam::123456789012:user{0}{1}'.format(self.path, name) self.policies = {} self.access_keys = [] self.password = None @@ -591,7 +591,7 @@ class IAMBackend(BaseBackend): def list_users(self, path_prefix, marker, max_items): users = None try: - users = self.users + users = self.users.values() except KeyError: raise IAMNotFoundException("Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items)) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index d707c35ed..223691e1e 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -204,9 +204,9 @@ class IamResponse(BaseResponse): path_prefix = self._get_param('PathPrefix') marker = self._get_param('Marker') max_items = self._get_param('MaxItems') - user = iam_backend.list_users(path_prefix, marker, max_items) + users = iam_backend.list_users(path_prefix, marker, max_items) template = self.response_template(LIST_USERS_TEMPLATE) - return template.render(action='List', user=user) + return template.render(action='List', users=users) def create_login_profile(self): user_name = self._get_param('UserName') @@ -724,7 +724,7 @@ LIST_USERS_TEMPLATE = """<{{ action }}UsersResponse> {{ user.id }} {{ user.path }} {{ user.name }} - arn:aws:iam::123456789012:user/{{ user.path }}/{{ user.name }} + {{ user.arn }} {% endfor 
%} diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index c276c8a80..bedea4e01 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -196,10 +196,10 @@ def test_list_users(): conn = boto3.client('iam') conn.create_user(UserName='my-user') response = conn.list_users(PathPrefix=path_prefix, MaxItems=max_items) - assert_equals( - response['Users'], - [] - ) + user = response['Users'][0] + user['UserName'].should.equal('my-user') + user['Path'].should.equal('/') + user['Arn'].should.equal('arn:aws:iam::123456789012:user/my-user') @mock_iam() From 74bbd9c8e537286bb00d562c3d28870cef89cd6d Mon Sep 17 00:00:00 2001 From: Michael Nussbaum Date: Wed, 11 Jan 2017 18:02:51 -0800 Subject: [PATCH 021/213] Various RDS, RDS/Cloudformation, RDS/KMS improvements. (#789) We need to mock out deploying RDS instances with full disk encryption and detailed tagging. We also need to be able do deploy these instances with Cloudformation, and then access them with both boto and boto3. 
* Join RDS and RDS2 backends - this makes RDS resources created via either of the two boto RDS APIs visible to both, more closely mirroring how AWS works * Fix RDS responses that were returning JSON but should be returning XML * Add mocking of RDS Cloudformation calls * Add mocking of RDS full disk encryption with KMS * Add mocking of RDS DBParameterGroups * Fix mocking of RDS DBSecurityGroupIngress rules * Make mocking of RDS OptionGroupOptions more accurate * Fix mocking of RDS cross-region DB replication * Add RDS tag support to: * DBs * DBSubnetGroups * DBSecurityGroups Signed-off-by: Andrew Garrett --- moto/cloudformation/parsing.py | 5 +- moto/rds/models.py | 153 +-- moto/rds/responses.py | 24 +- moto/rds2/exceptions.py | 25 +- moto/rds2/models.py | 478 ++++++-- moto/rds2/responses.py | 561 +++++---- .../rds_mysql_with_db_parameter_group.py | 201 ++++ .../fixtures/rds_mysql_with_read_replica.py | 7 +- .../test_cloudformation_stack_integration.py | 40 + tests/test_rds/test_rds.py | 26 + tests/test_rds2/test_rds2.py | 1069 ++++++++++++----- 11 files changed, 1868 insertions(+), 721 deletions(-) create mode 100644 tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 3e348ac37..521658cee 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -14,6 +14,7 @@ from moto.elb import models as elb_models from moto.iam import models as iam_models from moto.kms import models as kms_models from moto.rds import models as rds_models +from moto.rds2 import models as rds2_models from moto.redshift import models as redshift_models from moto.route53 import models as route53_models from moto.s3 import models as s3_models @@ -56,6 +57,7 @@ MODEL_MAP = { "AWS::RDS::DBInstance": rds_models.Database, "AWS::RDS::DBSecurityGroup": rds_models.SecurityGroup, "AWS::RDS::DBSubnetGroup": rds_models.SubnetGroup, + "AWS::RDS::DBParameterGroup": 
rds2_models.DBParameterGroup, "AWS::Redshift::Cluster": redshift_models.Cluster, "AWS::Redshift::ClusterParameterGroup": redshift_models.ParameterGroup, "AWS::Redshift::ClusterSubnetGroup": redshift_models.SubnetGroup, @@ -311,7 +313,8 @@ class ResourceMap(collections.Mapping): if not resource_json: raise KeyError(resource_logical_id) new_resource = parse_and_create_resource(resource_logical_id, resource_json, self, self._region_name) - self._parsed_resources[resource_logical_id] = new_resource + if new_resource is not None: + self._parsed_resources[resource_logical_id] = new_resource return new_resource def __iter__(self): diff --git a/moto/rds/models.py b/moto/rds/models.py index 3ce005e5e..b63a30737 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -10,6 +10,7 @@ from moto.cloudformation.exceptions import UnformattedGetAttTemplateException from moto.core import BaseBackend from moto.core.utils import get_random_hex from moto.ec2.models import ec2_backends +from moto.rds2.models import rds2_backends from .exceptions import DBInstanceNotFoundError, DBSecurityGroupNotFoundError, DBSubnetGroupNotFoundError @@ -26,6 +27,11 @@ class Database(object): if self.engine_version is None: self.engine_version = "5.6.21" self.iops = kwargs.get("iops") + self.storage_encrypted = kwargs.get("storage_encrypted", False) + if self.storage_encrypted: + self.kms_key_id = kwargs.get("kms_key_id", "default_kms_key_id") + else: + self.kms_key_id = kwargs.get("kms_key_id") self.storage_type = kwargs.get("storage_type") self.master_username = kwargs.get('master_username') self.master_password = kwargs.get('master_password') @@ -119,6 +125,7 @@ class Database(object): "engine": properties.get("Engine"), "engine_version": properties.get("EngineVersion"), "iops": properties.get("Iops"), + "kms_key_id": properties.get("KmsKeyId"), "master_password": properties.get('MasterUserPassword'), "master_username": properties.get('MasterUsername'), "multi_az": properties.get("MultiAZ"), @@ 
-126,7 +133,9 @@ class Database(object): "publicly_accessible": properties.get("PubliclyAccessible"), "region": region_name, "security_groups": security_groups, + "storage_encrypted": properties.get("StorageEncrypted"), "storage_type": properties.get("StorageType"), + "tags": properties.get("Tags"), } rds_backend = rds_backends[region_name] @@ -204,6 +213,10 @@ class Database(object): {{ database.publicly_accessible }} {{ database.auto_minor_version_upgrade }} {{ database.allocated_storage }} + {{ database.storage_encrypted }} + {% if database.kms_key_id %} + {{ database.kms_key_id }} + {% endif %} {% if database.iops %} {{ database.iops }} io1 @@ -220,6 +233,10 @@ class Database(object): """) return template.render(database=self) + def delete(self, region_name): + backend = rds_backends[region_name] + backend.delete_database(self.db_instance_identifier) + class SecurityGroup(object): def __init__(self, group_name, description): @@ -267,25 +284,33 @@ class SecurityGroup(object): properties = cloudformation_json['Properties'] group_name = resource_name.lower() + get_random_hex(12) description = properties['GroupDescription'] - security_group_ingress = properties['DBSecurityGroupIngress'] + security_group_ingress_rules = properties.get('DBSecurityGroupIngress', []) + tags = properties.get('Tags') ec2_backend = ec2_backends[region_name] rds_backend = rds_backends[region_name] security_group = rds_backend.create_security_group( group_name, description, + tags, ) - for ingress_type, ingress_value in security_group_ingress.items(): - if ingress_type == "CIDRIP": - security_group.authorize_cidr(ingress_value) - elif ingress_type == "EC2SecurityGroupName": - subnet = ec2_backend.get_security_group_from_name(ingress_value) - security_group.authorize_security_group(subnet) - elif ingress_type == "EC2SecurityGroupId": - subnet = ec2_backend.get_security_group_from_id(ingress_value) - security_group.authorize_security_group(subnet) + + for security_group_ingress in 
security_group_ingress_rules: + for ingress_type, ingress_value in security_group_ingress.items(): + if ingress_type == "CIDRIP": + security_group.authorize_cidr(ingress_value) + elif ingress_type == "EC2SecurityGroupName": + subnet = ec2_backend.get_security_group_from_name(ingress_value) + security_group.authorize_security_group(subnet) + elif ingress_type == "EC2SecurityGroupId": + subnet = ec2_backend.get_security_group_from_id(ingress_value) + security_group.authorize_security_group(subnet) return security_group + def delete(self, region_name): + backend = rds_backends[region_name] + backend.delete_security_group(self.group_name) + class SubnetGroup(object): def __init__(self, subnet_name, description, subnets): @@ -324,6 +349,7 @@ class SubnetGroup(object): subnet_name = resource_name.lower() + get_random_hex(12) description = properties['DBSubnetGroupDescription'] subnet_ids = properties['SubnetIds'] + tags = properties.get('Tags') ec2_backend = ec2_backends[region_name] subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids] @@ -332,102 +358,31 @@ class SubnetGroup(object): subnet_name, description, subnets, + tags, ) return subnet_group + def delete(self, region_name): + backend = rds_backends[region_name] + backend.delete_subnet_group(self.subnet_name) + class RDSBackend(BaseBackend): - def __init__(self): - self.databases = {} - self.security_groups = {} - self.subnet_groups = {} + def __init__(self, region): + self.region = region - def create_database(self, db_kwargs): - database_id = db_kwargs['db_instance_identifier'] - database = Database(**db_kwargs) - self.databases[database_id] = database - return database + def __getattr__(self, attr): + return self.rds2_backend().__getattribute__(attr) - def create_database_replica(self, db_kwargs): - database_id = db_kwargs['db_instance_identifier'] - source_database_id = db_kwargs['source_db_identifier'] - primary = self.describe_databases(source_database_id)[0] - replica = 
copy.deepcopy(primary) - replica.update(db_kwargs) - replica.set_as_replica() - self.databases[database_id] = replica - primary.add_replica(replica) - return replica + def reset(self): + # preserve region + region = self.region + self.rds2_backend().reset() + self.__dict__ = {} + self.__init__(region) - def describe_databases(self, db_instance_identifier=None): - if db_instance_identifier: - if db_instance_identifier in self.databases: - return [self.databases[db_instance_identifier]] - else: - raise DBInstanceNotFoundError(db_instance_identifier) - return self.databases.values() + def rds2_backend(self): + return rds2_backends[self.region] - def modify_database(self, db_instance_identifier, db_kwargs): - database = self.describe_databases(db_instance_identifier)[0] - database.update(db_kwargs) - return database - - def delete_database(self, db_instance_identifier): - if db_instance_identifier in self.databases: - database = self.databases.pop(db_instance_identifier) - if database.is_replica: - primary = self.describe_databases(database.source_db_identifier)[0] - primary.remove_replica(database) - database.status = 'deleting' - return database - else: - raise DBInstanceNotFoundError(db_instance_identifier) - - def create_security_group(self, group_name, description): - security_group = SecurityGroup(group_name, description) - self.security_groups[group_name] = security_group - return security_group - - def describe_security_groups(self, security_group_name): - if security_group_name: - if security_group_name in self.security_groups: - return [self.security_groups[security_group_name]] - else: - raise DBSecurityGroupNotFoundError(security_group_name) - return self.security_groups.values() - - def delete_security_group(self, security_group_name): - if security_group_name in self.security_groups: - return self.security_groups.pop(security_group_name) - else: - raise DBSecurityGroupNotFoundError(security_group_name) - - def authorize_security_group(self, 
security_group_name, cidr_ip): - security_group = self.describe_security_groups(security_group_name)[0] - security_group.authorize_cidr(cidr_ip) - return security_group - - def create_subnet_group(self, subnet_name, description, subnets): - subnet_group = SubnetGroup(subnet_name, description, subnets) - self.subnet_groups[subnet_name] = subnet_group - return subnet_group - - def describe_subnet_groups(self, subnet_group_name): - if subnet_group_name: - if subnet_group_name in self.subnet_groups: - return [self.subnet_groups[subnet_group_name]] - else: - raise DBSubnetGroupNotFoundError(subnet_group_name) - return self.subnet_groups.values() - - def delete_subnet_group(self, subnet_name): - if subnet_name in self.subnet_groups: - return self.subnet_groups.pop(subnet_name) - else: - raise DBSubnetGroupNotFoundError(subnet_name) - - -rds_backends = {} -for region in boto.rds.regions(): - rds_backends[region.name] = RDSBackend() +rds_backends = dict((region.name, RDSBackend(region.name)) for region in boto.rds.regions()) diff --git a/moto/rds/responses.py b/moto/rds/responses.py index 98015e7bb..5207264f6 100644 --- a/moto/rds/responses.py +++ b/moto/rds/responses.py @@ -12,7 +12,7 @@ class RDSResponse(BaseResponse): return rds_backends[self.region] def _get_db_kwargs(self): - return { + args = { "auto_minor_version_upgrade": self._get_param('AutoMinorVersionUpgrade'), "allocated_storage": self._get_int_param('AllocatedStorage'), "availability_zone": self._get_param("AvailabilityZone"), @@ -25,6 +25,7 @@ class RDSResponse(BaseResponse): "engine": self._get_param("Engine"), "engine_version": self._get_param("EngineVersion"), "iops": self._get_int_param("Iops"), + "kms_key_id": self._get_param("KmsKeyId"), "master_password": self._get_param('MasterUserPassword'), "master_username": self._get_param('MasterUsername'), "multi_az": self._get_bool_param("MultiAZ"), @@ -35,9 +36,13 @@ class RDSResponse(BaseResponse): "publicly_accessible": 
self._get_param("PubliclyAccessible"), "region": self.region, "security_groups": self._get_multi_param('DBSecurityGroups.member'), + "storage_encrypted": self._get_param("StorageEncrypted"), "storage_type": self._get_param("StorageType"), # VpcSecurityGroupIds.member.N + "tags": list(), } + args['tags'] = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + return args def _get_db_replica_kwargs(self): return { @@ -54,6 +59,17 @@ class RDSResponse(BaseResponse): "storage_type": self._get_param("StorageType"), } + def unpack_complex_list_params(self, label, names): + unpacked_list = list() + count = 1 + while self._get_param('{0}.{1}.{2}'.format(label, count, names[0])): + param = dict() + for i in range(len(names)): + param[names[i]] = self._get_param('{0}.{1}.{2}'.format(label, count, names[i])) + unpacked_list.append(param) + count += 1 + return unpacked_list + def create_dbinstance(self): db_kwargs = self._get_db_kwargs() @@ -90,7 +106,8 @@ class RDSResponse(BaseResponse): def create_dbsecurity_group(self): group_name = self._get_param('DBSecurityGroupName') description = self._get_param('DBSecurityGroupDescription') - security_group = self.backend.create_security_group(group_name, description) + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + security_group = self.backend.create_security_group(group_name, description, tags) template = self.response_template(CREATE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) @@ -118,7 +135,8 @@ class RDSResponse(BaseResponse): description = self._get_param('DBSubnetGroupDescription') subnet_ids = self._get_multi_param('SubnetIds.member') subnets = [ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids] - subnet_group = self.backend.create_subnet_group(subnet_name, description, subnets) + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + subnet_group = self.backend.create_subnet_group(subnet_name, description, subnets, 
tags) template = self.response_template(CREATE_SUBNET_GROUP_TEMPLATE) return template.render(subnet_group=subnet_group) diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py index a5c935659..6fcae4b56 100644 --- a/moto/rds2/exceptions.py +++ b/moto/rds2/exceptions.py @@ -1,20 +1,22 @@ from __future__ import unicode_literals -import json +from jinja2 import Template from werkzeug.exceptions import BadRequest class RDSClientError(BadRequest): def __init__(self, code, message): super(RDSClientError, self).__init__() - self.description = json.dumps({ - "Error": { - "Code": code, - "Message": message, - 'Type': 'Sender', - }, - 'RequestId': '6876f774-7273-11e4-85dc-39e55ca848d1', - }) + template = Template(""" + + + {{ code }} + {{ message }} + Sender + + 6876f774-7273-11e4-85dc-39e55ca848d1 + """) + self.description = template.render(code=code, message=message) class DBInstanceNotFoundError(RDSClientError): @@ -37,3 +39,8 @@ class DBSubnetGroupNotFoundError(RDSClientError): 'DBSubnetGroupNotFound', "Subnet Group {0} not found.".format(subnet_group_name)) +class DBParameterGroupNotFoundError(RDSClientError): + def __init__(self, db_parameter_group_name): + super(DBParameterGroupNotFoundError, self).__init__( + 'DBParameterGroupNotFound', + 'DB Parameter Group {0} not found.'.format(db_parameter_group_name)) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 37ecbf873..9bb1f8200 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import copy +from collections import defaultdict import boto.rds2 import json from jinja2 import Template @@ -10,7 +11,12 @@ from moto.cloudformation.exceptions import UnformattedGetAttTemplateException from moto.core import BaseBackend from moto.core.utils import get_random_hex from moto.ec2.models import ec2_backends -from .exceptions import RDSClientError, DBInstanceNotFoundError, DBSecurityGroupNotFoundError, DBSubnetGroupNotFoundError +from .exceptions import 
(RDSClientError, + DBInstanceNotFoundError, + DBSecurityGroupNotFoundError, + DBSubnetGroupNotFoundError, + DBParameterGroupNotFoundError) + class Database(object): @@ -35,6 +41,11 @@ class Database(object): if not self.engine_version and self.engine in self.default_engine_versions: self.engine_version = self.default_engine_versions[self.engine] self.iops = kwargs.get("iops") + self.storage_encrypted = kwargs.get("storage_encrypted", False) + if self.storage_encrypted: + self.kms_key_id = kwargs.get("kms_key_id", "default_kms_key_id") + else: + self.kms_key_id = kwargs.get("kms_key_id") self.storage_type = kwargs.get("storage_type") self.master_username = kwargs.get('master_username') self.master_user_password = kwargs.get('master_user_password') @@ -64,13 +75,9 @@ class Database(object): self.security_groups = kwargs.get('security_groups', []) self.vpc_security_group_ids = kwargs.get('vpc_security_group_ids', []) self.preferred_maintenance_window = kwargs.get('preferred_maintenance_window', 'wed:06:38-wed:07:08') - self.db_parameter_group_name = kwargs.get('db_parameter_group_name', None) - self.default_parameter_groups = {"MySQL": "default.mysql5.6", - "mysql": "default.mysql5.6", - "postgres": "default.postgres9.3" - } - if not self.db_parameter_group_name and self.engine in self.default_parameter_groups: - self.db_parameter_group_name = self.default_parameter_groups[self.engine] + self.db_parameter_group_name = kwargs.get('db_parameter_group_name') + if self.db_parameter_group_name and self.db_parameter_group_name not in rds2_backends[self.region].db_parameter_groups: + raise DBParameterGroupNotFoundError(self.db_parameter_group_name) self.preferred_backup_window = kwargs.get('preferred_backup_window', '13:14-13:44') self.license_model = kwargs.get('license_model', 'general-public-license') @@ -84,6 +91,120 @@ class Database(object): self.character_set_name = kwargs.get('character_set_name', None) self.tags = kwargs.get('tags', []) + @property + def 
physical_resource_id(self): + return self.db_instance_identifier + + def db_parameter_groups(self): + if not self.db_parameter_group_name: + db_family, db_parameter_group_name = self.default_db_parameter_group_details() + description = 'Default parameter group for {0}'.format(db_family) + return [DBParameterGroup(name=db_parameter_group_name, + family=db_family, + description=description, + tags={})] + else: + return [rds2_backends[self.region].db_parameter_groups[self.db_parameter_group_name]] + + def default_db_parameter_group_details(self): + if not self.engine_version: + return (None, None) + + minor_engine_version = '.'.join(self.engine_version.rsplit('.')[:-1]) + db_family = '{0}{1}'.format(self.engine.lower(), minor_engine_version) + + return db_family, 'default.{0}'.format(db_family) + + def to_xml(self): + template = Template(""" + {{ database.backup_retention_period }} + {{ database.status }} + {{ database.multi_az }} + + {{ database.db_instance_identifier }} + 03:50-04:20 + wed:06:38-wed:07:08 + + {% for replica_id in database.replicas %} + {{ replica_id }} + {% endfor %} + + + {% if database.is_replica %} + + read replication + replicating + true + + + {% endif %} + + {% if database.is_replica %} + {{ database.source_db_identifier }} + {% endif %} + {{ database.engine }} + general-public-license + {{ database.engine_version }} + + + + {% for db_parameter_group in database.db_parameter_groups() %} + + in-sync + {{ db_parameter_group.name }} + + {% endfor %} + + + {% for security_group in database.security_groups %} + + active + {{ security_group }} + + {% endfor %} + + {% if database.db_subnet_group %} + + {{ database.db_subnet_group.subnet_name }} + {{ database.db_subnet_group.description }} + {{ database.db_subnet_group.status }} + + {% for subnet in database.db_subnet_group.subnets %} + + Active + {{ subnet.id }} + + {{ subnet.availability_zone }} + false + + + {% endfor %} + + {{ database.db_subnet_group.vpc_id }} + + {% endif %} + {{ 
database.publicly_accessible }} + {{ database.auto_minor_version_upgrade }} + {{ database.allocated_storage }} + {{ database.storage_encrypted }} + {% if database.kms_key_id %} + {{ database.kms_key_id }} + {% endif %} + {% if database.iops %} + {{ database.iops }} + io1 + {% else %} + {{ database.storage_type }} + {% endif %} + {{ database.db_instance_class }} + {{ database.master_username }} + +
{{ database.address }}
+ {{ database.port }} +
+
""") + return template.render(database=self) + @property def address(self): return "{0}.aaaaaaaaaa.{1}.rds.amazonaws.com".format(self.db_instance_identifier, self.region) @@ -135,21 +256,25 @@ class Database(object): "engine": properties.get("Engine"), "engine_version": properties.get("EngineVersion"), "iops": properties.get("Iops"), + "kms_key_id": properties.get("KmsKeyId"), "master_user_password": properties.get('MasterUserPassword'), "master_username": properties.get('MasterUsername'), "multi_az": properties.get("MultiAZ"), + "db_parameter_group_name": properties.get('DBParameterGroupName'), "port": properties.get('Port', 3306), "publicly_accessible": properties.get("PubliclyAccessible"), "region": region_name, "security_groups": security_groups, + "storage_encrypted": properties.get("StorageEncrypted"), "storage_type": properties.get("StorageType"), + "tags": properties.get("Tags"), } rds2_backend = rds2_backends[region_name] source_db_identifier = properties.get("SourceDBInstanceIdentifier") if source_db_identifier: # Replica - db_kwargs["source_db_identifier"] = source_db_identifier.db_instance_identifier + db_kwargs["source_db_identifier"] = source_db_identifier database = rds2_backend.create_database_replica(db_kwargs) else: database = rds2_backend.create_database(db_kwargs) @@ -236,15 +361,19 @@ class Database(object): def remove_tags(self, tag_keys): self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + def delete(self, region_name): + backend = rds2_backends[region_name] + backend.delete_database(self.db_instance_identifier) + class SecurityGroup(object): - def __init__(self, group_name, description): + def __init__(self, group_name, description, tags): self.group_name = group_name self.description = description self.status = "authorized" self.ip_ranges = [] self.ec2_security_groups = [] - self.tags = [] + self.tags = tags self.owner_id = '1234567890' self.vpc_id = None @@ -301,27 +430,29 @@ class SecurityGroup(object): 
properties = cloudformation_json['Properties'] group_name = resource_name.lower() + get_random_hex(12) description = properties['GroupDescription'] - security_group_ingress = properties['DBSecurityGroupIngress'] + security_group_ingress_rules = properties.get('DBSecurityGroupIngress', []) + tags = properties.get('Tags') ec2_backend = ec2_backends[region_name] rds2_backend = rds2_backends[region_name] security_group = rds2_backend.create_security_group( group_name, description, + tags, ) - for ingress_type, ingress_value in security_group_ingress.items(): - if ingress_type == "CIDRIP": - security_group.authorize_cidr(ingress_value) - elif ingress_type == "EC2SecurityGroupName": - subnet = ec2_backend.get_security_group_from_name(ingress_value) - security_group.authorize_security_group(subnet) - elif ingress_type == "EC2SecurityGroupId": - subnet = ec2_backend.get_security_group_from_id(ingress_value) - security_group.authorize_security_group(subnet) + for security_group_ingress in security_group_ingress_rules: + for ingress_type, ingress_value in security_group_ingress.items(): + if ingress_type == "CIDRIP": + security_group.authorize_cidr(ingress_value) + elif ingress_type == "EC2SecurityGroupName": + subnet = ec2_backend.get_security_group_from_name(ingress_value) + security_group.authorize_security_group(subnet) + elif ingress_type == "EC2SecurityGroupId": + subnet = ec2_backend.get_security_group_from_id(ingress_value) + security_group.authorize_security_group(subnet) return security_group def get_tags(self): - # TODO: Write tags add/remove/list tests for SecurityGroups return self.tags def add_tags(self, tags): @@ -333,14 +464,18 @@ class SecurityGroup(object): def remove_tags(self, tag_keys): self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + def delete(self, region_name): + backend = rds2_backends[region_name] + backend.delete_security_group(self.group_name) + class SubnetGroup(object): - def __init__(self, subnet_name, 
description, subnets): + def __init__(self, subnet_name, description, subnets, tags): self.subnet_name = subnet_name self.description = description self.subnets = subnets self.status = "Complete" - self.tags = [] + self.tags = tags self.vpc_id = self.subnets[0].vpc_id def to_xml(self): @@ -392,6 +527,7 @@ class SubnetGroup(object): subnet_name = resource_name.lower() + get_random_hex(12) description = properties['DBSubnetGroupDescription'] subnet_ids = properties['SubnetIds'] + tags = properties.get('Tags') ec2_backend = ec2_backends[region_name] subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids] @@ -400,11 +536,11 @@ class SubnetGroup(object): subnet_name, description, subnets, + tags, ) return subnet_group def get_tags(self): - # TODO: Write tags add/remove/list tests for SubnetGroups return self.tags def add_tags(self, tags): @@ -416,15 +552,27 @@ class SubnetGroup(object): def remove_tags(self, tag_keys): self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + def delete(self, region_name): + backend = rds2_backends[region_name] + backend.delete_subnet_group(self.subnet_name) + class RDS2Backend(BaseBackend): - def __init__(self): + def __init__(self, region): + self.region = region self.arn_regex = re_compile(r'^arn:aws:rds:.*:[0-9]*:(db|es|og|pg|ri|secgrp|snapshot|subgrp):.*$') self.databases = {} + self.db_parameter_groups = {} + self.option_groups = {} self.security_groups = {} self.subnet_groups = {} - self.option_groups = {} + + def reset(self): + # preserve region + region = self.region + self.__dict__ = {} + self.__init__(region) def create_database(self, db_kwargs): database_id = db_kwargs['db_instance_identifier'] @@ -435,7 +583,10 @@ class RDS2Backend(BaseBackend): def create_database_replica(self, db_kwargs): database_id = db_kwargs['db_instance_identifier'] source_database_id = db_kwargs['source_db_identifier'] - primary = self.describe_databases(source_database_id)[0] + primary = 
self.find_db_from_id(source_database_id) + if self.arn_regex.match(source_database_id): + db_kwargs['region'] = self.region + replica = copy.deepcopy(primary) replica.update(db_kwargs) replica.set_as_replica() @@ -460,18 +611,31 @@ class RDS2Backend(BaseBackend): database = self.describe_databases(db_instance_identifier)[0] return database + def find_db_from_id(self, db_id): + if self.arn_regex.match(db_id): + arn_breakdown = db_id.split(':') + region = arn_breakdown[3] + backend = rds2_backends[region] + db_name = arn_breakdown[-1] + else: + backend = self + db_name = db_id + + return backend.describe_databases(db_name)[0] + def delete_database(self, db_instance_identifier): if db_instance_identifier in self.databases: database = self.databases.pop(db_instance_identifier) if database.is_replica: - primary = self.describe_databases(database.source_db_identifier)[0] + primary = self.find_db_from_id(database.source_db_identifier) primary.remove_replica(database) + database.status = 'deleting' return database else: raise DBInstanceNotFoundError(db_instance_identifier) - def create_security_group(self, group_name, description): - security_group = SecurityGroup(group_name, description) + def create_security_group(self, group_name, description, tags): + security_group = SecurityGroup(group_name, description, tags) self.security_groups[group_name] = security_group return security_group @@ -489,13 +653,19 @@ class RDS2Backend(BaseBackend): else: raise DBSecurityGroupNotFoundError(security_group_name) + def delete_db_parameter_group(self, db_parameter_group_name): + if db_parameter_group_name in self.db_parameter_groups: + return self.db_parameter_groups.pop(db_parameter_group_name) + else: + raise DBParameterGroupNotFoundError(db_parameter_group_name) + def authorize_security_group(self, security_group_name, cidr_ip): security_group = self.describe_security_groups(security_group_name)[0] security_group.authorize_cidr(cidr_ip) return security_group - def 
create_subnet_group(self, subnet_name, description, subnets): - subnet_group = SubnetGroup(subnet_name, description, subnets) + def create_subnet_group(self, subnet_name, description, subnets, tags): + subnet_group = SubnetGroup(subnet_name, description, subnets, tags) self.subnet_groups[subnet_name] = subnet_group return subnet_group @@ -580,24 +750,18 @@ class RDS2Backend(BaseBackend): @staticmethod def describe_option_group_options(engine_name, major_engine_version=None): - default_option_group_options = { - 'mysql': {'all': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": "12", "OptionsDependedOn": [], "MajorEngineVersion": "5.6", "Persistent": false, "DefaultPort": 11211, "Permanent": false, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transaction", "DefaultValue": "1", "AllowedValues": "1-4294967295", "IsModifiable": true, "SettingName": "DAEMON_MEMCACHED_R_BATCH_SIZE", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transaction", "DefaultValue": "1", "AllowedValues": "1-4294967295", "IsModifiable": true, "SettingName": "DAEMON_MEMCACHED_W_BATCH_SIZE", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies how often to auto-commit idle connections that use the InnoDB memcached interface.", "DefaultValue": "5", "AllowedValues": "1-1073741824", "IsModifiable": true, "SettingName": "INNODB_API_BK_COMMIT_INTERVAL", "ApplyType": "DYNAMIC"}, {"SettingDescription": "Disables the use of row locks when using the InnoDB memcached interface.", "DefaultValue": "0", "AllowedValues": "0,1", "IsModifiable": true, "SettingName": "INNODB_API_DISABLE_ROWLOCK", "ApplyType": "STATIC"}, {"SettingDescription": "Locks the table used by the InnoDB 
memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.", "DefaultValue": "0", "AllowedValues": "0,1", "IsModifiable": true, "SettingName": "INNODB_API_ENABLE_MDL", "ApplyType": "STATIC"}, {"SettingDescription": "Lets you control the transaction isolation level on queries processed by the memcached interface.", "DefaultValue": "0", "AllowedValues": "0-3", "IsModifiable": true, "SettingName": "INNODB_API_TRX_LEVEL", "ApplyType": "STATIC"}, {"SettingDescription": "The binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.", "DefaultValue": "auto", "AllowedValues": "auto,ascii,binary", "IsModifiable": true, "SettingName": "BINDING_PROTOCOL", "ApplyType": "STATIC"}, {"SettingDescription": "The backlog queue configures how many network connections can be waiting to be processed by memcached", "DefaultValue": "1024", "AllowedValues": "1-2048", "IsModifiable": true, "SettingName": "BACKLOG_QUEUE_LIMIT", "ApplyType": "STATIC"}, {"SettingDescription": "Disable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.", "DefaultValue": "0", "AllowedValues": "0,1", "IsModifiable": true, "SettingName": "CAS_DISABLED", "ApplyType": "STATIC"}, {"SettingDescription": "Minimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. 
The default is 48 and you can get a significant memory efficiency gain with a lower value.", "DefaultValue": "48", "AllowedValues": "1-48", "IsModifiable": true, "SettingName": "CHUNK_SIZE", "ApplyType": "STATIC"}, {"SettingDescription": "Chunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.", "DefaultValue": "1.25", "AllowedValues": "1-2", "IsModifiable": true, "SettingName": "CHUNK_SIZE_GROWTH_FACTOR", "ApplyType": "STATIC"}, {"SettingDescription": "If enabled when there is no more memory to store items, memcached will return an error rather than evicting items.", "DefaultValue": "0", "AllowedValues": "0,1", "IsModifiable": true, "SettingName": "ERROR_ON_MEMORY_EXHAUSTED", "ApplyType": "STATIC"}, {"SettingDescription": "Maximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.", "DefaultValue": "1024", "AllowedValues": "10-1024", "IsModifiable": true, "SettingName": "MAX_SIMULTANEOUS_CONNECTIONS", "ApplyType": "STATIC"}, {"SettingDescription": "Verbose level for memcached.", "DefaultValue": "v", "AllowedValues": "v,vv,vvv", "IsModifiable": true, "SettingName": "VERBOSITY", "ApplyType": "STATIC"}], "EngineName": "mysql", "Name": "MEMCACHED", "PortRequired": true, "Description": "Innodb Memcached for MySQL"}]}, "ResponseMetadata": {"RequestId": "c9847a08-9fca-11e4-9084-5754f80d5144"}}}', - '5.6': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": "12", "OptionsDependedOn": [], "MajorEngineVersion": "5.6", "Persistent": false, "DefaultPort": 11211, "Permanent": false, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transaction", "DefaultValue": "1", "AllowedValues": "1-4294967295", "IsModifiable": true, 
"SettingName": "DAEMON_MEMCACHED_R_BATCH_SIZE", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transaction", "DefaultValue": "1", "AllowedValues": "1-4294967295", "IsModifiable": true, "SettingName": "DAEMON_MEMCACHED_W_BATCH_SIZE", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies how often to auto-commit idle connections that use the InnoDB memcached interface.", "DefaultValue": "5", "AllowedValues": "1-1073741824", "IsModifiable": true, "SettingName": "INNODB_API_BK_COMMIT_INTERVAL", "ApplyType": "DYNAMIC"}, {"SettingDescription": "Disables the use of row locks when using the InnoDB memcached interface.", "DefaultValue": "0", "AllowedValues": "0,1", "IsModifiable": true, "SettingName": "INNODB_API_DISABLE_ROWLOCK", "ApplyType": "STATIC"}, {"SettingDescription": "Locks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.", "DefaultValue": "0", "AllowedValues": "0,1", "IsModifiable": true, "SettingName": "INNODB_API_ENABLE_MDL", "ApplyType": "STATIC"}, {"SettingDescription": "Lets you control the transaction isolation level on queries processed by the memcached interface.", "DefaultValue": "0", "AllowedValues": "0-3", "IsModifiable": true, "SettingName": "INNODB_API_TRX_LEVEL", "ApplyType": "STATIC"}, {"SettingDescription": "The binding protocol to use which can be either auto, ascii, or binary. 
The default is auto which means the server automatically negotiates the protocol with the client.", "DefaultValue": "auto", "AllowedValues": "auto,ascii,binary", "IsModifiable": true, "SettingName": "BINDING_PROTOCOL", "ApplyType": "STATIC"}, {"SettingDescription": "The backlog queue configures how many network connections can be waiting to be processed by memcached", "DefaultValue": "1024", "AllowedValues": "1-2048", "IsModifiable": true, "SettingName": "BACKLOG_QUEUE_LIMIT", "ApplyType": "STATIC"}, {"SettingDescription": "Disable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.", "DefaultValue": "0", "AllowedValues": "0,1", "IsModifiable": true, "SettingName": "CAS_DISABLED", "ApplyType": "STATIC"}, {"SettingDescription": "Minimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. The default is 48 and you can get a significant memory efficiency gain with a lower value.", "DefaultValue": "48", "AllowedValues": "1-48", "IsModifiable": true, "SettingName": "CHUNK_SIZE", "ApplyType": "STATIC"}, {"SettingDescription": "Chunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.", "DefaultValue": "1.25", "AllowedValues": "1-2", "IsModifiable": true, "SettingName": "CHUNK_SIZE_GROWTH_FACTOR", "ApplyType": "STATIC"}, {"SettingDescription": "If enabled when there is no more memory to store items, memcached will return an error rather than evicting items.", "DefaultValue": "0", "AllowedValues": "0,1", "IsModifiable": true, "SettingName": "ERROR_ON_MEMORY_EXHAUSTED", "ApplyType": "STATIC"}, {"SettingDescription": "Maximum number of concurrent connections. 
Setting this value to anything less than 10 prevents MySQL from starting.", "DefaultValue": "1024", "AllowedValues": "10-1024", "IsModifiable": true, "SettingName": "MAX_SIMULTANEOUS_CONNECTIONS", "ApplyType": "STATIC"}, {"SettingDescription": "Verbose level for memcached.", "DefaultValue": "v", "AllowedValues": "v,vv,vvv", "IsModifiable": true, "SettingName": "VERBOSITY", "ApplyType": "STATIC"}], "EngineName": "mysql", "Name": "MEMCACHED", "PortRequired": true, "Description": "Innodb Memcached for MySQL"}]}, "ResponseMetadata": {"RequestId": "c9847a08-9fca-11e4-9084-5754f80d5144"}}}', - }, - 'sqlserver-ee': {'all': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": "2789.0.v1", "OptionsDependedOn": [], "MajorEngineVersion": "10.50", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "sqlserver-ee", "Name": "Mirroring", "PortRequired": false, "Description": "SQLServer Database Mirroring"}, {"MinimumRequiredMinorEngineVersion": "2789.0.v1", "OptionsDependedOn": [], "MajorEngineVersion": "10.50", "Persistent": true, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "sqlserver-ee", "Name": "TDE", "PortRequired": false, "Description": "SQL Server - Transparent Data Encryption"}, {"MinimumRequiredMinorEngineVersion": "2100.60.v1", "OptionsDependedOn": [], "MajorEngineVersion": "11.00", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "sqlserver-ee", "Name": "Mirroring", "PortRequired": false, "Description": "SQLServer Database Mirroring"}, {"MinimumRequiredMinorEngineVersion": "2100.60.v1", "OptionsDependedOn": [], "MajorEngineVersion": "11.00", "Persistent": true, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "sqlserver-ee", "Name": "TDE", "PortRequired": false, 
"Description": "SQL Server - Transparent Data Encryption"}]}, "ResponseMetadata": {"RequestId": "c9f2fd9b-9fcb-11e4-8add-31b6fe33145f"}}}', - '10.50': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": "2789.0.v1", "OptionsDependedOn": [], "MajorEngineVersion": "10.50", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "sqlserver-ee", "Name": "Mirroring", "PortRequired": false, "Description": "SQLServer Database Mirroring"}, {"MinimumRequiredMinorEngineVersion": "2789.0.v1", "OptionsDependedOn": [], "MajorEngineVersion": "10.50", "Persistent": true, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "sqlserver-ee", "Name": "TDE", "PortRequired": false, "Description": "SQL Server - Transparent Data Encryption"}]}, "ResponseMetadata": {"RequestId": "e6326fd0-9fcb-11e4-99cf-55e92d4bbada"}}}', - '11.00': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": "2100.60.v1", "OptionsDependedOn": [], "MajorEngineVersion": "11.00", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "sqlserver-ee", "Name": "Mirroring", "PortRequired": false, "Description": "SQLServer Database Mirroring"}, {"MinimumRequiredMinorEngineVersion": "2100.60.v1", "OptionsDependedOn": [], "MajorEngineVersion": "11.00", "Persistent": true, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "sqlserver-ee", "Name": "TDE", "PortRequired": false, "Description": "SQL Server - Transparent Data Encryption"}]}, "ResponseMetadata": {"RequestId": "222cbeeb-9fcc-11e4-bb07-576f5bf522b5"}}}' - }, - 'oracle-ee': {'all': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, 
"OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": ["XMLDB"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX", "PortRequired": false, "Description": "Oracle Application Express Runtime Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": ["APEX"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX-DEV", "PortRequired": false, "Description": "Oracle Application Express Development Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the desired encryption behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies the desired data integrity behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of encryption algorithms in order of intended use", "DefaultValue": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "AllowedValues": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_TYPES_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of checksumming algorithms in order of intended use", "DefaultValue": "SHA1,MD5", "AllowedValues": "SHA1,MD5", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_TYPES_SERVER", 
"ApplyType": "STATIC"}], "EngineName": "oracle-ee", "Name": "NATIVE_NETWORK_ENCRYPTION", "PortRequired": false, "Description": "Oracle Advanced Security - Native Network Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": 1158, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "OEM", "PortRequired": true, "Description": "Oracle Enterprise Manager (Database Control only)"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "STATSPACK", "PortRequired": false, "Description": "Oracle Statspack"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE", "PortRequired": false, "Description": "Oracle Advanced Security - Transparent Data Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE_HSM", "PortRequired": false, "Description": "Oracle Advanced Security - TDE with HSM"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the timezone the user wants to change the system time to", "DefaultValue": "UTC", "AllowedValues": 
"Africa/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTC", "IsModifiable": true, "SettingName": "TIME_ZONE", "ApplyType": "DYNAMIC"}], "EngineName": "oracle-ee", "Name": "Timezone", "PortRequired": false, "Description": "Change time zone"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "XMLDB", "PortRequired": false, "Description": "Oracle XMLDB Repository"}]}, "ResponseMetadata": {"RequestId": "36a0a612-9fcc-11e4-a07c-e12b0fcebb71"}}}', - '11.2': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": "0.2.v4", 
"OptionsDependedOn": ["XMLDB"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX", "PortRequired": false, "Description": "Oracle Application Express Runtime Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": ["APEX"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX-DEV", "PortRequired": false, "Description": "Oracle Application Express Development Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the desired encryption behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies the desired data integrity behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of encryption algorithms in order of intended use", "DefaultValue": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "AllowedValues": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_TYPES_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of checksumming algorithms in order of intended use", "DefaultValue": "SHA1,MD5", "AllowedValues": "SHA1,MD5", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_TYPES_SERVER", "ApplyType": "STATIC"}], "EngineName": "oracle-ee", "Name": 
"NATIVE_NETWORK_ENCRYPTION", "PortRequired": false, "Description": "Oracle Advanced Security - Native Network Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": 1158, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "OEM", "PortRequired": true, "Description": "Oracle Enterprise Manager (Database Control only)"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "STATSPACK", "PortRequired": false, "Description": "Oracle Statspack"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE", "PortRequired": false, "Description": "Oracle Advanced Security - Transparent Data Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE_HSM", "PortRequired": false, "Description": "Oracle Advanced Security - TDE with HSM"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the timezone the user wants to change the system time to", "DefaultValue": "UTC", "AllowedValues": 
"Africa/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTC", "IsModifiable": true, "SettingName": "TIME_ZONE", "ApplyType": "DYNAMIC"}], "EngineName": "oracle-ee", "Name": "Timezone", "PortRequired": false, "Description": "Change time zone"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "XMLDB", "PortRequired": false, "Description": "Oracle XMLDB Repository"}]}, "ResponseMetadata": {"RequestId": "36a0a612-9fcc-11e4-a07c-e12b0fcebb71"}}}' - }, - 'oracle-sa': {'all': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": 
"0.2.v4", "OptionsDependedOn": ["XMLDB"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX", "PortRequired": false, "Description": "Oracle Application Express Runtime Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": ["APEX"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX-DEV", "PortRequired": false, "Description": "Oracle Application Express Development Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the desired encryption behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies the desired data integrity behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of encryption algorithms in order of intended use", "DefaultValue": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "AllowedValues": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_TYPES_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of checksumming algorithms in order of intended use", "DefaultValue": "SHA1,MD5", "AllowedValues": "SHA1,MD5", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_TYPES_SERVER", "ApplyType": "STATIC"}], "EngineName": "oracle-ee", "Name": 
"NATIVE_NETWORK_ENCRYPTION", "PortRequired": false, "Description": "Oracle Advanced Security - Native Network Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": 1158, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "OEM", "PortRequired": true, "Description": "Oracle Enterprise Manager (Database Control only)"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "STATSPACK", "PortRequired": false, "Description": "Oracle Statspack"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE", "PortRequired": false, "Description": "Oracle Advanced Security - Transparent Data Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE_HSM", "PortRequired": false, "Description": "Oracle Advanced Security - TDE with HSM"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the timezone the user wants to change the system time to", "DefaultValue": "UTC", "AllowedValues": 
"Africa/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTC", "IsModifiable": true, "SettingName": "TIME_ZONE", "ApplyType": "DYNAMIC"}], "EngineName": "oracle-ee", "Name": "Timezone", "PortRequired": false, "Description": "Change time zone"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "XMLDB", "PortRequired": false, "Description": "Oracle XMLDB Repository"}]}, "ResponseMetadata": {"RequestId": "36a0a612-9fcc-11e4-a07c-e12b0fcebb71"}}}', - '11.2': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": "0.2.v4", 
"OptionsDependedOn": ["XMLDB"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX", "PortRequired": false, "Description": "Oracle Application Express Runtime Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": ["APEX"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX-DEV", "PortRequired": false, "Description": "Oracle Application Express Development Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the desired encryption behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies the desired data integrity behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of encryption algorithms in order of intended use", "DefaultValue": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "AllowedValues": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_TYPES_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of checksumming algorithms in order of intended use", "DefaultValue": "SHA1,MD5", "AllowedValues": "SHA1,MD5", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_TYPES_SERVER", "ApplyType": "STATIC"}], "EngineName": "oracle-ee", "Name": 
"NATIVE_NETWORK_ENCRYPTION", "PortRequired": false, "Description": "Oracle Advanced Security - Native Network Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": 1158, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "OEM", "PortRequired": true, "Description": "Oracle Enterprise Manager (Database Control only)"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "STATSPACK", "PortRequired": false, "Description": "Oracle Statspack"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE", "PortRequired": false, "Description": "Oracle Advanced Security - Transparent Data Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE_HSM", "PortRequired": false, "Description": "Oracle Advanced Security - TDE with HSM"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the timezone the user wants to change the system time to", "DefaultValue": "UTC", "AllowedValues": 
"Africa/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTC", "IsModifiable": true, "SettingName": "TIME_ZONE", "ApplyType": "DYNAMIC"}], "EngineName": "oracle-ee", "Name": "Timezone", "PortRequired": false, "Description": "Change time zone"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "XMLDB", "PortRequired": false, "Description": "Oracle XMLDB Repository"}]}, "ResponseMetadata": {"RequestId": "36a0a612-9fcc-11e4-a07c-e12b0fcebb71"}}}' - }, - 'oracle-sa1': {'all': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": 
"0.2.v4", "OptionsDependedOn": ["XMLDB"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX", "PortRequired": false, "Description": "Oracle Application Express Runtime Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": ["APEX"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX-DEV", "PortRequired": false, "Description": "Oracle Application Express Development Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the desired encryption behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies the desired data integrity behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of encryption algorithms in order of intended use", "DefaultValue": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "AllowedValues": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_TYPES_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of checksumming algorithms in order of intended use", "DefaultValue": "SHA1,MD5", "AllowedValues": "SHA1,MD5", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_TYPES_SERVER", "ApplyType": "STATIC"}], "EngineName": "oracle-ee", "Name": 
"NATIVE_NETWORK_ENCRYPTION", "PortRequired": false, "Description": "Oracle Advanced Security - Native Network Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": 1158, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "OEM", "PortRequired": true, "Description": "Oracle Enterprise Manager (Database Control only)"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "STATSPACK", "PortRequired": false, "Description": "Oracle Statspack"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE", "PortRequired": false, "Description": "Oracle Advanced Security - Transparent Data Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE_HSM", "PortRequired": false, "Description": "Oracle Advanced Security - TDE with HSM"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the timezone the user wants to change the system time to", "DefaultValue": "UTC", "AllowedValues": 
"Africa/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTC", "IsModifiable": true, "SettingName": "TIME_ZONE", "ApplyType": "DYNAMIC"}], "EngineName": "oracle-ee", "Name": "Timezone", "PortRequired": false, "Description": "Change time zone"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "XMLDB", "PortRequired": false, "Description": "Oracle XMLDB Repository"}]}, "ResponseMetadata": {"RequestId": "36a0a612-9fcc-11e4-a07c-e12b0fcebb71"}}}', - '11.2': '{"DescribeOptionGroupOptionsResponse": {"DescribeOptionGroupOptionsResult": {"Marker": null, "OptionGroupOptions": [{"MinimumRequiredMinorEngineVersion": "0.2.v4", 
"OptionsDependedOn": ["XMLDB"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX", "PortRequired": false, "Description": "Oracle Application Express Runtime Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": ["APEX"], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "APEX-DEV", "PortRequired": false, "Description": "Oracle Application Express Development Environment"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the desired encryption behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies the desired data integrity behavior", "DefaultValue": "REQUESTED", "AllowedValues": "ACCEPTED,REJECTED,REQUESTED,REQUIRED", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of encryption algorithms in order of intended use", "DefaultValue": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "AllowedValues": "RC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40", "IsModifiable": true, "SettingName": "SQLNET.ENCRYPTION_TYPES_SERVER", "ApplyType": "STATIC"}, {"SettingDescription": "Specifies list of checksumming algorithms in order of intended use", "DefaultValue": "SHA1,MD5", "AllowedValues": "SHA1,MD5", "IsModifiable": true, "SettingName": "SQLNET.CRYPTO_CHECKSUM_TYPES_SERVER", "ApplyType": "STATIC"}], "EngineName": "oracle-ee", "Name": 
"NATIVE_NETWORK_ENCRYPTION", "PortRequired": false, "Description": "Oracle Advanced Security - Native Network Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": 1158, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "OEM", "PortRequired": true, "Description": "Oracle Enterprise Manager (Database Control only)"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "STATSPACK", "PortRequired": false, "Description": "Oracle Statspack"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE", "PortRequired": false, "Description": "Oracle Advanced Security - Transparent Data Encryption"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "TDE_HSM", "PortRequired": false, "Description": "Oracle Advanced Security - TDE with HSM"}, {"MinimumRequiredMinorEngineVersion": "0.2.v3", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": true, "DefaultPort": null, "Permanent": true, "OptionGroupOptionSettings": [{"SettingDescription": "Specifies the timezone the user wants to change the system time to", "DefaultValue": "UTC", "AllowedValues": 
"Africa/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTC", "IsModifiable": true, "SettingName": "TIME_ZONE", "ApplyType": "DYNAMIC"}], "EngineName": "oracle-ee", "Name": "Timezone", "PortRequired": false, "Description": "Change time zone"}, {"MinimumRequiredMinorEngineVersion": "0.2.v4", "OptionsDependedOn": [], "MajorEngineVersion": "11.2", "Persistent": false, "DefaultPort": null, "Permanent": false, "OptionGroupOptionSettings": [], "EngineName": "oracle-ee", "Name": "XMLDB", "PortRequired": false, "Description": "Oracle XMLDB Repository"}]}, "ResponseMetadata": {"RequestId": "36a0a612-9fcc-11e4-a07c-e12b0fcebb71"}}}' - } - } + default_option_group_options = {'mysql': {'5.6': '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations 
(get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. 
Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. 
The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'oracle-ee': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity 
behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTC
TrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'oracle-sa': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity 
behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTC
TrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'oracle-sa1': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity 
behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTC
TrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'sqlserver-ee': {'10.50': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + '11.00': '\n \n \n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}} + if engine_name not in default_option_group_options: raise RDSClientError('InvalidParameterValue', 'Invalid DB engine: {0}'.format(engine_name)) if major_engine_version and major_engine_version not in default_option_group_options[engine_name]: @@ -620,6 +784,60 @@ class RDS2Backend(BaseBackend): self.option_groups[option_group_name].add_options(options_to_include) return self.option_groups[option_group_name] + def create_db_parameter_group(self, db_parameter_group_kwargs): + db_parameter_group_id = db_parameter_group_kwargs['name'] + if db_parameter_group_kwargs['name'] in self.db_parameter_groups: + raise RDSClientError('DBParameterGroupAlreadyExistsFault', + 'A DB parameter group named {0} already exists.'.format(db_parameter_group_kwargs['name'])) + if not db_parameter_group_kwargs.get('description'): + raise RDSClientError('InvalidParameterValue', + 'The parameter Description must be provided and must not be blank.') + if not db_parameter_group_kwargs.get('family'): + raise RDSClientError('InvalidParameterValue', + 
'The parameter DBParameterGroupName must be provided and must not be blank.') + + db_parameter_group = DBParameterGroup(**db_parameter_group_kwargs) + self.db_parameter_groups[db_parameter_group_id] = db_parameter_group + return db_parameter_group + + def describe_db_parameter_groups(self, db_parameter_group_kwargs): + db_parameter_group_list = [] + + if db_parameter_group_kwargs.get('marker'): + marker = db_parameter_group_kwargs['marker'] + else: + marker = 0 + if db_parameter_group_kwargs.get('max_records'): + if db_parameter_group_kwargs['max_records'] < 20 or db_parameter_group_kwargs['max_records'] > 100: + raise RDSClientError('InvalidParameterValue', + 'Invalid value for max records. Must be between 20 and 100') + max_records = db_parameter_group_kwargs['max_records'] + else: + max_records = 100 + + for db_parameter_group_name, db_parameter_group in self.db_parameter_groups.items(): + if not db_parameter_group_kwargs.get('name') or db_parameter_group.name == db_parameter_group_kwargs.get('name'): + db_parameter_group_list.append(db_parameter_group) + else: + continue + + return db_parameter_group_list[marker:max_records+marker] + + def modify_db_parameter_group(self, db_parameter_group_name, db_parameter_group_parameters): + if db_parameter_group_name not in self.db_parameter_groups: + raise DBParameterGroupNotFoundError(db_parameter_group_name) + + db_parameter_group = self.db_parameter_groups[db_parameter_group_name] + db_parameter_group.update_parameters(db_parameter_group_parameters) + + return db_parameter_group + + def delete_db_parameter_group(self, db_parameter_group_name): + if db_parameter_group_name in self.db_parameter_groups: + return self.db_parameter_groups.pop(db_parameter_group_name) + else: + raise DBParameterGroupNotFoundError(db_parameter_group_name) + def list_tags_for_resource(self, arn): if self.arn_regex.match(arn): arn_breakdown = arn.split(':') @@ -635,19 +853,19 @@ class RDS2Backend(BaseBackend): if resource_name in 
self.option_groups: return self.option_groups[resource_name].get_tags() elif resource_type == 'pg': # Parameter Group - # TODO: Complete call to tags on resource type Parameter Group - return [] + if resource_name in self.db_parameter_groups: + return self.db_parameter_groups[resource_name].get_tags() elif resource_type == 'ri': # Reserved DB instance # TODO: Complete call to tags on resource type Reserved DB instance return [] elif resource_type == 'secgrp': # DB security group - if resource_type in self.security_groups: + if resource_name in self.security_groups: return self.security_groups[resource_name].get_tags() elif resource_type == 'snapshot': # DB Snapshot # TODO: Complete call to tags on resource type DB Snapshot return [] elif resource_type == 'subgrp': # DB subnet group - if resource_type in self.subnet_groups: + if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].get_tags() else: raise RDSClientError('InvalidParameterValue', @@ -672,16 +890,16 @@ class RDS2Backend(BaseBackend): elif resource_type == 'ri': # Reserved DB instance return None elif resource_type == 'secgrp': # DB security group - if resource_type in self.security_groups: + if resource_name in self.security_groups: return self.security_groups[resource_name].remove_tags(tag_keys) elif resource_type == 'snapshot': # DB Snapshot return None elif resource_type == 'subgrp': # DB subnet group - if resource_type in self.subnet_groups: + if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].remove_tags(tag_keys) else: raise RDSClientError('InvalidParameterValue', - 'Invalid resource name: {}'.format(arn)) + 'Invalid resource name: {0}'.format(arn)) def add_tags_to_resource(self, arn, tags): if self.arn_regex.match(arn): @@ -701,16 +919,16 @@ class RDS2Backend(BaseBackend): elif resource_type == 'ri': # Reserved DB instance return [] elif resource_type == 'secgrp': # DB security group - if resource_type in self.security_groups: + if 
resource_name in self.security_groups: return self.security_groups[resource_name].add_tags(tags) elif resource_type == 'snapshot': # DB Snapshot return [] elif resource_type == 'subgrp': # DB subnet group - if resource_type in self.subnet_groups: + if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].add_tags(tags) else: raise RDSClientError('InvalidParameterValue', - 'Invalid resource name: {}'.format(arn)) + 'Invalid resource name: {0}'.format(arn)) class OptionGroup(object): @@ -736,6 +954,17 @@ class OptionGroup(object): }""") return template.render(option_group=self) + def to_xml(self): + template = Template(""" + {{ option_group.name }} + {{ option_group.vpc_and_non_vpc_instance_memberships }} + {{ option_group.major_engine_version }} + {{ option_group.engine_name }} + {{ option_group.description }} + + """) + return template.render(option_group=self) + def remove_options(self, options_to_remove): # TODO: Check for option in self.options and remove if exists. 
Raise error otherwise return @@ -758,11 +987,20 @@ class OptionGroup(object): class OptionGroupOption(object): - def __init__(self, engine_name, major_engine_version): - self.engine_name = engine_name - self.major_engine_version = major_engine_version - #TODO: Create validation for Options - #TODO: formulate way to store options settings + def __init__(self, **kwargs): + self.default_port = kwargs.get('default_port') + self.description = kwargs.get('description') + self.engine_name = kwargs.get('engine_name') + self.major_engine_version = kwargs.get('major_engine_version') + self.name = kwargs.get('name') + self.option_group_option_settings = self._make_option_group_option_settings(kwargs.get('option_group_option_settings', [])) + self.options_depended_on = kwargs.get('options_depended_on', []) + self.permanent = kwargs.get('permanent') + self.persistent = kwargs.get('persistent') + self.port_required = kwargs.get('port_required') + + def _make_option_group_option_settings(self, option_group_option_settings_kwargs): + return [OptionGroupOptionSetting(**setting_kwargs) for setting_kwargs in option_group_option_settings_kwargs] def to_json(self): template = Template("""{ "MinimumRequiredMinorEngineVersion": @@ -780,7 +1018,109 @@ class OptionGroupOption(object): }""") return template.render(option_group=self) + def to_xml(self): + template = Template(""" + {{ option_group.major_engine_version }} + {{ option_group.default_port }} + {{ option_group.port_required }} + {{ option_group.persistent }} + + {%- for option_name in option_group.options_depended_on -%} + {{ option_name }} + {%- endfor -%} + + {{ option_group.permanent }} + {{ option_group.description }} + {{ option_group.name }} + + {%- for setting in option_group.option_group_option_settings -%} + {{ setting.to_xml() }} + {%- endfor -%} + + {{ option_group.engine_name }} + {{ option_group.minimum_required_minor_engine_version }} +""") + return template.render(option_group=self) -rds2_backends = {} -for region 
in boto.rds2.regions(): - rds2_backends[region.name] = RDS2Backend() + +class OptionGroupOptionSetting(object): + def __init__(self, *kwargs): + self.allowed_values = kwargs.get('allowed_values') + self.apply_type = kwargs.get('apply_type') + self.default_value = kwargs.get('default_value') + self.is_modifiable = kwargs.get('is_modifiable') + self.setting_description = kwargs.get('setting_description') + self.setting_name = kwargs.get('setting_name') + + def to_xml(self): + template = Template(""" + {{ option_group_option_setting.allowed_values }} + {{ option_group_option_setting.apply_type }} + {{ option_group_option_setting.default_value }} + {{ option_group_option_setting.is_modifiable }} + {{ option_group_option_setting.setting_description }} + {{ option_group_option_setting.setting_name }} +""") + return template.render(option_group_option_setting=self) + +class DBParameterGroup(object): + def __init__(self, name, description, family, tags): + self.name = name + self.description = description + self.family = family + self.tags = tags + self.parameters = defaultdict(dict) + + def to_xml(self): + template = Template(""" + {{ param_group.name }} + {{ param_group.family }} + {{ param_group.description }} + """) + return template.render(param_group=self) + + def get_tags(self): + return self.tags + + def add_tags(self, tags): + new_keys = [tag_set['Key'] for tag_set in tags] + self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in new_keys] + self.tags.extend(tags) + return self.tags + + def remove_tags(self, tag_keys): + self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + + def update_parameters(self, new_parameters): + for new_parameter in new_parameters: + parameter = self.parameters[new_parameter['ParameterName']] + parameter.update(new_parameter) + + def delete(self, region_name): + backend = rds2_backends[region_name] + backend.delete_db_parameter_group(self.name) + + @classmethod + def 
create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + db_parameter_group_kwargs = { + 'description': properties['Description'], + 'family': properties['Family'], + 'name': resource_name.lower(), + 'tags': properties.get("Tags"), + } + db_parameter_group_parameters = [] + for db_parameter, db_parameter_value in properties.get('Parameters', {}).items(): + db_parameter_group_parameters.append({ + 'ParameterName': db_parameter, + 'ParameterValue': db_parameter_value, + }) + + rds2_backend = rds2_backends[region_name] + db_parameter_group = rds2_backend.create_db_parameter_group(db_parameter_group_kwargs) + db_parameter_group.update_parameters(db_parameter_group_parameters) + return db_parameter_group + + +rds2_backends = dict((region.name, RDS2Backend(region.name)) for region in boto.rds2.regions()) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index bd51f6ea0..879edbdd3 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -1,8 +1,10 @@ from __future__ import unicode_literals +from collections import defaultdict from moto.core.responses import BaseResponse from moto.ec2.models import ec2_backends from .models import rds2_backends +from .exceptions import DBParameterGroupNotFoundError import json import re @@ -22,11 +24,12 @@ class RDS2Response(BaseResponse): "db_instance_class": self._get_param('DBInstanceClass'), "db_instance_identifier": self._get_param('DBInstanceIdentifier'), "db_name": self._get_param("DBName"), - # DBParameterGroupName + "db_parameter_group_name": self._get_param("DBParameterGroupName"), "db_subnet_group_name": self._get_param("DBSubnetGroupName"), "engine": self._get_param("Engine"), "engine_version": self._get_param("EngineVersion"), "iops": self._get_int_param("Iops"), + "kms_key_id": self._get_param("KmsKeyId"), "master_user_password": self._get_param('MasterUserPassword'), "master_username": self._get_param('MasterUsername'), 
"multi_az": self._get_bool_param("MultiAZ"), @@ -36,12 +39,13 @@ class RDS2Response(BaseResponse): # PreferredMaintenanceWindow "publicly_accessible": self._get_param("PubliclyAccessible"), "region": self.region, - "security_groups": self._get_multi_param('DBSecurityGroups.member'), + "security_groups": self._get_multi_param('DBSecurityGroups.DBSecurityGroupName'), + "storage_encrypted": self._get_param("StorageEncrypted"), "storage_type": self._get_param("StorageType"), # VpcSecurityGroupIds.member.N - "tags": list() + "tags": list(), } - args['tags'] = self.unpack_complex_list_params('Tags.member', ('Key', 'Value')) + args['tags'] = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) return args def _get_db_replica_kwargs(self): @@ -67,6 +71,14 @@ class RDS2Response(BaseResponse): 'name': self._get_param('OptionGroupName') } + def _get_db_parameter_group_kwargs(self): + return { + 'description': self._get_param('Description'), + 'family': self._get_param('DBParameterGroupFamily'), + 'name': self._get_param('DBParameterGroupName'), + 'tags': self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')), + } + def unpack_complex_list_params(self, label, names): unpacked_list = list() count = 1 @@ -150,7 +162,7 @@ class RDS2Response(BaseResponse): def add_tags_to_resource(self): arn = self._get_param('ResourceName') - tags = self.unpack_complex_list_params('Tags.member', ('Key', 'Value')) + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) tags = self.backend.add_tags_to_resource(arn, tags) template = self.response_template(ADD_TAGS_TO_RESOURCE_TEMPLATE) return template.render(tags=tags) @@ -168,7 +180,8 @@ class RDS2Response(BaseResponse): def create_db_security_group(self): group_name = self._get_param('DBSecurityGroupName') description = self._get_param('DBSecurityGroupDescription') - security_group = self.backend.create_security_group(group_name, description) + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + 
security_group = self.backend.create_security_group(group_name, description, tags) template = self.response_template(CREATE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) @@ -206,9 +219,10 @@ class RDS2Response(BaseResponse): def create_db_subnet_group(self): subnet_name = self._get_param('DBSubnetGroupName') description = self._get_param('DBSubnetGroupDescription') - subnet_ids = self._get_multi_param('SubnetIds.member') + subnet_ids = self._get_multi_param('SubnetIds.SubnetIdentifier') + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) subnets = [ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids] - subnet_group = self.backend.create_subnet_group(subnet_name, description, subnets) + subnet_group = self.backend.create_subnet_group(subnet_name, description, subnets, tags) template = self.response_template(CREATE_SUBNET_GROUP_TEMPLATE) return template.render(subnet_group=subnet_group) @@ -283,225 +297,326 @@ class RDS2Response(BaseResponse): template = self.response_template(MODIFY_OPTION_GROUP_TEMPLATE) return template.render(option_group=option_group) + def create_dbparameter_group(self): + return self.create_db_parameter_group() -CREATE_DATABASE_TEMPLATE = """{ - "CreateDBInstanceResponse": { - "CreateDBInstanceResult": { - "DBInstance": {{ database.to_json() }} - }, - "ResponseMetadata": { "RequestId": "523e3218-afc7-11c3-90f5-f90431260ab4" } - } -}""" + def create_db_parameter_group(self): + kwargs = self._get_db_parameter_group_kwargs() + db_parameter_group = self.backend.create_db_parameter_group(kwargs) + template = self.response_template(CREATE_DB_PARAMETER_GROUP_TEMPLATE) + return template.render(db_parameter_group=db_parameter_group) -CREATE_DATABASE_REPLICA_TEMPLATE = """{"CreateDBInstanceReadReplicaResponse": { - "ResponseMetadata": { - "RequestId": "5e60c46d-a844-11e4-bb68-17f36418e58f" - }, - "CreateDBInstanceReadReplicaResult": { - "DBInstance": {{ database.to_json() }} - } 
-}}""" + def describe_dbparameter_groups(self): + return self.describe_db_parameter_groups() -DESCRIBE_DATABASES_TEMPLATE = """{ - "DescribeDBInstancesResponse": { - "DescribeDBInstancesResult": { - "DBInstances": [ - {%- for database in databases -%} - {%- if loop.index != 1 -%},{%- endif -%} - {{ database.to_json() }} + def describe_db_parameter_groups(self): + kwargs = self._get_db_parameter_group_kwargs() + kwargs['max_records'] = self._get_param('MaxRecords') + kwargs['marker'] = self._get_param('Marker') + db_parameter_groups = self.backend.describe_db_parameter_groups(kwargs) + template = self.response_template(DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE) + return template.render(db_parameter_groups=db_parameter_groups) + + def modify_dbparameter_group(self): + return self.modify_db_parameter_group() + + def modify_db_parameter_group(self): + db_parameter_group_name = self._get_param('DBParameterGroupName') + db_parameter_group_parameters = self._get_db_parameter_group_paramters() + db_parameter_group = self.backend.modify_db_parameter_group(db_parameter_group_name, + db_parameter_group_parameters) + template = self.response_template(MODIFY_DB_PARAMETER_GROUP_TEMPLATE) + return template.render(db_parameter_group=db_parameter_group) + + def _get_db_parameter_group_paramters(self): + parameter_group_parameters = defaultdict(dict) + for param_name, value in self.querystring.items(): + if not param_name.startswith('Parameters.Parameter'): + continue + + split_param_name = param_name.split('.') + param_id = split_param_name[2] + param_setting = split_param_name[3] + + parameter_group_parameters[param_id][param_setting] = value[0] + + return parameter_group_parameters.values() + + def describe_dbparameters(self): + return self.describe_db_parameters() + + def describe_db_parameters(self): + db_parameter_group_name = self._get_param('DBParameterGroupName') + db_parameter_groups = self.backend.describe_db_parameter_groups({'name': db_parameter_group_name}) + if not 
db_parameter_groups: + raise DBParameterGroupNotFoundError(db_parameter_group_name) + + template = self.response_template(DESCRIBE_DB_PARAMETERS_TEMPLATE) + return template.render(db_parameter_group=db_parameter_groups[0]) + + def delete_dbparameter_group(self): + return self.delete_db_parameter_group() + + def delete_db_parameter_group(self): + kwargs = self._get_db_parameter_group_kwargs() + db_parameter_group = self.backend.delete_db_parameter_group(kwargs['name']) + template = self.response_template(DELETE_DB_PARAMETER_GROUP_TEMPLATE) + return template.render(db_parameter_group=db_parameter_group) + + +CREATE_DATABASE_TEMPLATE = """ + + {{ database.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + +""" + +CREATE_DATABASE_REPLICA_TEMPLATE = """ + + {{ database.to_xml() }} + + + 5e60c46d-a844-11e4-bb68-17f36418e58f + +""" + +DESCRIBE_DATABASES_TEMPLATE = """ + + + {%- for database in databases -%} + {{ database.to_xml() }} + {%- endfor -%} + + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + +""" + +MODIFY_DATABASE_TEMPLATE = """ + + {{ database.to_xml() }} + + + bb58476c-a1a8-11e4-99cf-55e92d4bbada + +""" + +REBOOT_DATABASE_TEMPLATE = """ + + {{ database.to_xml() }} + + + d55711cb-a1ab-11e4-99cf-55e92d4bbada + +""" + + +DELETE_DATABASE_TEMPLATE = """ + + {{ database.to_xml() }} + + + 7369556f-b70d-11c3-faca-6ba18376ea1b + +""" + +CREATE_SECURITY_GROUP_TEMPLATE = """ + + {{ security_group.to_xml() }} + + + 462165d0-a77a-11e4-a5fa-75b30c556f97 + +""" + +DESCRIBE_SECURITY_GROUPS_TEMPLATE = """ + + + {% for security_group in security_groups %} + {{ security_group.to_xml() }} + {% endfor %} + + + + 5df2014e-a779-11e4-bdb0-594def064d0c + +""" + +DELETE_SECURITY_GROUP_TEMPLATE = """ + + 97e846bd-a77d-11e4-ac58-91351c0f3426 + +""" + +AUTHORIZE_SECURITY_GROUP_TEMPLATE = """ + + {{ security_group.to_xml() }} + + + 75d32fd5-a77e-11e4-8892-b10432f7a87d + +""" + +CREATE_SUBNET_GROUP_TEMPLATE = """ + + {{ subnet_group.to_xml() }} + + + 3a401b3f-bb9e-11d3-f4c6-37db295f7674 + 
+""" + +DESCRIBE_SUBNET_GROUPS_TEMPLATE = """ + + + {% for subnet_group in subnet_groups %} + {{ subnet_group.to_xml() }} + {% endfor %} + + + + b783db3b-b98c-11d3-fbc7-5c0aad74da7c + +""" + +DELETE_SUBNET_GROUP_TEMPLATE = """ + + 13785dd5-a7fc-11e4-bb9c-7f371d0859b0 + +""" + +CREATE_OPTION_GROUP_TEMPLATE = """ + + {{ option_group.to_xml() }} + + + 1e38dad4-9f50-11e4-87ea-a31c60ed2e36 + +""" + +DELETE_OPTION_GROUP_TEMPLATE = """ + + e2590367-9fa2-11e4-99cf-55e92d41c60e + +""" + +DESCRIBE_OPTION_GROUP_TEMPLATE = """ + + + {%- for option_group in option_groups -%} + {{ option_group.to_xml() }} + {%- endfor -%} + + + + 4caf445d-9fbc-11e4-87ea-a31c60ed2e36 + +""" + +DESCRIBE_OPTION_GROUP_OPTIONS_TEMPLATE = """ + + + {%- for option_group_option in option_group_options -%} + {{ option_group_option.to_xml() }} + {%- endfor -%} + + + + 457f7bb8-9fbf-11e4-9084-5754f80d5144 + +""" + +MODIFY_OPTION_GROUP_TEMPLATE = """ + + {{ option_group.to_xml() }} + + + ce9284a5-a0de-11e4-b984-a11a53e1f328 + +""" + +CREATE_DB_PARAMETER_GROUP_TEMPLATE = """ + + {{ db_parameter_group.to_xml() }} + + + 7805c127-af22-11c3-96ac-6999cc5f7e72 + +""" + +DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE = """ + + + {%- for db_parameter_group in db_parameter_groups -%} + {{ db_parameter_group.to_xml() }} + {%- endfor -%} + + + + b75d527a-b98c-11d3-f272-7cd6cce12cc5 + +""" + +MODIFY_DB_PARAMETER_GROUP_TEMPLATE = """ + + {{ db_parameter_group.name }} + + + 12d7435e-bba0-11d3-fe11-33d33a9bb7e3 + +""" + +DELETE_DB_PARAMETER_GROUP_TEMPLATE = """ + + cad6c267-ba25-11d3-fe11-33d33a9bb7e3 + +""" + +DESCRIBE_DB_PARAMETERS_TEMPLATE = """ + + + {%- for db_parameter_name, db_parameter in db_parameter_group.parameters.items() -%} + + {%- for parameter_name, parameter_value in db_parameter.items() -%} + <{{ parameter_name }}>{{ parameter_value }} {%- endfor -%} - ] - }, - "ResponseMetadata": { "RequestId": "523e3218-afc7-11c3-90f5-f90431260ab4" } - } -}""" + + {%- endfor -%} + + + + 8c40488f-b9ff-11d3-a15e-7ac49293f4fa + + 
+""" -MODIFY_DATABASE_TEMPLATE = """{"ModifyDBInstanceResponse": { - "ModifyDBInstanceResult": { - "DBInstance": {{ database.to_json() }}, - "ResponseMetadata": { - "RequestId": "bb58476c-a1a8-11e4-99cf-55e92d4bbada" - } - } - } -}""" +LIST_TAGS_FOR_RESOURCE_TEMPLATE = """ + + + {%- for tag in tags -%} + + {{ tag['Key'] }} + {{ tag['Value'] }} + + {%- endfor -%} + + + + 8c21ba39-a598-11e4-b688-194eaf8658fa + +""" -REBOOT_DATABASE_TEMPLATE = """{"RebootDBInstanceResponse": { - "RebootDBInstanceResult": { - "DBInstance": {{ database.to_json() }}, - "ResponseMetadata": { - "RequestId": "d55711cb-a1ab-11e4-99cf-55e92d4bbada" - } - } - } -}""" +ADD_TAGS_TO_RESOURCE_TEMPLATE = """ + + b194d9ca-a664-11e4-b688-194eaf8658fa + +""" - -DELETE_DATABASE_TEMPLATE = """{ "DeleteDBInstanceResponse": { - "DeleteDBInstanceResult": { - "DBInstance": {{ database.to_json() }} - }, - "ResponseMetadata": { - "RequestId": "523e3218-afc7-11c3-90f5-f90431260ab4" - } - } -}""" - -CREATE_SECURITY_GROUP_TEMPLATE = """{"CreateDBSecurityGroupResponse": { - "CreateDBSecurityGroupResult": { - "DBSecurityGroup": - {{ security_group.to_json() }}, - "ResponseMetadata": { - "RequestId": "462165d0-a77a-11e4-a5fa-75b30c556f97" - }} - } -}""" - -DESCRIBE_SECURITY_GROUPS_TEMPLATE = """{ - "DescribeDBSecurityGroupsResponse": { - "ResponseMetadata": { - "RequestId": "5df2014e-a779-11e4-bdb0-594def064d0c" - }, - "DescribeDBSecurityGroupsResult": { - "Marker": "null", - "DBSecurityGroups": [ - {% for security_group in security_groups %} - {%- if loop.index != 1 -%},{%- endif -%} - {{ security_group.to_json() }} - {% endfor %} - ] - } - } -}""" - -DELETE_SECURITY_GROUP_TEMPLATE = """{"DeleteDBSecurityGroupResponse": { - "ResponseMetadata": { - "RequestId": "97e846bd-a77d-11e4-ac58-91351c0f3426" - } -}}""" - -AUTHORIZE_SECURITY_GROUP_TEMPLATE = """{ - "AuthorizeDBSecurityGroupIngressResponse": { - "AuthorizeDBSecurityGroupIngressResult": { - "DBSecurityGroup": {{ security_group.to_json() }} - }, - 
"ResponseMetadata": { - "RequestId": "75d32fd5-a77e-11e4-8892-b10432f7a87d" - } - } -}""" - -CREATE_SUBNET_GROUP_TEMPLATE = """{ - "CreateDBSubnetGroupResponse": { - "CreateDBSubnetGroupResult": - { {{ subnet_group.to_json() }} }, - "ResponseMetadata": { "RequestId": "3a401b3f-bb9e-11d3-f4c6-37db295f7674" } - } -}""" - -DESCRIBE_SUBNET_GROUPS_TEMPLATE = """{ - "DescribeDBSubnetGroupsResponse": { - "DescribeDBSubnetGroupsResult": { - "DBSubnetGroups": [ - {% for subnet_group in subnet_groups %} - { {{ subnet_group.to_json() }} }{%- if not loop.last -%},{%- endif -%} - {% endfor %} - ], - "Marker": null - }, - "ResponseMetadata": { "RequestId": "b783db3b-b98c-11d3-fbc7-5c0aad74da7c" } - } -}""" - - -DELETE_SUBNET_GROUP_TEMPLATE = """{"DeleteDBSubnetGroupResponse": {"ResponseMetadata": {"RequestId": "13785dd5-a7fc-11e4-bb9c-7f371d0859b0"}}}""" - -CREATE_OPTION_GROUP_TEMPLATE = """{ - "CreateOptionGroupResponse": { - "CreateOptionGroupResult": { - "OptionGroup": {{ option_group.to_json() }} - }, - "ResponseMetadata": { - "RequestId": "1e38dad4-9f50-11e4-87ea-a31c60ed2e36" - } - } -}""" - -DELETE_OPTION_GROUP_TEMPLATE = \ - """{"DeleteOptionGroupResponse": {"ResponseMetadata": {"RequestId": "e2590367-9fa2-11e4-99cf-55e92d41c60e"}}}""" - -DESCRIBE_OPTION_GROUP_TEMPLATE = \ - """{"DescribeOptionGroupsResponse": { - "DescribeOptionGroupsResult": { - "Marker": null, - "OptionGroupsList": [ - {%- for option_group in option_groups -%} - {%- if loop.index != 1 -%},{%- endif -%} - {{ option_group.to_json() }} - {%- endfor -%} - ]}, - "ResponseMetadata": {"RequestId": "4caf445d-9fbc-11e4-87ea-a31c60ed2e36"} - }}""" - -DESCRIBE_OPTION_GROUP_OPTIONS_TEMPLATE = \ - """{"DescribeOptionGroupOptionsResponse": { - "DescribeOptionGroupOptionsResult": { - "Marker": null, - "OptionGroupOptions": [ - {%- for option_group_option in option_group_options -%} - {%- if loop.index != 1 -%},{%- endif -%} - {{ option_group_option.to_json() }} - {%- endfor -%} - ]}, - "ResponseMetadata": 
{"RequestId": "457f7bb8-9fbf-11e4-9084-5754f80d5144"} - }}""" - -MODIFY_OPTION_GROUP_TEMPLATE = \ - """{"ModifyOptionGroupResponse": { - "ResponseMetadata": { - "RequestId": "ce9284a5-a0de-11e4-b984-a11a53e1f328" - }, - "ModifyOptionGroupResult": - {{ option_group.to_json() }} - } - }""" - -LIST_TAGS_FOR_RESOURCE_TEMPLATE = \ - """{"ListTagsForResourceResponse": - {"ListTagsForResourceResult": - {"TagList": [ - {%- for tag in tags -%} - {%- if loop.index != 1 -%},{%- endif -%} - { - "Key": "{{ tag['Key'] }}", - "Value": "{{ tag['Value'] }}" - } - {%- endfor -%} - ]}, - "ResponseMetadata": { - "RequestId": "8c21ba39-a598-11e4-b688-194eaf8658fa" - } - } - }""" - -ADD_TAGS_TO_RESOURCE_TEMPLATE = \ - """{"ListTagsForResourceResponse": { - "ListTagsForResourceResult": { - "TagList": [ - {%- for tag in tags -%} - {%- if loop.index != 1 -%},{%- endif -%} - { - "Key": "{{ tag['Key'] }}", - "Value": "{{ tag['Value'] }}" - } - {%- endfor -%} - ]}, - "ResponseMetadata": { - "RequestId": "b194d9ca-a664-11e4-b688-194eaf8658fa" - } - } - }""" - -REMOVE_TAGS_FROM_RESOURCE_TEMPLATE = \ - """{"RemoveTagsFromResourceResponse": {"ResponseMetadata": {"RequestId": "c6499a01-a664-11e4-8069-fb454b71a80e"}}} - """ +REMOVE_TAGS_FROM_RESOURCE_TEMPLATE = """ + + b194d9ca-a664-11e4-b688-194eaf8658fa + +""" diff --git a/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py b/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py new file mode 100644 index 000000000..866197125 --- /dev/null +++ b/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py @@ -0,0 +1,201 @@ +from __future__ import unicode_literals + +template = { + "AWSTemplateFormatVersion" : "2010-09-09", + + "Description" : "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. 
**WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", + + "Parameters": { + "DBName": { + "Default": "MyDatabase", + "Description" : "The database name", + "Type": "String", + "MinLength": "1", + "MaxLength": "64", + "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." + }, + + "DBInstanceIdentifier": { + "Type": "String" + }, + + "DBUser": { + "NoEcho": "true", + "Description" : "The database admin account username", + "Type": "String", + "MinLength": "1", + "MaxLength": "16", + "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." + }, + + "DBPassword": { + "NoEcho": "true", + "Description" : "The database admin account password", + "Type": "String", + "MinLength": "1", + "MaxLength": "41", + "AllowedPattern" : "[a-zA-Z0-9]+", + "ConstraintDescription" : "must contain only alphanumeric characters." + }, + + "DBAllocatedStorage": { + "Default": "5", + "Description" : "The size of the database (Gb)", + "Type": "Number", + "MinValue": "5", + "MaxValue": "1024", + "ConstraintDescription" : "must be between 5 and 1024Gb." + }, + + "DBInstanceClass": { + "Description" : "The database instance type", + "Type": "String", + "Default": "db.m1.small", + "AllowedValues" : [ "db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"] +, + "ConstraintDescription" : "must select a valid database instance type." 
+ }, + + "EC2SecurityGroup": { + "Description" : "The EC2 security group that contains instances that need access to the database", + "Default": "default", + "Type": "String", + "AllowedPattern" : "[a-zA-Z0-9\\-]+", + "ConstraintDescription" : "must be a valid security group name." + }, + + "MultiAZ" : { + "Description" : "Multi-AZ master database", + "Type" : "String", + "Default" : "false", + "AllowedValues" : [ "true", "false" ], + "ConstraintDescription" : "must be true or false." + } + }, + + "Conditions" : { + "Is-EC2-VPC" : { "Fn::Or" : [ {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "eu-central-1" ]}, + {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "cn-north-1" ]}]}, + "Is-EC2-Classic" : { "Fn::Not" : [{ "Condition" : "Is-EC2-VPC"}]} + }, + + "Resources" : { + "DBParameterGroup": { + "Type": "AWS::RDS::DBParameterGroup", + "Properties" : { + "Description": "DB Parameter Goup", + "Family" : "MySQL5.1", + "Parameters": { + "BACKLOG_QUEUE_LIMIT": "2048" + } + } + }, + + "DBEC2SecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Condition" : "Is-EC2-VPC", + "Properties" : { + "GroupDescription": "Open database for access", + "SecurityGroupIngress" : [{ + "IpProtocol" : "tcp", + "FromPort" : "3306", + "ToPort" : "3306", + "SourceSecurityGroupName" : { "Ref" : "EC2SecurityGroup" } + }] + } + }, + + "DBSecurityGroup": { + "Type": "AWS::RDS::DBSecurityGroup", + "Condition" : "Is-EC2-Classic", + "Properties": { + "DBSecurityGroupIngress": [{ + "EC2SecurityGroupName": { "Ref": "EC2SecurityGroup" } + }], + "GroupDescription": "database access" + } + }, + + "my_vpc": { + "Type" : "AWS::EC2::VPC", + "Properties" : { + "CidrBlock" : "10.0.0.0/16", + } + }, + + "EC2Subnet": { + "Type" : "AWS::EC2::Subnet", + "Condition" : "Is-EC2-VPC", + "Properties" : { + "AvailabilityZone" : "eu-central-1a", + "CidrBlock" : "10.0.1.0/24", + "VpcId" : { "Ref" : "my_vpc" } + } + }, + + "DBSubnet": { + "Type": "AWS::RDS::DBSubnetGroup", + "Condition" : "Is-EC2-VPC", + "Properties": { + 
"DBSubnetGroupDescription": "my db subnet group", + "SubnetIds" : [ { "Ref": "EC2Subnet" } ], + } + }, + + "MasterDB" : { + "Type" : "AWS::RDS::DBInstance", + "Properties" : { + "DBInstanceIdentifier": { "Ref": "DBInstanceIdentifier" }, + "DBName" : { "Ref" : "DBName" }, + "AllocatedStorage" : { "Ref" : "DBAllocatedStorage" }, + "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, + "Engine" : "MySQL", + "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", { "Ref": "DBSubnet" }, { "Ref": "AWS::NoValue" }]}, + "MasterUsername" : { "Ref" : "DBUser" }, + "MasterUserPassword" : { "Ref" : "DBPassword" }, + "MultiAZ" : { "Ref" : "MultiAZ" }, + "Tags" : [{ "Key" : "Name", "Value" : "Master Database" }], + "VPCSecurityGroups": { "Fn::If" : [ "Is-EC2-VPC", [ { "Fn::GetAtt": [ "DBEC2SecurityGroup", "GroupId" ] } ], { "Ref" : "AWS::NoValue"}]}, + "DBSecurityGroups": { "Fn::If" : [ "Is-EC2-Classic", [ { "Ref": "DBSecurityGroup" } ], { "Ref" : "AWS::NoValue"}]} + }, + "DeletionPolicy" : "Snapshot" + }, + + "ReplicaDB" : { + "Type" : "AWS::RDS::DBInstance", + "Properties" : { + "SourceDBInstanceIdentifier" : { "Ref" : "MasterDB" }, + "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, + "Tags" : [{ "Key" : "Name", "Value" : "Read Replica Database" }] + } + } + }, + + "Outputs" : { + "EC2Platform" : { + "Description" : "Platform in which this stack is deployed", + "Value" : { "Fn::If" : [ "Is-EC2-VPC", "EC2-VPC", "EC2-Classic" ]} + }, + + "MasterJDBCConnectionString": { + "Description" : "JDBC connection string for the master database", + "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", + { "Fn::GetAtt": [ "MasterDB", "Endpoint.Address" ] }, + ":", + { "Fn::GetAtt": [ "MasterDB", "Endpoint.Port" ] }, + "/", + { "Ref": "DBName" }]]} + }, + "ReplicaJDBCConnectionString": { + "Description" : "JDBC connection string for the replica database", + "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", + { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Address" ] }, + ":", + { "Fn::GetAtt": [ "ReplicaDB", 
"Endpoint.Port" ] }, + "/", + { "Ref": "DBName" }]]} + } + } +} diff --git a/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py b/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py index b743f46f4..3e5efa04a 100644 --- a/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py +++ b/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py @@ -82,7 +82,6 @@ template = { }, "Resources" : { - "DBEC2SecurityGroup": { "Type": "AWS::EC2::SecurityGroup", "Condition" : "Is-EC2-VPC", @@ -101,9 +100,9 @@ template = { "Type": "AWS::RDS::DBSecurityGroup", "Condition" : "Is-EC2-Classic", "Properties": { - "DBSecurityGroupIngress": { + "DBSecurityGroupIngress": [{ "EC2SecurityGroupName": { "Ref": "EC2SecurityGroup" } - }, + }], "GroupDescription": "database access" } }, @@ -188,4 +187,4 @@ template = { { "Ref": "DBName" }]]} } } -} \ No newline at end of file +} diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 0fb74bef9..ff2cad29e 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -27,6 +27,7 @@ from moto import ( mock_kms, mock_lambda, mock_rds, + mock_rds2, mock_redshift, mock_route53, mock_sns, @@ -36,6 +37,7 @@ from moto import ( from .fixtures import ( ec2_classic_eip, fn_join, + rds_mysql_with_db_parameter_group, rds_mysql_with_read_replica, redshift, route53_ec2_instance_with_public_ip, @@ -693,6 +695,44 @@ def test_vpc_single_instance_in_subnet(): eip_resource = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] eip_resource.physical_resource_id.should.equal(eip.allocation_id) +@mock_cloudformation() +@mock_ec2() +@mock_rds2() +def test_rds_db_parameter_groups(): + ec2_conn = boto.ec2.connect_to_region("us-west-1") + ec2_conn.create_security_group('application', 'Our Application 
Group') + + template_json = json.dumps(rds_mysql_with_db_parameter_group.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + parameters=[ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", "db.m1.medium"), + ("EC2SecurityGroup", "application"), + ("MultiAZ", "true"), + ], + ) + + rds_conn = boto3.client('rds', region_name="us-west-1") + + db_parameter_groups = rds_conn.describe_db_parameter_groups() + len(db_parameter_groups['DBParameterGroups']).should.equal(1) + db_parameter_group_name = db_parameter_groups['DBParameterGroups'][0]['DBParameterGroupName'] + + found_cloudformation_set_parameter = False + for db_parameter in rds_conn.describe_db_parameters(DBParameterGroupName=db_parameter_group_name)['Parameters']: + if db_parameter['ParameterName'] == 'BACKLOG_QUEUE_LIMIT' and db_parameter['ParameterValue'] == '2048': + found_cloudformation_set_parameter = True + + found_cloudformation_set_parameter.should.equal(True) + + @mock_cloudformation() @mock_ec2() diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index df8ede179..6078b5f6b 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -238,6 +238,32 @@ def test_create_database_replica(): primary = conn.get_all_dbinstances("db-master-1")[0] list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) +@disable_on_py3() +@mock_rds +def test_create_cross_region_database_replica(): + west_1_conn = boto.rds.connect_to_region("us-west-1") + west_2_conn = boto.rds.connect_to_region("us-west-2") + + primary = west_1_conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + + primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1" + replica = west_2_conn.create_dbinstance_read_replica( + "replica", + primary_arn, + "db.m1.small", + 
) + + primary = west_1_conn.get_all_dbinstances("db-master-1")[0] + primary.read_replica_dbinstance_identifiers[0].should.equal("replica") + + replica = west_2_conn.get_all_dbinstances("replica")[0] + replica.instance_class.should.equal("db.m1.small") + + west_2_conn.delete_dbinstance("replica") + + primary = west_1_conn.get_all_dbinstances("db-master-1")[0] + list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) + @disable_on_py3() @mock_rds diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 7ca48f0aa..4e1c2b73c 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -2,479 +2,648 @@ from __future__ import unicode_literals import boto.rds2 import boto.vpc -from boto.exception import BotoServerError +from botocore.exceptions import ClientError, ParamValidationError +import boto3 import sure # noqa -from moto import mock_ec2, mock_rds2 +from moto import mock_ec2, mock_kms, mock_rds2 from tests.helpers import disable_on_py3 @disable_on_py3() @mock_rds2 def test_create_database(): - conn = boto.rds2.connect_to_region("us-west-2") - database = conn.create_db_instance(db_instance_identifier='db-master-1', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"]) - database['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']['DBInstanceStatus'].should.equal('available') - database['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") - database['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']['AllocatedStorage'].should.equal('10') - database['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") - database['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']['MasterUsername'].should.equal("root") - 
database['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']['DBSecurityGroups'][0]['DBSecurityGroup']['DBSecurityGroupName'].should.equal('my_sg') + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + database['DBInstance']['DBInstanceStatus'].should.equal('available') + database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") + database['DBInstance']['AllocatedStorage'].should.equal(10) + database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") + database['DBInstance']['MasterUsername'].should.equal("root") + database['DBInstance']['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal('my_sg') @disable_on_py3() @mock_rds2 def test_get_databases(): - conn = boto.rds2.connect_to_region("us-west-2") + conn = boto3.client('rds', region_name='us-west-2') instances = conn.describe_db_instances() - list(instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']).should.have.length_of(0) + list(instances['DBInstances']).should.have.length_of(0) - conn.create_db_instance(db_instance_identifier='db-master-1', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"]) - conn.create_db_instance(db_instance_identifier='db-master-2', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"]) + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + 
conn.create_db_instance(DBInstanceIdentifier='db-master-2', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) instances = conn.describe_db_instances() - list(instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']).should.have.length_of(2) + list(instances['DBInstances']).should.have.length_of(2) - instances = conn.describe_db_instances("db-master-1") - list(instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']).should.have.length_of(1) - instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['DBInstanceIdentifier'].should.equal("db-master-1") + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + list(instances['DBInstances']).should.have.length_of(1) + instances['DBInstances'][0]['DBInstanceIdentifier'].should.equal("db-master-1") @disable_on_py3() @mock_rds2 def test_describe_non_existant_database(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.describe_db_instances.when.called_with("not-a-db").should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.describe_db_instances.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_modify_db_instance(): - conn = boto.rds2.connect_to_region("us-west-2") - database = conn.create_db_instance(db_instance_identifier='db-master-1', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"]) - instances = conn.describe_db_instances('db-master-1') - instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['AllocatedStorage'].should.equal('10') - conn.modify_db_instance(db_instance_identifier='db-master-1', allocated_storage=20, 
apply_immediately=True) - instances = conn.describe_db_instances('db-master-1') - instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['AllocatedStorage'].should.equal('20') + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + instances['DBInstances'][0]['AllocatedStorage'].should.equal(10) + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=20, + ApplyImmediately=True) + instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) @disable_on_py3() @mock_rds2 def test_modify_non_existant_database(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.modify_db_instance.when.called_with(db_instance_identifier='not-a-db', - allocated_storage=20, - apply_immediately=True).should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.modify_db_instance.when.called_with(DBInstanceIdentifier='not-a-db', + AllocatedStorage=20, + ApplyImmediately=True).should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_reboot_db_instance(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_db_instance(db_instance_identifier='db-master-1', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"]) - database = conn.reboot_db_instance('db-master-1') - database['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") + conn = boto3.client('rds', region_name='us-west-2') + 
conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + database = conn.reboot_db_instance(DBInstanceIdentifier='db-master-1') + database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") @disable_on_py3() @mock_rds2 def test_reboot_non_existant_database(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.reboot_db_instance.when.called_with("not-a-db").should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.reboot_db_instance.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_delete_database(): - conn = boto.rds2.connect_to_region("us-west-2") + conn = boto3.client('rds', region_name='us-west-2') instances = conn.describe_db_instances() - list(instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']).should.have.length_of(0) - conn.create_db_instance(db_instance_identifier='db-master-1', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"]) + list(instances['DBInstances']).should.have.length_of(0) + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) instances = conn.describe_db_instances() - list(instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']).should.have.length_of(1) + list(instances['DBInstances']).should.have.length_of(1) - conn.delete_db_instance("db-master-1") + conn.delete_db_instance(DBInstanceIdentifier="db-master-1") instances = conn.describe_db_instances() - 
list(instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']).should.have.length_of(0) + list(instances['DBInstances']).should.have.length_of(0) @disable_on_py3() @mock_rds2 def test_delete_non_existant_database(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.delete_db_instance.when.called_with("not-a-db").should.throw(BotoServerError) + conn = boto3.client('rds2', region_name="us-west-2") + conn.delete_db_instance.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_create_option_group(): - conn = boto.rds2.connect_to_region("us-west-2") - option_group = conn.create_option_group('test', 'mysql', '5.6', 'test option group') - option_group['CreateOptionGroupResponse']['CreateOptionGroupResult']['OptionGroup']['OptionGroupName'].should.equal('test') - option_group['CreateOptionGroupResponse']['CreateOptionGroupResult']['OptionGroup']['EngineName'].should.equal('mysql') - option_group['CreateOptionGroupResponse']['CreateOptionGroupResult']['OptionGroup']['OptionGroupDescription'].should.equal('test option group') - option_group['CreateOptionGroupResponse']['CreateOptionGroupResult']['OptionGroup']['MajorEngineVersion'].should.equal('5.6') + conn = boto3.client('rds', region_name='us-west-2') + option_group = conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_group['OptionGroup']['OptionGroupName'].should.equal('test') + option_group['OptionGroup']['EngineName'].should.equal('mysql') + option_group['OptionGroup']['OptionGroupDescription'].should.equal('test option group') + option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') @disable_on_py3() @mock_rds2 def test_create_option_group_bad_engine_name(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_option_group.when.called_with('test', 'invalid_engine', '5.6', 'test invalid 
engine').should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='invalid_engine', + MajorEngineVersion='5.6', + OptionGroupDescription='test invalid engine').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_create_option_group_bad_engine_major_version(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_option_group.when.called_with('test', 'mysql', '6.6.6', 'test invalid engine version').should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='6.6.6', + OptionGroupDescription='test invalid engine version').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_create_option_group_empty_description(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_option_group.when.called_with('test', 'mysql', '5.6', '').should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_create_option_group_duplicate(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_option_group('test', 'mysql', '5.6', 'test option group') - conn.create_option_group.when.called_with('test', 'mysql', '5.6', 'foo').should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group').should.throw(ClientError) @disable_on_py3() @mock_rds2 def 
test_describe_option_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_option_group('test', 'mysql', '5.6', 'test option group') - option_groups = conn.describe_option_groups('test') - option_groups['DescribeOptionGroupsResponse']['DescribeOptionGroupsResult']['OptionGroupsList'][0]['OptionGroupName'].should.equal('test') + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_groups = conn.describe_option_groups(OptionGroupName='test') + option_groups['OptionGroupsList'][0]['OptionGroupName'].should.equal('test') @disable_on_py3() @mock_rds2 def test_describe_non_existant_option_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.describe_option_groups.when.called_with("not-a-option-group").should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.describe_option_groups.when.called_with(OptionGroupName="not-a-option-group").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_delete_option_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_option_group('test', 'mysql', '5.6', 'test option group') - option_groups = conn.describe_option_groups('test') - option_groups['DescribeOptionGroupsResponse']['DescribeOptionGroupsResult']['OptionGroupsList'][0]['OptionGroupName'].should.equal('test') - conn.delete_option_group('test') - conn.describe_option_groups.when.called_with('test').should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_groups = conn.describe_option_groups(OptionGroupName='test') + option_groups['OptionGroupsList'][0]['OptionGroupName'].should.equal('test') + conn.delete_option_group(OptionGroupName='test') + 
conn.describe_option_groups.when.called_with(OptionGroupName='test').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_delete_non_existant_option_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.delete_option_group.when.called_with('non-existant').should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_option_group.when.called_with(OptionGroupName='non-existant').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_describe_option_group_options(): - conn = boto.rds2.connect_to_region("us-west-2") - option_group_options = conn.describe_option_group_options('sqlserver-ee') - len(option_group_options['DescribeOptionGroupOptionsResponse']['DescribeOptionGroupOptionsResult']['OptionGroupOptions']).should.equal(4) - option_group_options = conn.describe_option_group_options('sqlserver-ee', '11.00') - len(option_group_options['DescribeOptionGroupOptionsResponse']['DescribeOptionGroupOptionsResult']['OptionGroupOptions']).should.equal(2) - option_group_options = conn.describe_option_group_options('mysql', '5.6') - len(option_group_options['DescribeOptionGroupOptionsResponse']['DescribeOptionGroupOptionsResult']['OptionGroupOptions']).should.equal(1) - conn.describe_option_group_options.when.called_with('non-existent').should.throw(BotoServerError) - conn.describe_option_group_options.when.called_with('mysql', 'non-existent').should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + option_group_options = conn.describe_option_group_options(EngineName='sqlserver-ee') + len(option_group_options['OptionGroupOptions']).should.equal(4) + option_group_options = conn.describe_option_group_options(EngineName='sqlserver-ee', MajorEngineVersion='11.00') + len(option_group_options['OptionGroupOptions']).should.equal(2) + option_group_options = conn.describe_option_group_options(EngineName='mysql', MajorEngineVersion='5.6') + 
len(option_group_options['OptionGroupOptions']).should.equal(1) + conn.describe_option_group_options.when.called_with(EngineName='non-existent').should.throw(ClientError) + conn.describe_option_group_options.when.called_with(EngineName='mysql', MajorEngineVersion='non-existent').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_modify_option_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_option_group('test', 'mysql', '5.6', 'test option group') + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', EngineName='mysql', MajorEngineVersion='5.6', OptionGroupDescription='test option group') # TODO: create option and validate before deleting. # if Someone can tell me how the hell to use this function # to add options to an option_group, I can finish coding this. - result = conn.modify_option_group('test', [], ['MEMCACHED'], True) - result['ModifyOptionGroupResponse']['ModifyOptionGroupResult']['EngineName'].should.equal('mysql') - result['ModifyOptionGroupResponse']['ModifyOptionGroupResult']['Options'].should.equal([]) - result['ModifyOptionGroupResponse']['ModifyOptionGroupResult']['OptionGroupName'].should.equal('test') + result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True) + result['OptionGroup']['EngineName'].should.equal('mysql') + result['OptionGroup']['Options'].should.equal([]) + result['OptionGroup']['OptionGroupName'].should.equal('test') @disable_on_py3() @mock_rds2 def test_modify_option_group_no_options(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_option_group('test', 'mysql', '5.6', 'test option group') - conn.modify_option_group.when.called_with('test').should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', EngineName='mysql', MajorEngineVersion='5.6', OptionGroupDescription='test option 
group') + conn.modify_option_group.when.called_with(OptionGroupName='test').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_modify_non_existant_option_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.modify_option_group.when.called_with('non-existant', [('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) @disable_on_py3() @mock_rds2 def test_delete_non_existant_database(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.delete_db_instance.when.called_with("not-a-db").should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_db_instance.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_list_tags_invalid_arn(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.list_tags_for_resource.when.called_with('arn:aws:rds:bad-arn').should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.list_tags_for_resource.when.called_with(ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_list_tags_db(): - conn = boto.rds2.connect_to_region("us-west-2") - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:db:foo') - result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList'].should.equal([]) - conn.create_db_instance(db_instance_identifier='db-with-tags', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"], - tags=[('foo', 'bar'), ('foo1', 
'bar1')]) - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:db:db-with-tags') - result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + conn = boto3.client('rds', region_name='us-west-2') + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') + result['TagList'].should.equal([]) + conn.create_db_instance(DBInstanceIdentifier='db-with-tags', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg'], + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) @disable_on_py3() @mock_rds2 def test_add_tags_db(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_db_instance(db_instance_identifier='db-without-tags', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"], - tags=[('foo', 'bar'), ('foo1', 'bar1')]) - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:db:db-without-tags') - list(result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList']).should.have.length_of(2) - conn.add_tags_to_resource('arn:aws:rds:us-west-2:1234567890:db:db-without-tags', - [('foo', 'fish'), ('foo2', 'bar2')]) - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:db:db-without-tags') - list(result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList']).should.have.length_of(3) + conn = boto3.client('rds', region_name='us-west-2') + 
conn.create_db_instance(DBInstanceIdentifier='db-without-tags', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg'], + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') + list(result['TagList']).should.have.length_of(2) + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }, + ]) + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') + list(result['TagList']).should.have.length_of(3) @disable_on_py3() @mock_rds2 def test_remove_tags_db(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_db_instance(db_instance_identifier='db-with-tags', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"], - tags=[('foo', 'bar'), ('foo1', 'bar1')]) - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:db:db-with-tags') - len(result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList']).should.equal(2) - conn.remove_tags_from_resource('arn:aws:rds:us-west-2:1234567890:db:db-with-tags', ['foo']) - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:db:db-with-tags') - len(result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList']).should.equal(1) + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-with-tags', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + 
DBSecurityGroups=['my_sg'], + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo']) + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + len(result['TagList']).should.equal(1) @disable_on_py3() @mock_rds2 def test_add_tags_option_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_option_group('test', 'mysql', '5.6', 'test option group') - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:og:test') - list(result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList']).should.have.length_of(0) - conn.add_tags_to_resource('arn:aws:rds:us-west-2:1234567890:og:test', - [('foo', 'fish'), ('foo2', 'bar2')]) - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:og:test') - list(result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList']).should.have.length_of(2) + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(0) + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }]) + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(2) @disable_on_py3() @mock_rds2 def test_remove_tags_option_group(): - conn = 
boto.rds2.connect_to_region("us-west-2") - conn.create_option_group('test', 'mysql', '5.6', 'test option group') - conn.add_tags_to_resource('arn:aws:rds:us-west-2:1234567890:og:test', - [('foo', 'fish'), ('foo2', 'bar2')]) - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:og:test') - list(result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource('arn:aws:rds:us-west-2:1234567890:og:test', - ['foo']) - result = conn.list_tags_for_resource('arn:aws:rds:us-west-2:1234567890:og:test') - list(result['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList']).should.have.length_of(1) + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }]) + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + TagKeys=['foo']) + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(1) @disable_on_py3() @mock_rds2 def test_create_database_security_group(): - conn = boto.rds2.connect_to_region("us-west-2") + conn = boto3.client('rds', region_name='us-west-2') - result = conn.create_db_security_group('db_sg', 'DB Security Group') - result['CreateDBSecurityGroupResponse']['CreateDBSecurityGroupResult']['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") - 
result['CreateDBSecurityGroupResponse']['CreateDBSecurityGroupResult']['DBSecurityGroup']['DBSecurityGroupDescription'].should.equal("DB Security Group") - result['CreateDBSecurityGroupResponse']['CreateDBSecurityGroupResult']['DBSecurityGroup']['IPRanges'].should.equal([]) + result = conn.create_db_security_group(DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') + result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") + result['DBSecurityGroup']['DBSecurityGroupDescription'].should.equal("DB Security Group") + result['DBSecurityGroup']['IPRanges'].should.equal([]) @disable_on_py3() @mock_rds2 def test_get_security_groups(): - conn = boto.rds2.connect_to_region("us-west-2") + conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_security_groups() - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'].should.have.length_of(0) + result['DBSecurityGroups'].should.have.length_of(0) - conn.create_db_security_group('db_sg1', 'DB Security Group') - conn.create_db_security_group('db_sg2', 'DB Security Group') + conn.create_db_security_group(DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group(DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') result = conn.describe_db_security_groups() - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'].should.have.length_of(2) + result['DBSecurityGroups'].should.have.length_of(2) - result = conn.describe_db_security_groups("db_sg1") - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'].should.have.length_of(1) - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1") + result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1") + 
result['DBSecurityGroups'].should.have.length_of(1) + result['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1") @disable_on_py3() @mock_rds2 def test_get_non_existant_security_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.describe_db_security_groups.when.called_with("not-a-sg").should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.describe_db_security_groups.when.called_with(DBSecurityGroupName="not-a-sg").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_delete_database_security_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_db_security_group('db_sg', 'DB Security Group') + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_security_group(DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') result = conn.describe_db_security_groups() - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'].should.have.length_of(1) + result['DBSecurityGroups'].should.have.length_of(1) - conn.delete_db_security_group("db_sg") + conn.delete_db_security_group(DBSecurityGroupName="db_sg") result = conn.describe_db_security_groups() - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'].should.have.length_of(0) + result['DBSecurityGroups'].should.have.length_of(0) @disable_on_py3() @mock_rds2 def test_delete_non_existant_security_group(): - conn = boto.rds2.connect_to_region("us-west-2") - conn.delete_db_security_group.when.called_with("not-a-db").should.throw(BotoServerError) + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_db_security_group.when.called_with(DBSecurityGroupName="not-a-db").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_security_group_authorize(): - conn = boto.rds2.connect_to_region("us-west-2") - security_group = conn.create_db_security_group('db_sg', 'DB Security Group') - 
security_group['CreateDBSecurityGroupResponse']['CreateDBSecurityGroupResult']['DBSecurityGroup']['IPRanges'].should.equal([]) + conn = boto3.client('rds', region_name='us-west-2') + security_group = conn.create_db_security_group(DBSecurityGroupName='db_sg', + DBSecurityGroupDescription='DB Security Group') + security_group['DBSecurityGroup']['IPRanges'].should.equal([]) - conn.authorize_db_security_group_ingress(db_security_group_name='db_sg', - cidrip='10.3.2.45/32') + conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', + CIDRIP='10.3.2.45/32') - result = conn.describe_db_security_groups("db_sg") - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1) - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'][0]['IPRanges'].should.equal(['10.3.2.45/32']) + result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") + result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1) + result['DBSecurityGroups'][0]['IPRanges'].should.equal([{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]) - conn.authorize_db_security_group_ingress(db_security_group_name='db_sg', - cidrip='10.3.2.46/32') - result = conn.describe_db_security_groups("db_sg") - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(2) - result['DescribeDBSecurityGroupsResponse']['DescribeDBSecurityGroupsResult']['DBSecurityGroups'][0]['IPRanges'].should.equal(['10.3.2.45/32', '10.3.2.46/32']) + conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', + CIDRIP='10.3.2.46/32') + result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") + result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(2) + result['DBSecurityGroups'][0]['IPRanges'].should.equal([ + {'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}, + {'Status': 'authorized', 
'CIDRIP': '10.3.2.46/32'}, + ]) @disable_on_py3() @mock_rds2 def test_add_security_group_to_database(): - conn = boto.rds2.connect_to_region("us-west-2") + conn = boto3.client('rds', region_name='us-west-2') + + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234) - conn.create_db_instance(db_instance_identifier='db-master-1', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2') result = conn.describe_db_instances() - result['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['DBSecurityGroups'].should.equal([]) - conn.create_db_security_group('db_sg', 'DB Security Group') - conn.modify_db_instance(db_instance_identifier='db-master-1', - db_security_groups=['db_sg']) + result['DBInstances'][0]['DBSecurityGroups'].should.equal([]) + conn.create_db_security_group(DBSecurityGroupName='db_sg', + DBSecurityGroupDescription='DB Security Group') + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + DBSecurityGroups=['db_sg']) result = conn.describe_db_instances() - result['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['DBSecurityGroups'][0]['DBSecurityGroup']['DBSecurityGroupName'].should.equal('db_sg') + result['DBInstances'][0]['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal('db_sg') + + +@disable_on_py3() +@mock_rds2 +def test_list_tags_security_group(): + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", + DBSecurityGroupDescription='DB Security Group', + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] + 
resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(security_group) + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@disable_on_py3() +@mock_rds2 +def test_add_tags_security_group(): + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", + DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName'] + + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(security_group) + conn.add_tags_to_resource(ResourceName=resource, + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + +@disable_on_py3() +@mock_rds2 +def test_remove_tags_security_group(): + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", + DBSecurityGroupDescription='DB Security Group', + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] + + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(security_group) + conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) @disable_on_py3() @mock_ec2 @mock_rds2 def test_create_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = 
vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24") + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet1 = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet2 = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] - subnet_ids = [subnet1.id, subnet2.id] - conn = boto.rds2.connect_to_region("us-west-2") - result = conn.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids) - result['CreateDBSubnetGroupResponse']['CreateDBSubnetGroupResult']['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") - result['CreateDBSubnetGroupResponse']['CreateDBSubnetGroupResult']['DBSubnetGroup']['DBSubnetGroupDescription'].should.equal("my db subnet") - subnets = result['CreateDBSubnetGroupResponse']['CreateDBSubnetGroupResult']['DBSubnetGroup']['Subnets'] - subnet_group_ids = [subnets['Subnet'][0]['SubnetIdentifier'], subnets['Subnet'][1]['SubnetIdentifier']] + subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] + conn = boto3.client('rds', region_name='us-west-2') + result = conn.create_db_subnet_group(DBSubnetGroupName='db_subnet', + DBSubnetGroupDescription='my db subnet', + SubnetIds=subnet_ids) + result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") + result['DBSubnetGroup']['DBSubnetGroupDescription'].should.equal("my db subnet") + subnets = result['DBSubnetGroup']['Subnets'] + subnet_group_ids = [subnets[0]['SubnetIdentifier'], subnets[1]['SubnetIdentifier']] list(subnet_group_ids).should.equal(subnet_ids) @@ -482,96 +651,370 @@ def test_create_database_subnet_group(): @mock_ec2 @mock_rds2 def test_create_database_in_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = 
vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - conn.create_db_instance(db_instance_identifier='db-master-1', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_subnet_group_name='db_subnet1') - result = conn.describe_db_instances("db-master-1") - result['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet1") + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSubnetGroupName='db_subnet1') + result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + result['DBInstances'][0]['DBSubnetGroup']['DBSubnetGroupName'].should.equal('db_subnet1') @disable_on_py3() @mock_ec2 @mock_rds2 def test_describe_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - conn = boto.rds2.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id]) + conn = boto3.client('rds', region_name='us-west-2') + 
conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + conn.create_db_subnet_group(DBSubnetGroupName='db_subnet2', + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) - resp = conn.describe_db_subnet_groups() - groups_resp = resp['DescribeDBSubnetGroupsResponse'] + resp = conn.describe_db_subnet_groups() + resp['DBSubnetGroups'].should.have.length_of(2) - subnet_groups = groups_resp['DescribeDBSubnetGroupsResult']['DBSubnetGroups'] - subnet_groups.should.have.length_of(2) + subnets = resp['DBSubnetGroups'][0]['Subnets'] + subnets.should.have.length_of(1) - subnets = groups_resp['DescribeDBSubnetGroupsResult']['DBSubnetGroups'][0]['DBSubnetGroup']['Subnets'] - subnets.should.have.length_of(1) + list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1")['DBSubnetGroups']).should.have.length_of(1) - list(resp).should.have.length_of(1) - list(groups_resp).should.have.length_of(2) - list(conn.describe_db_subnet_groups("db_subnet1")).should.have.length_of(1) - - conn.describe_db_subnet_groups.when.called_with("not-a-subnet").should.throw(BotoServerError) + conn.describe_db_subnet_groups.when.called_with(DBSubnetGroupName="not-a-subnet").should.throw(ClientError) @disable_on_py3() @mock_ec2 @mock_rds2 def test_delete_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - conn = boto.rds2.connect_to_region("us-west-2") + conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() - result['DescribeDBSubnetGroupsResponse']['DescribeDBSubnetGroupsResult']['DBSubnetGroups'].should.have.length_of(0) + 
result['DBSubnetGroups'].should.have.length_of(0) - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) result = conn.describe_db_subnet_groups() - result['DescribeDBSubnetGroupsResponse']['DescribeDBSubnetGroupsResult']['DBSubnetGroups'].should.have.length_of(1) + result['DBSubnetGroups'].should.have.length_of(1) - conn.delete_db_subnet_group("db_subnet1") + conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1") result = conn.describe_db_subnet_groups() - result['DescribeDBSubnetGroupsResponse']['DescribeDBSubnetGroupsResult']['DBSubnetGroups'].should.have.length_of(0) + result['DBSubnetGroups'].should.have.length_of(0) - conn.delete_db_subnet_group.when.called_with("db_subnet1").should.throw(BotoServerError) + conn.delete_db_subnet_group.when.called_with(DBSubnetGroupName="db_subnet1").should.throw(ClientError) + + +@disable_on_py3() +@mock_ec2 +@mock_rds2 +def test_list_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + +@disable_on_py3() +@mock_ec2 +@mock_rds2 +def 
test_add_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[])['DBSubnetGroup']['DBSubnetGroupName'] + resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) + + conn.add_tags_to_resource(ResourceName=resource, + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + +@disable_on_py3() +@mock_ec2 +@mock_rds2 +def test_remove_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] + resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) + + conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) @disable_on_py3() @mock_rds2 def 
test_create_database_replica(): - conn = boto.rds2.connect_to_region("us-west-2") + conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(db_instance_identifier='db-master-1', - allocated_storage=10, - engine='postgres', - db_instance_class='db.m1.small', - master_username='root', - master_user_password='hunter2', - db_security_groups=["my_sg"]) + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) - replica = conn.create_db_instance_read_replica("db-replica-1", "db-master-1", "db.m1.small") - replica['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']['ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') - replica['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') - replica['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') + replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", + SourceDBInstanceIdentifier="db-master-1", + DBInstanceClass="db.m1.small") + replica['DBInstance']['ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') + replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') + replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') - master = conn.describe_db_instances("db-master-1") - master['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal(['db-replica-1']) + master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal(['db-replica-1']) - 
conn.delete_db_instance("db-replica-1") + conn.delete_db_instance(DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) - master = conn.describe_db_instances("db-master-1") - master['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([]) + master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([]) + +@disable_on_py3() +@mock_rds2 +@mock_kms +def test_create_database_with_encrypted_storage(): + kms_conn = boto3.client('kms', region_name='us-west-2') + key = kms_conn.create_key(Policy='my RDS encryption policy', + Description='RDS encryption key', + KeyUsage='ENCRYPT_DECRYPT') + + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + StorageEncrypted=True, + KmsKeyId=key['KeyMetadata']['KeyId']) + + database['DBInstance']['StorageEncrypted'].should.equal(True) + database['DBInstance']['KmsKeyId'].should.equal(key['KeyMetadata']['KeyId']) + +@disable_on_py3() +@mock_rds2 +def test_create_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + db_parameter_group['DBParameterGroup']['DBParameterGroupName'].should.equal('test') + db_parameter_group['DBParameterGroup']['DBParameterGroupFamily'].should.equal('mysql5.6') + db_parameter_group['DBParameterGroup']['Description'].should.equal('test parameter group') + +@disable_on_py3() +@mock_rds2 +def test_create_db_instance_with_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_group = 
conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='mysql', + DBInstanceClass='db.m1.small', + DBParameterGroupName='test', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234) + + len(database['DBInstance']['DBParameterGroups']).should.equal(1) + database['DBInstance']['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') + database['DBInstance']['DBParameterGroups'][0]['ParameterApplyStatus'].should.equal('in-sync') + +@disable_on_py3() +@mock_rds2 +def test_modify_db_instance_with_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='mysql', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234) + + len(database['DBInstance']['DBParameterGroups']).should.equal(1) + database['DBInstance']['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('default.mysql5.6') + database['DBInstance']['DBParameterGroups'][0]['ParameterApplyStatus'].should.equal('in-sync') + + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + DBParameterGroupName='test', + ApplyImmediately=True) + + database = conn.describe_db_instances(DBInstanceIdentifier='db-master-1')['DBInstances'][0] + len(database['DBParameterGroups']).should.equal(1) + database['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') + database['DBParameterGroups'][0]['ParameterApplyStatus'].should.equal('in-sync') + + +@disable_on_py3() +@mock_rds2 +def test_create_db_parameter_group_empty_description(): + conn = boto3.client('rds', 
region_name='us-west-2') + conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='').should.throw(ClientError) + + +@disable_on_py3() +@mock_rds2 +def test_create_db_parameter_group_duplicate(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group').should.throw(ClientError) + + +@disable_on_py3() +@mock_rds2 +def test_describe_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') + + +@disable_on_py3() +@mock_rds2 +def test_describe_non_existant_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') + len(db_parameter_groups['DBParameterGroups']).should.equal(0) + + +@disable_on_py3() +@mock_rds2 +def test_delete_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') + conn.delete_db_parameter_group(DBParameterGroupName='test') + db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') + 
len(db_parameter_groups['DBParameterGroups']).should.equal(0) + +@disable_on_py3() +@mock_rds2 +def test_modify_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + modify_result = conn.modify_db_parameter_group(DBParameterGroupName='test', + Parameters=[{ + 'ParameterName': 'foo', + 'ParameterValue': 'foo_val', + 'Description': 'test param', + 'ApplyMethod': 'immediate' + }] + ) + + modify_result['DBParameterGroupName'].should.equal('test') + + db_parameters = conn.describe_db_parameters(DBParameterGroupName='test') + db_parameters['Parameters'][0]['ParameterName'].should.equal('foo') + db_parameters['Parameters'][0]['ParameterValue'].should.equal('foo_val') + db_parameters['Parameters'][0]['Description'].should.equal('test param') + db_parameters['Parameters'][0]['ApplyMethod'].should.equal('immediate') + + +@disable_on_py3() +@mock_rds2 +def test_delete_non_existant_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_db_parameter_group.when.called_with(DBParameterGroupName='non-existant').should.throw(ClientError) + +@disable_on_py3() +@mock_rds2 +def test_create_parameter_group_with_tags(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group', + Tags=[{ + 'Key': 'foo', + 'Value': 'bar', + }]) + result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') + result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}]) From 6ac8c2b4da157c7369e83251b99b9a4159e9a0e9 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 11 Jan 2017 21:21:42 -0500 Subject: [PATCH 022/213] Add boto3 standalone example. 
--- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index 0be93054f..c05f1dff4 100644 --- a/README.md +++ b/README.md @@ -248,6 +248,16 @@ proxy_port = 5000 proxy = 127.0.0.1 ``` +If you want to use boto3 with this, you can pass an `endpoint_url` to the resource + +```python +boto3.resource( + service_name='s3', + region_name='us-west-1', + endpoint_url='http://localhost:5000', +) +``` + ## Install ```console From a29215008723668848968e399b12fb46771fcf61 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 11 Jan 2017 22:35:27 -0500 Subject: [PATCH 023/213] Throw exception if same security group rule added twice. Closes #737. --- moto/ec2/models.py | 13 +++++++++++-- tests/test_ec2/test_security_groups.py | 25 +++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 9146b283d..a3e333dc7 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1255,6 +1255,15 @@ class SecurityGroup(TaggedEC2Resource): return self.id raise UnformattedGetAttTemplateException() + def add_ingress_rule(self, rule): + if rule in self.ingress_rules: + raise InvalidParameterValueError('security_group') + else: + self.ingress_rules.append(rule) + + def add_egress_rule(self, rule): + self.egress_rules.append(rule) + class SecurityGroupBackend(object): @@ -1367,7 +1376,7 @@ class SecurityGroupBackend(object): source_groups.append(source_group) security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups) - group.ingress_rules.append(security_rule) + group.add_ingress_rule(security_rule) def revoke_security_group_ingress(self, group_name_or_id, @@ -1432,7 +1441,7 @@ class SecurityGroupBackend(object): source_groups.append(source_group) security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups) - group.egress_rules.append(security_rule) + group.add_egress_rule(security_rule) def revoke_security_group_egress(self, 
group_name_or_id, diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 585f97eeb..204380562 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -8,6 +8,7 @@ from nose.tools import assert_raises import boto3 import boto +from botocore.exceptions import ClientError from boto.exception import EC2ResponseError, JSONResponseError import sure # noqa @@ -382,6 +383,26 @@ def test_authorize_all_protocols_with_no_port_specification(): Boto3 ''' +@mock_ec2 +def test_add_same_rule_twice_throws_error(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + sg = ec2.create_security_group(GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id) + + ip_permissions = [ + { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'IpRanges': [{"CidrIp": "1.2.3.4/32"}] + }, + ] + sg.authorize_ingress(IpPermissions=ip_permissions) + + with assert_raises(ClientError) as ex: + sg.authorize_ingress(IpPermissions=ip_permissions) + @mock_ec2 def test_security_group_tagging_boto3(): @@ -423,8 +444,8 @@ def test_authorize_and_revoke_in_bulk(): }, { 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, + 'FromPort': 27018, + 'ToPort': 27018, 'UserIdGroupPairs': [{'GroupId': sg02.id, 'UserId': sg02.owner_id}], 'IpRanges': [] }, From d42432bfefc75a3123ac43f471c69a56eccf5b55 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 18 Jan 2017 18:36:50 -0800 Subject: [PATCH 024/213] IAM: raise error if requiested instance profile does not exist (#802) Signed-off-by: Andrew Harris --- moto/iam/models.py | 2 ++ tests/test_iam/test_iam.py | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/moto/iam/models.py b/moto/iam/models.py index 60b9b743d..d27722f33 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -501,6 +501,8 @@ class IAMBackend(BaseBackend): if profile.name == profile_name: return profile + raise 
IAMNotFoundException("Instance profile {0} not found".format(profile_name)) + def get_instance_profiles(self): return self.instance_profiles.values() diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index bedea4e01..de8f89a59 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -62,6 +62,14 @@ def test_get_role__should_throw__when_role_does_not_exist(): conn.get_role('unexisting_role') +@mock_iam() +@raises(BotoServerError) +def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist(): + conn = boto.connect_iam() + + conn.get_instance_profile('unexisting_instance_profile') + + @mock_iam() def test_create_role_and_instance_profile(): conn = boto.connect_iam() From a967ec0d39c9df3c3ddcf2dc2f3f1616b2afa48d Mon Sep 17 00:00:00 2001 From: costypetrisor Date: Thu, 19 Jan 2017 02:37:55 +0000 Subject: [PATCH 025/213] made the Security Group backend throw the same error as AWS if the nb of sec groups limit is hit (#742) * made the Security Group backend throw the same error as AWS if the security group limit is hit * included in the security group limit the count of grants to other security groups & updated the unit tests to cover these * refactored a few things about the sec group rule count limit --- moto/ec2/exceptions.py | 8 ++ moto/ec2/models.py | 33 +++++++ tests/test_ec2/test_security_groups.py | 130 +++++++++++++++++++++++++ 3 files changed, 171 insertions(+) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index 1bba89fd5..79ceb776f 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -318,3 +318,11 @@ class InvalidCIDRSubnetError(EC2ClientError): "InvalidParameterValue", "invalid CIDR subnet specification: {0}" .format(cidr)) + + +class RulesPerSecurityGroupLimitExceededError(EC2ClientError): + def __init__(self): + super(RulesPerSecurityGroupLimitExceededError, self).__init__( + "RulesPerSecurityGroupLimitExceeded", + 'The maximum number of rules per security group ' + 
'has been reached.') diff --git a/moto/ec2/models.py b/moto/ec2/models.py index a3e333dc7..5d220a0e2 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -58,6 +58,7 @@ from .exceptions import ( InvalidVpnGatewayIdError, InvalidVpnConnectionIdError, InvalidCustomerGatewayIdError, + RulesPerSecurityGroupLimitExceededError, ) from .utils import ( EC2_RESOURCE_TO_PREFIX, @@ -1264,6 +1265,16 @@ class SecurityGroup(TaggedEC2Resource): def add_egress_rule(self, rule): self.egress_rules.append(rule) + def get_number_of_ingress_rules(self): + return sum( + len(rule.ip_ranges) + len(rule.source_groups) + for rule in self.ingress_rules) + + def get_number_of_egress_rules(self): + return sum( + len(rule.ip_ranges) + len(rule.source_groups) + for rule in self.egress_rules) + class SecurityGroupBackend(object): @@ -1360,6 +1371,10 @@ class SecurityGroupBackend(object): if not is_valid_cidr(cidr): raise InvalidCIDRSubnetError(cidr=cidr) + self._verify_group_will_respect_rule_count_limit( + group, group.get_number_of_ingress_rules(), + ip_ranges, source_group_names, source_group_ids) + source_group_names = source_group_names if source_group_names else [] source_group_ids = source_group_ids if source_group_ids else [] @@ -1425,6 +1440,10 @@ class SecurityGroupBackend(object): if not is_valid_cidr(cidr): raise InvalidCIDRSubnetError(cidr=cidr) + self._verify_group_will_respect_rule_count_limit( + group, group.get_number_of_egress_rules(), + ip_ranges, source_group_names, source_group_ids) + source_group_names = source_group_names if source_group_names else [] source_group_ids = source_group_ids if source_group_ids else [] @@ -1472,6 +1491,20 @@ class SecurityGroupBackend(object): return security_rule raise InvalidPermissionNotFoundError() + def _verify_group_will_respect_rule_count_limit( + self, group, current_rule_nb, + ip_ranges, source_group_names=None, source_group_ids=None): + max_nb_rules = 50 if group.vpc_id else 100 + future_group_nb_rules = current_rule_nb + if 
ip_ranges: + future_group_nb_rules += len(ip_ranges) + if source_group_ids: + future_group_nb_rules += len(source_group_ids) + if source_group_names: + future_group_nb_rules += len(source_group_names) + if future_group_nb_rules > max_nb_rules: + raise RulesPerSecurityGroupLimitExceededError + class SecurityGroupIngress(object): diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 204380562..83dad6f0c 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -379,6 +379,136 @@ def test_authorize_all_protocols_with_no_port_specification(): sg.rules[0].to_port.should.equal(None) +@mock_ec2 +def test_sec_group_rule_limit(): + ec2_conn = boto.connect_ec2() + sg = ec2_conn.create_security_group('test', 'test') + other_sg = ec2_conn.create_security_group('test_2', 'test_other') + + # INGRESS + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + sg.rules.should.be.empty + # authorize a rule targeting a different sec group (because this count too) + success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + success.should.be.true + # fill the rules up the limit + success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(99)]) + success.should.be.true + # verify that we cannot authorize past the limit for a CIDR IP + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: 
+ ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + # EGRESS + # authorize a rule targeting a different sec group (because this count too) + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + # fill the rules up the limit + # remember that by default, when created a sec group contains 1 egress rule + # so our other_sg rule + 98 CIDR IP rules + 1 by default == 100 the limit + for i in range(98): + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='{0}.0.0.0/0'.format(i)) + # verify that we cannot authorize past the limit for a CIDR IP + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='101.0.0.0/0') + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + +@mock_ec2 +def test_sec_group_rule_limit_vpc(): + ec2_conn = boto.connect_ec2() + vpc_conn = boto.connect_vpc() + + vpc = vpc_conn.create_vpc('10.0.0.0/8') + + sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id) + other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id) + + # INGRESS + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + sg.rules.should.be.empty + # authorize a rule targeting a different sec group (because this count too) 
+ success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + success.should.be.true + # fill the rules up the limit + success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(49)]) + # verify that we cannot authorize past the limit for a CIDR IP + success.should.be.true + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + # EGRESS + # authorize a rule targeting a different sec group (because this count too) + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + # fill the rules up the limit + # remember that by default, when created a sec group contains 1 egress rule + # so our other_sg rule + 48 CIDR IP rules + 1 by default == 50 the limit + for i in range(48): + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='{0}.0.0.0/0'.format(i)) + # verify that we cannot authorize past the limit for a CIDR IP + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='50.0.0.0/0') + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + 
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + + + ''' Boto3 ''' From 2bf85fe25dbf7190f9129df3497d07c972c482a8 Mon Sep 17 00:00:00 2001 From: Akhter Ali Date: Wed, 18 Jan 2017 21:40:04 -0500 Subject: [PATCH 026/213] Fix Route53 response and tests (#804) * Fix route53 response \n Fix route53 tests * Fixing index * fix zone ID on test_route53_with_update --- moto/route53/responses.py | 11 +++++++++-- .../test_cloudformation_stack_integration.py | 11 +++++++++++ tests/test_route53/test_route53.py | 2 +- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 0e171a52c..429317dae 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -21,8 +21,15 @@ def list_or_create_hostzone_response(request, full_url, headers): else: comment = None private_zone = False + + + name = elements["CreateHostedZoneRequest"]["Name"] + + if name[-1] != ".": + name += "." + new_zone = route53_backend.create_hosted_zone( - elements["CreateHostedZoneRequest"]["Name"], + name, comment=comment, private_zone=private_zone, ) @@ -247,7 +254,7 @@ LIST_HOSTED_ZONES_RESPONSE = """ Date: Wed, 18 Jan 2017 22:46:51 -0500 Subject: [PATCH 027/213] Fix assert_raises for catching parents of exceptions. 
--- tests/backport_assert_raises.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/backport_assert_raises.py b/tests/backport_assert_raises.py index 75f9f342e..6ceacaa89 100644 --- a/tests/backport_assert_raises.py +++ b/tests/backport_assert_raises.py @@ -27,6 +27,8 @@ except TypeError: def __exit__(self, exc_type, exc_val, tb): self.exception = exc_val + if issubclass(exc_type, self.expected): + return True nose.tools.assert_equal(exc_type, self.expected) # if you get to this line, the last assertion must have passed # suppress the propagation of this exception From 55f39265dd6b07dadeb269546645bc0ed6f7412b Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 18 Jan 2017 19:55:22 -0800 Subject: [PATCH 028/213] Upgrade to boto 2.45 (#803) Adds support for additional regions: * ca-central-1 * eu-west-2 Signed-off-by: Andrew Harris --- .travis.yml | 4 ++-- moto/ec2/models.py | 2 ++ tests/test_ec2/test_availability_zones_and_regions.py | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 117370bc9..35506f2dc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,11 +5,11 @@ python: - 2.7 env: matrix: - - BOTO_VERSION=2.43.0 + - BOTO_VERSION=2.45.0 matrix: include: - python: "3.3" - env: BOTO_VERSION=2.43.0 + env: BOTO_VERSION=2.45.0 install: - travis_retry pip install boto==$BOTO_VERSION - travis_retry pip install boto3 diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 5d220a0e2..25befabc0 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1092,9 +1092,11 @@ class RegionsAndZonesBackend(object): Region("ap-south-1", "ec2.ap-south-1.amazonaws.com"), Region("ap-southeast-1", "ec2.ap-southeast-1.amazonaws.com"), Region("ap-southeast-2", "ec2.ap-southeast-2.amazonaws.com"), + Region("ca-central-1", "ec2.ca-central-1.amazonaws.com.cn"), Region("cn-north-1", "ec2.cn-north-1.amazonaws.com.cn"), Region("eu-central-1", "ec2.eu-central-1.amazonaws.com"), Region("eu-west-1", 
"ec2.eu-west-1.amazonaws.com"), + Region("eu-west-2", "ec2.eu-west-2.amazonaws.com"), Region("sa-east-1", "ec2.sa-east-1.amazonaws.com"), Region("us-east-1", "ec2.us-east-1.amazonaws.com"), Region("us-east-2", "ec2.us-east-2.amazonaws.com"), diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index 2ab8b9994..88453e10b 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -11,7 +11,7 @@ from moto import mock_ec2 def test_describe_regions(): conn = boto.connect_ec2('the_key', 'the_secret') regions = conn.get_all_regions() - regions.should.have.length_of(14) + regions.should.have.length_of(16) for region in regions: region.endpoint.should.contain(region.name) @@ -32,7 +32,7 @@ def test_availability_zones(): def test_boto3_describe_regions(): ec2 = boto3.client('ec2', 'us-east-1') resp = ec2.describe_regions() - resp['Regions'].should.have.length_of(14) + resp['Regions'].should.have.length_of(16) for rec in resp['Regions']: rec['Endpoint'].should.contain(rec['RegionName']) From f68b2963db4ac7b8f0943d8b9ea101fdcbd451a5 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 18 Jan 2017 19:59:04 -0800 Subject: [PATCH 029/213] sts: Implement get_caller_identity (#806) Return a canned response Signed-off-by: Andrew Harris --- moto/sts/responses.py | 15 +++++++++++++++ tests/test_sts/test_server.py | 11 +++++++++++ tests/test_sts/test_sts.py | 9 +++++++++ 3 files changed, 35 insertions(+) diff --git a/moto/sts/responses.py b/moto/sts/responses.py index 193085623..d721bfaaa 100644 --- a/moto/sts/responses.py +++ b/moto/sts/responses.py @@ -39,6 +39,9 @@ class TokenResponse(BaseResponse): template = self.response_template(ASSUME_ROLE_RESPONSE) return template.render(role=role) + def get_caller_identity(self): + template = self.response_template(GET_CALLER_IDENTITY_RESPONSE) + return template.render() GET_SESSION_TOKEN_RESPONSE = 
""" @@ -95,3 +98,15 @@ ASSUME_ROLE_RESPONSE = """ + + arn:aws:sts::123456789012:user/moto + AKIAIOSFODNN7EXAMPLE + 123456789012 + + + c6104cbe-af31-11e0-8154-cbc7ccf896c7 + + +""" diff --git a/tests/test_sts/test_server.py b/tests/test_sts/test_server.py index fdc6879e9..40260a49f 100644 --- a/tests/test_sts/test_server.py +++ b/tests/test_sts/test_server.py @@ -26,3 +26,14 @@ def test_sts_get_federation_token(): res.status_code.should.equal(200) res.data.should.contain(b"SessionToken") res.data.should.contain(b"AccessKeyId") + + +def test_sts_get_caller_identity(): + backend = server.create_backend_app("sts") + test_client = backend.test_client() + + res = test_client.get('/?Action=GetCallerIdentity') + res.status_code.should.equal(200) + res.data.should.contain(b"Arn") + res.data.should.contain(b"UserId") + res.data.should.contain(b"Account") diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 067ebcce2..9bd02ce12 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import json import boto +import boto3 from freezegun import freeze_time import sure # noqa @@ -64,3 +65,11 @@ def test_assume_role(): role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role") role.user.assume_role_id.should.contain("session-name") + +@mock_sts +def test_get_caller_identity(): + identity = boto3.client("sts").get_caller_identity() + + identity['Arn'].should.equal('arn:aws:sts::123456789012:user/moto') + identity['UserId'].should.equal('AKIAIOSFODNN7EXAMPLE') + identity['Account'].should.equal('123456789012') From e1260bca06b8ac4cf8ecf72b0f4d3234b166b2b6 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 18 Jan 2017 19:59:47 -0800 Subject: [PATCH 030/213] cloudformation: Support RoleARN for create and update stack (#807) Signed-off-by: Andrew Harris --- moto/cloudformation/models.py | 13 ++++++++----- moto/cloudformation/responses.py | 7 +++++++ 
.../test_cloudformation_stack_crud_boto3.py | 15 +++++++++++++++ 3 files changed, 30 insertions(+), 5 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index d9d09410d..1f091251b 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -11,7 +11,7 @@ from .exceptions import ValidationError class FakeStack(object): - def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None): + def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None): self.stack_id = stack_id self.name = name self.template = template @@ -19,6 +19,7 @@ class FakeStack(object): self.parameters = parameters self.region_name = region_name self.notification_arns = notification_arns if notification_arns else [] + self.role_arn = role_arn self.tags = tags if tags else {} self.events = [] self._add_stack_event("CREATE_IN_PROGRESS", resource_status_reason="User Initiated") @@ -77,13 +78,14 @@ class FakeStack(object): def stack_outputs(self): return self.output_map.values() - def update(self, template): + def update(self, template, role_arn=None): self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") self.template = template self.resource_map.update(json.loads(template)) self.output_map = self._create_output_map() self._add_stack_event("UPDATE_COMPLETE") self.status = "UPDATE_COMPLETE" + self.role_arn = role_arn def delete(self): self._add_stack_event("DELETE_IN_PROGRESS", resource_status_reason="User Initiated") @@ -111,7 +113,7 @@ class CloudFormationBackend(BaseBackend): self.stacks = {} self.deleted_stacks = {} - def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None): + def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None): stack_id = generate_stack_id(name) new_stack = FakeStack( stack_id=stack_id, @@ 
-121,6 +123,7 @@ class CloudFormationBackend(BaseBackend): region_name=region_name, notification_arns=notification_arns, tags=tags, + role_arn=role_arn, ) self.stacks[stack_id] = new_stack return new_stack @@ -154,9 +157,9 @@ class CloudFormationBackend(BaseBackend): if stack.name == name_or_stack_id: return stack - def update_stack(self, name, template): + def update_stack(self, name, template, role_arn=None): stack = self.get_stack(name) - stack.update(template) + stack.update(template, role_arn) return stack def list_stack_resources(self, stack_name_or_id): diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 9cab62a63..d16b3560c 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -27,6 +27,7 @@ class CloudFormationResponse(BaseResponse): stack_name = self._get_param('StackName') stack_body = self._get_param('TemplateBody') template_url = self._get_param('TemplateURL') + role_arn = self._get_param('RoleARN') parameters_list = self._get_list_prefix("Parameters.member") tags = dict((item['key'], item['value']) for item in self._get_list_prefix("Tags.member")) @@ -47,6 +48,7 @@ class CloudFormationResponse(BaseResponse): region_name=self.region, notification_arns=stack_notification_arns, tags=tags, + role_arn=role_arn, ) if self.request_json: return json.dumps({ @@ -131,6 +133,7 @@ class CloudFormationResponse(BaseResponse): def update_stack(self): stack_name = self._get_param('StackName') + role_arn = self._get_param('RoleARN') if self._get_param('UsePreviousTemplate') == "true": stack_body = self.cloudformation_backend.get_stack(stack_name).template else: @@ -143,6 +146,7 @@ class CloudFormationResponse(BaseResponse): stack = self.cloudformation_backend.update_stack( name=stack_name, template=stack_body, + role_arn=role_arn, ) if self.request_json: stack_body = { @@ -227,6 +231,9 @@ DESCRIBE_STACKS_TEMPLATE = """ {% endfor %} + {% if stack.role_arn %} + {{ stack.role_arn }} + {% endif %} {% for 
tag_key, tag_value in stack.tags.items() %} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 4197d2628..97c3e864a 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -104,6 +104,19 @@ def test_create_stack_with_notification_arn(): 'arn:aws:sns:us-east-1:123456789012:fake-queue') +@mock_cloudformation +def test_create_stack_with_role_arn(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + cf.create_stack( + StackName="test_stack_with_notifications", + TemplateBody=dummy_template_json, + RoleARN='arn:aws:iam::123456789012:role/moto', + ) + + stack = list(cf.stacks.all())[0] + stack.role_arn.should.equal('arn:aws:iam::123456789012:role/moto') + + @mock_cloudformation @mock_s3 def test_create_stack_from_s3_url(): @@ -240,6 +253,7 @@ def test_describe_updated_stack(): cf_conn.update_stack( StackName="test_stack", + RoleARN='arn:aws:iam::123456789012:role/moto', TemplateBody=dummy_update_template_json) stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0] @@ -248,6 +262,7 @@ def test_describe_updated_stack(): stack_by_id['StackId'].should.equal(stack['StackId']) stack_by_id['StackName'].should.equal("test_stack") stack_by_id['StackStatus'].should.equal("UPDATE_COMPLETE") + stack_by_id['RoleARN'].should.equal('arn:aws:iam::123456789012:role/moto') @mock_cloudformation From 51129f6ef3f25c9f53fafdbf86fd2f586c5f13e0 Mon Sep 17 00:00:00 2001 From: Michael Nussbaum Date: Wed, 18 Jan 2017 20:02:04 -0800 Subject: [PATCH 031/213] Allow ELB policies to be set via Cloudformation. (#809) Also more closely mirror AWS by not returning ELB BackendServerDescriptions that don't have policies. 
Signed-off-by: Michael Nussbaum --- moto/elb/models.py | 24 ++++++++++++++++++++++-- moto/elb/responses.py | 16 +++++++--------- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/moto/elb/models.py b/moto/elb/models.py index 28a791c2d..055b08e4d 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -8,7 +8,10 @@ from boto.ec2.elb.attributes import ( AccessLogAttribute, CrossZoneLoadBalancingAttribute, ) -from boto.ec2.elb.policies import Policies +from boto.ec2.elb.policies import ( + Policies, + OtherPolicy, +) from moto.core import BaseBackend from moto.ec2.models import ec2_backends from .exceptions import ( @@ -104,6 +107,21 @@ class FakeLoadBalancer(object): for instance_id in instance_ids: elb_backend.register_instances(new_elb.name, [instance_id]) + policies = properties.get('Policies', []) + port_policies = {} + for policy in policies: + policy_name = policy["PolicyName"] + other_policy = OtherPolicy() + other_policy.policy_name = policy_name + elb_backend.create_lb_other_policy(new_elb.name, other_policy) + for port in policy.get("InstancePorts", []): + policies_for_port = port_policies.get(port, set()) + policies_for_port.add(policy_name) + port_policies[port] = policies_for_port + + for port, policies in port_policies.items(): + elb_backend.set_load_balancer_policies_of_backend_server(new_elb.name, port, list(policies)) + health_check = properties.get('HealthCheck') if health_check: elb_backend.configure_health_check( @@ -307,7 +325,9 @@ class ELBBackend(BaseBackend): def create_lb_other_policy(self, load_balancer_name, other_policy): load_balancer = self.get_load_balancer(load_balancer_name) - load_balancer.policies.other_policies.append(other_policy) + if other_policy.policy_name not in [p.policy_name for p in load_balancer.policies.other_policies]: + load_balancer.policies.other_policies.append(other_policy) + return load_balancer def create_app_cookie_stickiness_policy(self, load_balancer_name, policy): diff --git 
a/moto/elb/responses.py b/moto/elb/responses.py index 4cdf45f5a..cba98e4e0 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -445,18 +445,16 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ limit: + break + continuation_index += 1 + result_keys = result_keys[continuation_index:] + + if len(result_keys) > max_keys: + is_truncated = 'true' + result_keys = result_keys[:max_keys] + next_continuation_token = result_keys[-1].name + else: + is_truncated = 'false' + next_continuation_token = None + + return template.render( + bucket=bucket, + prefix=prefix or '', + delimiter=delimiter, + result_keys=result_keys, + result_folders=result_folders, + fetch_owner=fetch_owner, + max_keys=max_keys, + is_truncated=is_truncated, + next_continuation_token=next_continuation_token, + start_after=None if continuation_token else start_after + ) + def _bucket_response_put(self, request, body, region_name, bucket_name, querystring, headers): if 'versioning' in querystring: ver = re.search('([A-Za-z]+)', body) @@ -636,6 +681,46 @@ S3_BUCKET_GET_RESPONSE = """ {% endif %} """ +S3_BUCKET_GET_RESPONSE_V2 = """ + + {{ bucket.name }} + {{ prefix }} + {{ max_keys }} + {{ result_keys | length }} +{% if delimiter %} + {{ delimiter }} +{% endif %} + {{ is_truncated }} +{% if next_continuation_token %} + {{ next_continuation_token }} +{% endif %} +{% if start_after %} + {{ start_after }} +{% endif %} + {% for key in result_keys %} + + {{ key.name }} + {{ key.last_modified_ISO8601 }} + {{ key.etag }} + {{ key.size }} + {{ key.storage_class }} + {% if fetch_owner %} + + 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a + webfile + + {% endif %} + + {% endfor %} + {% if delimiter %} + {% for folder in result_folders %} + + {{ folder }} + + {% endfor %} + {% endif %} + """ + S3_BUCKET_CREATE_RESPONSE = """ {{ bucket.name }} diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 0d8f7cb49..4990d7324 100644 --- a/tests/test_s3/test_s3.py +++ 
b/tests/test_s3/test_s3.py @@ -995,8 +995,129 @@ def test_boto3_list_keys_xml_escaped(): s3.create_bucket(Bucket='mybucket') key_name = 'Q&A.txt' s3.put_object(Bucket='mybucket', Key=key_name, Body=b'is awesome') + resp = s3.list_objects_v2(Bucket='mybucket', Prefix=key_name) + assert resp['Contents'][0]['Key'] == key_name + assert resp['KeyCount'] == 1 + assert resp['MaxKeys'] == 1000 + assert resp['Prefix'] == key_name + assert resp['IsTruncated'] == False + assert 'Delimiter' not in resp + assert 'StartAfter' not in resp + assert 'NextContinuationToken' not in resp + assert 'Owner' not in resp['Contents'][0] + + +@mock_s3 +def test_boto3_list_objects_v2_truncated_response(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='one', Body=b'1') + s3.put_object(Bucket='mybucket', Key='two', Body=b'22') + s3.put_object(Bucket='mybucket', Key='three', Body=b'333') + + # First list + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'one' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == True + assert 'Delimiter' not in resp + assert 'StartAfter' not in resp + assert 'Owner' not in listed_object # owner info was not requested + + next_token = resp['NextContinuationToken'] + + + # Second list + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'three' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == True + assert 'Delimiter' not in resp + assert 'StartAfter' not in resp + assert 'Owner' not in listed_object + + next_token = resp['NextContinuationToken'] + + + # Third list + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + 
listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'two' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == False + assert 'Delimiter' not in resp + assert 'Owner' not in listed_object + assert 'StartAfter' not in resp + assert 'NextContinuationToken' not in resp + + +@mock_s3 +def test_boto3_list_objects_v2_truncated_response_start_after(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='one', Body=b'1') + s3.put_object(Bucket='mybucket', Key='two', Body=b'22') + s3.put_object(Bucket='mybucket', Key='three', Body=b'333') + + # First list + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one') + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'three' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == True + assert resp['StartAfter'] == 'one' + assert 'Delimiter' not in resp + assert 'Owner' not in listed_object + + next_token = resp['NextContinuationToken'] + + # Second list + # The ContinuationToken must take precedence over StartAfter. + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one', + ContinuationToken=next_token) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'two' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == False + # When ContinuationToken is given, StartAfter is ignored. This also means + # AWS does not return it in the response. 
+ assert 'StartAfter' not in resp + assert 'Delimiter' not in resp + assert 'Owner' not in listed_object + + +@mock_s3 +def test_boto3_list_objects_v2_fetch_owner(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='one', Body=b'11') + + resp = s3.list_objects_v2(Bucket='mybucket', FetchOwner=True) + owner = resp['Contents'][0]['Owner'] + + assert 'ID' in owner + assert 'DisplayName' in owner + assert len(owner.keys()) == 2 + @mock_s3 def test_boto3_bucket_create(): From 8fc1ad03bd3e735c578ccc67effb5d0d0cff37f2 Mon Sep 17 00:00:00 2001 From: Jeffrey Gelens Date: Thu, 9 Feb 2017 03:22:14 +0100 Subject: [PATCH 036/213] Reload the server on a file change (#817) * Added simple server reload support * updated help text --- moto/server.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/moto/server.py b/moto/server.py index 5ee12362e..1780083d8 100644 --- a/moto/server.py +++ b/moto/server.py @@ -137,6 +137,12 @@ def main(argv=sys.argv[1:]): '-p', '--port', type=int, help='Port number to use for connection', default=5000) + parser.add_argument( + '-r', '--reload', + action='store_true', + help='Reload server on a file change', + default=False + ) args = parser.parse_args(argv) @@ -144,7 +150,8 @@ def main(argv=sys.argv[1:]): main_app = DomainDispatcherApplication(create_backend_app, service=args.service) main_app.debug = True - run_simple(args.host, args.port, main_app, threaded=True) + run_simple(args.host, args.port, main_app, threaded=True, use_reloader=args.reload) + if __name__ == '__main__': main() From 1045dca7b225f879bbc3804297d0300725aece71 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 8 Feb 2017 21:23:49 -0500 Subject: [PATCH 037/213] make instanceTenancy configurable for VPCs (#819) * make instanceTenancy configurable for VPCs * fix issue with setting tenenancy --- moto/ec2/models.py | 10 +++++++--- moto/ec2/responses/vpcs.py | 7 ++++--- 
tests/test_ec2/test_vpcs.py | 17 +++++++++++++++++ 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 25befabc0..00e0f4960 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1812,12 +1812,13 @@ class EBSBackend(object): class VPC(TaggedEC2Resource): - def __init__(self, ec2_backend, vpc_id, cidr_block, is_default): + def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default'): self.ec2_backend = ec2_backend self.id = vpc_id self.cidr_block = cidr_block self.dhcp_options = None self.state = 'available' + self.instance_tenancy = instance_tenancy self.is_default = 'true' if is_default else 'false' self.enable_dns_support = 'true' # This attribute is set to 'true' only for default VPCs @@ -1831,6 +1832,7 @@ class VPC(TaggedEC2Resource): ec2_backend = ec2_backends[region_name] vpc = ec2_backend.create_vpc( cidr_block=properties['CidrBlock'], + instance_tenancy=properties.get('InstanceTenancy', 'default') ) return vpc @@ -1843,6 +1845,8 @@ class VPC(TaggedEC2Resource): return self.id elif filter_name in ('cidr', 'cidr-block', 'cidrBlock'): return self.cidr_block + elif filter_name in ('instance_tenancy', 'InstanceTenancy'): + return self.instance_tenancy elif filter_name in ('is-default', 'isDefault'): return self.is_default elif filter_name == 'state': @@ -1866,9 +1870,9 @@ class VPCBackend(object): self.vpcs = {} super(VPCBackend, self).__init__() - def create_vpc(self, cidr_block): + def create_vpc(self, cidr_block, instance_tenancy='default'): vpc_id = random_vpc_id() - vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0) + vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy) self.vpcs[vpc_id] = vpc # AWS creates a default main route table and security group. 
diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 58c5e80dd..3d2a99894 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -7,7 +7,8 @@ from moto.ec2.utils import filters_from_querystring, vpc_ids_from_querystring class VPCs(BaseResponse): def create_vpc(self): cidr_block = self.querystring.get('CidrBlock')[0] - vpc = self.ec2_backend.create_vpc(cidr_block) + instance_tenancy = self.querystring.get('InstanceTenancy', ['default'])[0] + vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy) template = self.response_template(CREATE_VPC_RESPONSE) return template.render(vpc=vpc) @@ -51,7 +52,7 @@ CREATE_VPC_RESPONSE = """ pending {{ vpc.cidr_block }} {% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-1a2b3c4d2{% endif %} - default + {{ vpc.instance_tenancy }} {% for tag in vpc.get_tags() %} @@ -75,7 +76,7 @@ DESCRIBE_VPCS_RESPONSE = """ {{ vpc.state }} {{ vpc.cidr_block }} {% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-7a8b9c2d{% endif %} - default + {{ vpc.instance_tenancy }} {{ vpc.is_default }} {% for tag in vpc.get_tags() %} diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index def2700e3..513238001 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -246,6 +246,7 @@ def test_default_vpc(): # Create the default VPC default_vpc = list(ec2.vpcs.all())[0] default_vpc.cidr_block.should.equal('172.31.0.0/16') + default_vpc.instance_tenancy.should.equal('default') default_vpc.reload() default_vpc.is_default.should.be.ok @@ -271,6 +272,9 @@ def test_non_default_vpc(): vpc.reload() vpc.is_default.shouldnt.be.ok + # Test default instance_tenancy + vpc.instance_tenancy.should.equal('default') + # Test default values for VPC attributes response = vpc.describe_attribute(Attribute='enableDnsSupport') attr = response.get('EnableDnsSupport') @@ -280,6 +284,19 @@ def test_non_default_vpc(): attr = response.get('EnableDnsHostnames') 
attr.get('Value').shouldnt.be.ok +@mock_ec2 +def test_vpc_dedicated_tenancy(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + # Create the non default VPC + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16', InstanceTenancy='dedicated') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + vpc.instance_tenancy.should.equal('dedicated') @mock_ec2 def test_vpc_modify_enable_dns_support(): From 012dd497f2616cb20e04828ffd504ccaa2dd6268 Mon Sep 17 00:00:00 2001 From: David Wilcox Date: Thu, 9 Feb 2017 13:29:37 +1100 Subject: [PATCH 038/213] make get_all_security_groups filter AND match group ids, not OR them (#822) --- moto/ec2/models.py | 10 ++++++---- tests/test_ec2/test_security_groups.py | 13 +++++++++++++ 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 00e0f4960..30769fd7e 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1309,10 +1309,12 @@ class SecurityGroupBackend(object): if group_ids or groupnames or filters: for group in all_groups: - if ((group_ids and group.id in group_ids) or - (groupnames and group.name in groupnames) or - (filters and group.matches_filters(filters))): - groups.append(group) + if ((group_ids and not group.id in group_ids) or + (groupnames and not group.name in groupnames)): + continue + if filters and not group.matches_filters(filters): + continue + groups.append(group) else: groups = all_groups diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 83dad6f0c..19f43862d 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -610,3 +610,16 @@ def test_authorize_and_revoke_in_bulk(): sg01.ip_permissions_egress.should.have.length_of(1) for ip_permission in expected_ip_permissions: sg01.ip_permissions_egress.shouldnt.contain(ip_permission) + +@mock_ec2 +def 
test_get_all_security_groups_filter_with_same_vpc_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + security_group = conn.create_security_group('test1', 'test1', vpc_id=vpc_id) + security_group2 = conn.create_security_group('test2', 'test2', vpc_id=vpc_id) + + security_group.vpc_id.should.equal(vpc_id) + security_group2.vpc_id.should.equal(vpc_id) + + security_groups = conn.get_all_security_groups(group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) + security_groups.should.have.length_of(1) From fd7b8e7c8893379db6b86f15ec9c37a8c5cb6679 Mon Sep 17 00:00:00 2001 From: Joseph Lawson Date: Wed, 8 Feb 2017 21:30:27 -0500 Subject: [PATCH 039/213] install server version of moto for Dockerfile (#824) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 16d9d7d91..72657903e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ ADD . /moto/ ENV PYTHONUNBUFFERED 1 WORKDIR /moto/ -RUN python setup.py install +RUN pip install ".[server]" CMD ["moto_server"] From 53fbd7dca02f294afb0ffaeac92e793eb8021a95 Mon Sep 17 00:00:00 2001 From: Jack Danger Date: Thu, 9 Feb 2017 19:36:24 -0800 Subject: [PATCH 040/213] KMS encryption under Python 3 (#826) This upgrades the KMS encrypt and decrypt endpoints to work under both Python 2 and 3 --- moto/kms/responses.py | 8 ++++++-- tests/test_kms/test_kms.py | 32 ++++++++++++++------------------ 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/moto/kms/responses.py b/moto/kms/responses.py index fb5c8590f..bc928f6f3 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals import base64 import json import re +import six from boto.exception import JSONResponseError from boto.kms.exceptions import AlreadyExistsException, NotFoundException @@ -220,11 +221,14 @@ class KmsResponse(BaseResponse): decode it in decrypt(). 
""" value = self.parameters.get("Plaintext") - return json.dumps({"CiphertextBlob": base64.b64encode(value).encode("utf-8")}) + if isinstance(value, six.text_type): + value = value.encode('utf-8') + return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8")}) def decrypt(self): value = self.parameters.get("CiphertextBlob") - return json.dumps({"Plaintext": base64.b64decode(value).encode("utf-8")}) + print("value 3", value) + return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")}) def _assert_valid_key_id(key_id): diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 9ec4ffce4..04e6fbb4b 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals import re -import six import boto.kms from boto.exception import JSONResponseError @@ -137,25 +136,22 @@ def test_disable_key_rotation(): conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) -# Scoping encryption/decryption to only Python 2 because our test suite -# hardcodes a dependency on boto version 2.36.0 which is not compatible with -# Python 3 (2.40+, however, passes these tests). 
-if six.PY2: - @mock_kms - def test_encrypt(): - """ - Using base64 encoding to merely test that the endpoint was called - """ - conn = boto.kms.connect_to_region("us-west-2") - response = conn.encrypt('key_id', 'encryptme'.encode('utf-8')) - response['CiphertextBlob'].should.equal('ZW5jcnlwdG1l') +@mock_kms +def test_encrypt(): + """ + test_encrypt + Using base64 encoding to merely test that the endpoint was called + """ + conn = boto.kms.connect_to_region("us-west-2") + response = conn.encrypt('key_id', 'encryptme'.encode('utf-8')) + response['CiphertextBlob'].should.equal(b'ZW5jcnlwdG1l') - @mock_kms - def test_decrypt(): - conn = boto.kms.connect_to_region('us-west-2') - response = conn.decrypt('ZW5jcnlwdG1l'.encode('utf-8')) - response['Plaintext'].should.equal('encryptme') +@mock_kms +def test_decrypt(): + conn = boto.kms.connect_to_region('us-west-2') + response = conn.decrypt('ZW5jcnlwdG1l'.encode('utf-8')) + response['Plaintext'].should.equal(b'encryptme') @mock_kms From 2d03182ae2aa6d6f1e0204d133c48a58e343fb9f Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 11 Feb 2017 19:41:04 -0500 Subject: [PATCH 041/213] Migrate some sqs tests to boto3. 
--- moto/ec2/responses/ip_addresses.py | 4 +- tests/test_sqs/test_sqs.py | 183 +++++++++++++---------------- 2 files changed, 84 insertions(+), 103 deletions(-) diff --git a/moto/ec2/responses/ip_addresses.py b/moto/ec2/responses/ip_addresses.py index b57be64af..fd58741e2 100644 --- a/moto/ec2/responses/ip_addresses.py +++ b/moto/ec2/responses/ip_addresses.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals -from moto.core.responses import BaseResponse, JSONResponseError + +from boto.exception import JSONResponseError +from moto.core.responses import BaseResponse class IPAddresses(BaseResponse): diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 8972b7283..525f7bf89 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals + import boto import boto3 import botocore.exceptions @@ -11,102 +12,130 @@ import time from moto import mock_sqs from tests.helpers import requires_boto_gte +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +sqs = boto3.resource('sqs') @mock_sqs def test_create_queue(): - conn = boto.connect_sqs('the_key', 'the_secret') - conn.create_queue("test-queue", visibility_timeout=60) + sqs = boto3.resource('sqs', region_name='us-east-1') + new_queue = sqs.create_queue(QueueName='test-queue') + new_queue.should_not.be.none + new_queue.should.have.property('url').should.contain('test-queue') - all_queues = conn.get_all_queues() - all_queues[0].name.should.equal("test-queue") + queue = sqs.get_queue_by_name(QueueName='test-queue') + queue.attributes.get('QueueArn').should_not.be.none + queue.attributes.get('QueueArn').split(':')[-1].should.equal('test-queue') + queue.attributes.get('QueueArn').split(':')[3].should.equal('us-east-1') + queue.attributes.get('VisibilityTimeout').should_not.be.none + queue.attributes.get('VisibilityTimeout').should.equal('30') - all_queues[0].get_timeout().should.equal(60) + +@mock_sqs +def 
test_get_inexistent_queue(): + sqs = boto3.resource('sqs', region_name='us-east-1') + sqs.get_queue_by_name.when.called_with(QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) + + +@mock_sqs +def test_message_send(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message(MessageBody="derp") + + msg.get('MD5OfMessageBody').should.equal('58fd9edd83341c29f1aebba81c31e257') + msg.get('ResponseMetadata', {}).get('RequestId').should.equal('27daac76-34dd-47df-bd01-1f6e873584a0') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_set_queue_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + + queue.attributes['VisibilityTimeout'].should.equal("30") + + queue.set_attributes(Attributes={"VisibilityTimeout": "45"}) + queue.attributes['VisibilityTimeout'].should.equal("45") @mock_sqs def test_create_queues_in_multiple_region(): - west1_conn = boto.sqs.connect_to_region("us-west-1") - west1_conn.create_queue("test-queue") + west1_conn = boto3.client('sqs', region_name='us-west-1') + west1_conn.create_queue(QueueName="blah") - west2_conn = boto.sqs.connect_to_region("us-west-2") - west2_conn.create_queue("test-queue") + west2_conn = boto3.client('sqs', region_name='us-west-2') + west2_conn.create_queue(QueueName="test-queue") - list(west1_conn.get_all_queues()).should.have.length_of(1) - list(west2_conn.get_all_queues()).should.have.length_of(1) + list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) + list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) - west1_conn.get_all_queues()[0].url.should.equal('http://sqs.us-west-1.amazonaws.com/123456789012/test-queue') - - -@mock_sqs -def test_get_queue(): - conn = boto.connect_sqs('the_key', 'the_secret') - conn.create_queue("test-queue", 
visibility_timeout=60) - - queue = conn.get_queue("test-queue") - queue.name.should.equal("test-queue") - queue.get_timeout().should.equal(60) - - nonexisting_queue = conn.get_queue("nonexisting_queue") - nonexisting_queue.should.be.none + west1_conn.list_queues()['QueueUrls'][0].should.equal('http://sqs.us-west-1.amazonaws.com/123456789012/blah') @mock_sqs def test_get_queue_with_prefix(): - conn = boto.connect_sqs('the_key', 'the_secret') - conn.create_queue("prefixa-queue") - conn.create_queue("prefixb-queue") - conn.create_queue("test-queue") + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="prefixa-queue") + conn.create_queue(QueueName="prefixb-queue") + conn.create_queue(QueueName="test-queue") - conn.get_all_queues().should.have.length_of(3) + conn.list_queues()['QueueUrls'].should.have.length_of(3) - queue = conn.get_all_queues("test-") + queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] queue.should.have.length_of(1) - queue[0].name.should.equal("test-queue") + queue[0].should.equal("http://sqs.us-west-1.amazonaws.com/123456789012/test-queue") @mock_sqs def test_delete_queue(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) + conn = boto3.client("sqs") + conn.create_queue(QueueName="test-queue", Attributes={"VisibilityTimeout": "60"}) + queue = sqs.Queue('test-queue') - conn.get_all_queues().should.have.length_of(1) + conn.list_queues()['QueueUrls'].should.have.length_of(1) queue.delete() - conn.get_all_queues().should.have.length_of(0) + conn.list_queues().get('QueueUrls').should.equal(None) - queue.delete.when.called_with().should.throw(SQSError) + with assert_raises(botocore.exceptions.ClientError): + queue.delete() @mock_sqs def test_set_queue_attribute(): - conn = boto.connect_sqs('the_key', 'the_secret') - conn.create_queue("test-queue", visibility_timeout=60) + conn = boto3.client("sqs") + conn.create_queue(QueueName="test-queue", 
Attributes={"VisibilityTimeout": '60'}) - queue = conn.get_all_queues()[0] - queue.get_timeout().should.equal(60) + queue = sqs.Queue("test-queue") + queue.attributes['VisibilityTimeout'].should.equal('60') - queue.set_attribute("VisibilityTimeout", 45) - queue = conn.get_all_queues()[0] - queue.get_timeout().should.equal(45) + queue.set_attributes(Attributes={"VisibilityTimeout": '45'}) + queue = sqs.Queue("test-queue") + queue.attributes['VisibilityTimeout'].should.equal('45') @mock_sqs def test_send_message(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=60) - queue.set_message_class(RawMessage) + conn = boto3.client("sqs") + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") body_one = 'this is a test message' body_two = 'this is another test message' - queue.write(queue.new_message(body_one)) - queue.write(queue.new_message(body_two)) + response = queue.send_message(MessageBody=body_one) + response = queue.send_message(MessageBody=body_two) - messages = conn.receive_message(queue, number_messages=2) + messages = conn.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] - messages[0].get_body().should.equal(body_one) - messages[1].get_body().should.equal(body_two) + messages[0]['Body'].should.equal(body_one) + messages[1]['Body'].should.equal(body_two) @mock_sqs @@ -495,53 +524,3 @@ def test_delete_message_after_visibility_timeout(): m1_retrieved.delete() assert new_queue.count() == 0 - -""" -boto3 -""" - - -@mock_sqs -def test_boto3_get_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - new_queue = sqs.create_queue(QueueName='test-queue') - new_queue.should_not.be.none - new_queue.should.have.property('url').should.contain('test-queue') - - queue = sqs.get_queue_by_name(QueueName='test-queue') - queue.attributes.get('QueueArn').should_not.be.none - queue.attributes.get('QueueArn').split(':')[-1].should.equal('test-queue') - 
queue.attributes.get('QueueArn').split(':')[3].should.equal('us-east-1') - queue.attributes.get('VisibilityTimeout').should_not.be.none - queue.attributes.get('VisibilityTimeout').should.equal('30') - - -@mock_sqs -def test_boto3_get_inexistent_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - sqs.get_queue_by_name.when.called_with(QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) - - -@mock_sqs -def test_boto3_message_send(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message(MessageBody="derp") - - msg.get('MD5OfMessageBody').should.equal('58fd9edd83341c29f1aebba81c31e257') - msg.get('ResponseMetadata', {}).get('RequestId').should.equal('27daac76-34dd-47df-bd01-1f6e873584a0') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_boto3_set_queue_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - queue.attributes['VisibilityTimeout'].should.equal("30") - - queue.set_attributes(Attributes={"VisibilityTimeout": "45"}) - queue.attributes['VisibilityTimeout'].should.equal("45") From 9076e48fee95d8eb0fd2a9a4a3e4e47e4c24f1e4 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 11 Feb 2017 19:50:26 -0500 Subject: [PATCH 042/213] Fix sqs tests region. 
--- tests/test_sqs/test_sqs.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 525f7bf89..32b026a46 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -15,12 +15,11 @@ from tests.helpers import requires_boto_gte import tests.backport_assert_raises # noqa from nose.tools import assert_raises -sqs = boto3.resource('sqs') +sqs = boto3.resource('sqs', region_name='us-east-1') @mock_sqs def test_create_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') new_queue = sqs.create_queue(QueueName='test-queue') new_queue.should_not.be.none new_queue.should.have.property('url').should.contain('test-queue') @@ -35,13 +34,11 @@ def test_create_queue(): @mock_sqs def test_get_inexistent_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') sqs.get_queue_by_name.when.called_with(QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) @mock_sqs def test_message_send(): - sqs = boto3.resource('sqs', region_name='us-east-1') queue = sqs.create_queue(QueueName="blah") msg = queue.send_message(MessageBody="derp") @@ -55,7 +52,6 @@ def test_message_send(): @mock_sqs def test_set_queue_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') queue = sqs.create_queue(QueueName="blah") queue.attributes['VisibilityTimeout'].should.equal("30") @@ -94,7 +90,7 @@ def test_get_queue_with_prefix(): @mock_sqs def test_delete_queue(): - conn = boto3.client("sqs") + conn = boto3.client("sqs", region_name='us-east-1') conn.create_queue(QueueName="test-queue", Attributes={"VisibilityTimeout": "60"}) queue = sqs.Queue('test-queue') @@ -109,7 +105,7 @@ def test_delete_queue(): @mock_sqs def test_set_queue_attribute(): - conn = boto3.client("sqs") + conn = boto3.client("sqs", region_name='us-east-1') conn.create_queue(QueueName="test-queue", Attributes={"VisibilityTimeout": '60'}) queue = sqs.Queue("test-queue") @@ -122,7 +118,7 @@ 
def test_set_queue_attribute(): @mock_sqs def test_send_message(): - conn = boto3.client("sqs") + conn = boto3.client("sqs", region_name='us-east-1') conn.create_queue(QueueName="test-queue") queue = sqs.Queue("test-queue") From d3df810065c9c453d40fcc971f9be6b7b2846061 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 12 Feb 2017 00:22:29 -0500 Subject: [PATCH 043/213] Generalize decorator code. --- moto/apigateway/__init__.py | 10 ++-------- moto/autoscaling/__init__.py | 10 ++-------- moto/awslambda/__init__.py | 10 ++-------- moto/cloudformation/__init__.py | 10 ++-------- moto/cloudwatch/__init__.py | 10 ++-------- moto/core/models.py | 13 +++++++++++++ moto/datapipeline/__init__.py | 10 ++-------- moto/ec2/__init__.py | 10 ++-------- moto/ecs/__init__.py | 9 ++------- moto/elb/__init__.py | 10 ++-------- moto/emr/__init__.py | 10 ++-------- moto/glacier/__init__.py | 10 ++-------- moto/kinesis/__init__.py | 10 ++-------- moto/kms/__init__.py | 10 ++-------- moto/opsworks/__init__.py | 11 ++--------- moto/rds/__init__.py | 10 ++-------- moto/rds2/__init__.py | 10 ++-------- moto/redshift/__init__.py | 10 ++-------- moto/sns/__init__.py | 10 ++-------- moto/sqs/__init__.py | 10 ++-------- moto/swf/__init__.py | 10 ++-------- 21 files changed, 53 insertions(+), 160 deletions(-) diff --git a/moto/apigateway/__init__.py b/moto/apigateway/__init__.py index 47db4a703..f75a72add 100644 --- a/moto/apigateway/__init__.py +++ b/moto/apigateway/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import apigateway_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator apigateway_backend = apigateway_backends['us-east-1'] - - -def mock_apigateway(func=None): - if func: - return MockAWS(apigateway_backends)(func) - else: - return MockAWS(apigateway_backends) +mock_apigateway = base_decorator(apigateway_backends) diff --git a/moto/autoscaling/__init__.py b/moto/autoscaling/__init__.py index 
cefcc3cf7..1d3cc9b3e 100644 --- a/moto/autoscaling/__init__.py +++ b/moto/autoscaling/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import autoscaling_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator autoscaling_backend = autoscaling_backends['us-east-1'] - - -def mock_autoscaling(func=None): - if func: - return MockAWS(autoscaling_backends)(func) - else: - return MockAWS(autoscaling_backends) +mock_autoscaling = base_decorator(autoscaling_backends) diff --git a/moto/awslambda/__init__.py b/moto/awslambda/__init__.py index 0076f7f76..b27ff9a43 100644 --- a/moto/awslambda/__init__.py +++ b/moto/awslambda/__init__.py @@ -1,13 +1,7 @@ from __future__ import unicode_literals from .models import lambda_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator lambda_backend = lambda_backends['us-east-1'] - - -def mock_lambda(func=None): - if func: - return MockAWS(lambda_backends)(func) - else: - return MockAWS(lambda_backends) +mock_lambda = base_decorator(lambda_backends) diff --git a/moto/cloudformation/__init__.py b/moto/cloudformation/__init__.py index 98587fc41..0e4bd28dd 100644 --- a/moto/cloudformation/__init__.py +++ b/moto/cloudformation/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import cloudformation_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator cloudformation_backend = cloudformation_backends['us-east-1'] - - -def mock_cloudformation(func=None): - if func: - return MockAWS(cloudformation_backends)(func) - else: - return MockAWS(cloudformation_backends) +mock_cloudformation = base_decorator(cloudformation_backends) diff --git a/moto/cloudwatch/__init__.py b/moto/cloudwatch/__init__.py index b354b3be7..ea4bf7185 100644 --- a/moto/cloudwatch/__init__.py +++ b/moto/cloudwatch/__init__.py @@ -1,11 +1,5 @@ from .models import cloudwatch_backends -from 
..core.models import MockAWS - +from ..core.models import MockAWS, base_decorator cloudwatch_backend = cloudwatch_backends['us-east-1'] - -def mock_cloudwatch(func=None): - if func: - return MockAWS(cloudwatch_backends)(func) - else: - return MockAWS(cloudwatch_backends) +mock_cloudwatch = base_decorator(cloudwatch_backends) diff --git a/moto/core/models.py b/moto/core/models.py index 60e744ff5..b6f4f541f 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -186,3 +186,16 @@ class BaseBackend(object): return MockAWS({'global': self})(func) else: return MockAWS({'global': self}) + + +class base_decorator(object): + mock_backend = MockAWS + + def __init__(self, backends): + self.backends = backends + + def __call__(self, func=None): + if func: + return self.mock_backend(self.backends)(func) + else: + return self.mock_backend(self.backends) diff --git a/moto/datapipeline/__init__.py b/moto/datapipeline/__init__.py index dcfe2f427..dd013526e 100644 --- a/moto/datapipeline/__init__.py +++ b/moto/datapipeline/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import datapipeline_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator datapipeline_backend = datapipeline_backends['us-east-1'] - - -def mock_datapipeline(func=None): - if func: - return MockAWS(datapipeline_backends)(func) - else: - return MockAWS(datapipeline_backends) +mock_datapipeline = base_decorator(datapipeline_backends) diff --git a/moto/ec2/__init__.py b/moto/ec2/__init__.py index 2b1cafd88..b269e933b 100644 --- a/moto/ec2/__init__.py +++ b/moto/ec2/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import ec2_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator ec2_backend = ec2_backends['us-east-1'] - - -def mock_ec2(func=None): - if func: - return MockAWS(ec2_backends)(func) - else: - return MockAWS(ec2_backends) +mock_ec2 = 
base_decorator(ec2_backends) diff --git a/moto/ecs/__init__.py b/moto/ecs/__init__.py index 0844b256c..9c07a0d55 100644 --- a/moto/ecs/__init__.py +++ b/moto/ecs/__init__.py @@ -1,11 +1,6 @@ from __future__ import unicode_literals from .models import ecs_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator ecs_backend = ecs_backends['us-east-1'] - -def mock_ecs(func=None): - if func: - return MockAWS(ecs_backends)(func) - else: - return MockAWS(ecs_backends) +mock_ecs = base_decorator(ecs_backends) diff --git a/moto/elb/__init__.py b/moto/elb/__init__.py index fd53c8587..376dfe0e1 100644 --- a/moto/elb/__init__.py +++ b/moto/elb/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import elb_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator elb_backend = elb_backends['us-east-1'] - - -def mock_elb(func=None): - if func: - return MockAWS(elb_backends)(func) - else: - return MockAWS(elb_backends) +mock_elb = base_decorator(elb_backends) diff --git a/moto/emr/__init__.py b/moto/emr/__init__.py index f0103319a..f79df39fa 100644 --- a/moto/emr/__init__.py +++ b/moto/emr/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import emr_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator emr_backend = emr_backends['us-east-1'] - - -def mock_emr(func=None): - if func: - return MockAWS(emr_backends)(func) - else: - return MockAWS(emr_backends) +mock_emr = base_decorator(emr_backends) diff --git a/moto/glacier/__init__.py b/moto/glacier/__init__.py index bc6fd1ff4..3256462a3 100644 --- a/moto/glacier/__init__.py +++ b/moto/glacier/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import glacier_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator glacier_backend = glacier_backends['us-east-1'] - - -def 
mock_glacier(func=None): - if func: - return MockAWS(glacier_backends)(func) - else: - return MockAWS(glacier_backends) +mock_glacier = base_decorator(glacier_backends) diff --git a/moto/kinesis/__init__.py b/moto/kinesis/__init__.py index 415b960e1..50bc07155 100644 --- a/moto/kinesis/__init__.py +++ b/moto/kinesis/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import kinesis_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator kinesis_backend = kinesis_backends['us-east-1'] - - -def mock_kinesis(func=None): - if func: - return MockAWS(kinesis_backends)(func) - else: - return MockAWS(kinesis_backends) +mock_kinesis = base_decorator(kinesis_backends) diff --git a/moto/kms/__init__.py b/moto/kms/__init__.py index d406cc913..4ee6dd2f4 100644 --- a/moto/kms/__init__.py +++ b/moto/kms/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import kms_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator kms_backend = kms_backends['us-east-1'] - - -def mock_kms(func=None): - if func: - return MockAWS(kms_backends)(func) - else: - return MockAWS(kms_backends) +mock_kms = base_decorator(kms_backends) diff --git a/moto/opsworks/__init__.py b/moto/opsworks/__init__.py index dfcd582e2..ef5190997 100644 --- a/moto/opsworks/__init__.py +++ b/moto/opsworks/__init__.py @@ -1,13 +1,6 @@ from __future__ import unicode_literals from .models import opsworks_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator opsworks_backend = opsworks_backends['us-east-1'] - - -def mock_opsworks(func=None): - if func: - return MockAWS(opsworks_backends)(func) - else: - return MockAWS(opsworks_backends) - +mock_opsworks = base_decorator(opsworks_backends) diff --git a/moto/rds/__init__.py b/moto/rds/__init__.py index 407f1680c..d3cafc066 100644 --- a/moto/rds/__init__.py +++ b/moto/rds/__init__.py @@ -1,12 +1,6 @@ 
from __future__ import unicode_literals from .models import rds_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator rds_backend = rds_backends['us-east-1'] - - -def mock_rds(func=None): - if func: - return MockAWS(rds_backends)(func) - else: - return MockAWS(rds_backends) +mock_rds = base_decorator(rds_backends) diff --git a/moto/rds2/__init__.py b/moto/rds2/__init__.py index 602c21ede..b200f9b11 100644 --- a/moto/rds2/__init__.py +++ b/moto/rds2/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import rds2_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator rds2_backend = rds2_backends['us-west-1'] - - -def mock_rds2(func=None): - if func: - return MockAWS(rds2_backends)(func) - else: - return MockAWS(rds2_backends) +mock_rds2 = base_decorator(rds2_backends) diff --git a/moto/redshift/__init__.py b/moto/redshift/__init__.py index 7adf47865..821408493 100644 --- a/moto/redshift/__init__.py +++ b/moto/redshift/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import redshift_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator redshift_backend = redshift_backends['us-east-1'] - - -def mock_redshift(func=None): - if func: - return MockAWS(redshift_backends)(func) - else: - return MockAWS(redshift_backends) +mock_redshift = base_decorator(redshift_backends) diff --git a/moto/sns/__init__.py b/moto/sns/__init__.py index 1aa1a0e3e..0ed85e813 100644 --- a/moto/sns/__init__.py +++ b/moto/sns/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import sns_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator sns_backend = sns_backends['us-east-1'] - - -def mock_sns(func=None): - if func: - return MockAWS(sns_backends)(func) - else: - return MockAWS(sns_backends) +mock_sns = base_decorator(sns_backends) diff --git 
a/moto/sqs/__init__.py b/moto/sqs/__init__.py index 0a9de1a47..09b4ed9e9 100644 --- a/moto/sqs/__init__.py +++ b/moto/sqs/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import sqs_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator sqs_backend = sqs_backends['us-east-1'] - - -def mock_sqs(func=None): - if func: - return MockAWS(sqs_backends)(func) - else: - return MockAWS(sqs_backends) +mock_sqs = base_decorator(sqs_backends) diff --git a/moto/swf/__init__.py b/moto/swf/__init__.py index 7e43ca392..180919320 100644 --- a/moto/swf/__init__.py +++ b/moto/swf/__init__.py @@ -1,12 +1,6 @@ from __future__ import unicode_literals from .models import swf_backends -from ..core.models import MockAWS +from ..core.models import MockAWS, base_decorator swf_backend = swf_backends['us-east-1'] - - -def mock_swf(func=None): - if func: - return MockAWS(swf_backends)(func) - else: - return MockAWS(swf_backends) +mock_swf = base_decorator(swf_backends) From fde721bed7db62ad289c13667e428bbfb1399aa1 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 15 Feb 2017 22:35:45 -0500 Subject: [PATCH 044/213] Testing new version of decorator. 
--- CHANGELOG.md | 4 + README.md | 4 - moto/__init__.py | 47 +- moto/apigateway/__init__.py | 3 +- moto/apigateway/models.py | 16 +- moto/autoscaling/__init__.py | 3 +- moto/awslambda/__init__.py | 4 +- moto/awslambda/responses.py | 14 +- moto/cloudformation/__init__.py | 3 +- moto/cloudwatch/__init__.py | 3 +- moto/core/models.py | 111 +- moto/core/responses.py | 8 +- moto/core/utils.py | 22 + moto/datapipeline/__init__.py | 3 +- moto/dynamodb/__init__.py | 1 + moto/dynamodb2/__init__.py | 1 + moto/ec2/__init__.py | 3 +- moto/ecs/__init__.py | 3 +- moto/elb/__init__.py | 3 +- moto/emr/__init__.py | 3 +- moto/glacier/__init__.py | 3 +- moto/iam/__init__.py | 1 + moto/kinesis/__init__.py | 3 +- moto/kinesis/responses.py | 5 +- moto/kms/__init__.py | 3 +- moto/opsworks/__init__.py | 2 +- moto/opsworks/urls.py | 2 +- moto/packages/__init__.py | 0 moto/packages/httpretty/__init__.py | 60 + moto/packages/httpretty/compat.py | 100 ++ moto/packages/httpretty/core.py | 1071 +++++++++++++++++ moto/packages/httpretty/errors.py | 39 + moto/packages/httpretty/http.py | 155 +++ moto/packages/httpretty/utils.py | 48 + moto/packages/responses | 1 + moto/rds/__init__.py | 3 +- moto/rds2/__init__.py | 3 +- moto/redshift/__init__.py | 3 +- moto/route53/__init__.py | 1 + moto/s3/__init__.py | 1 + moto/s3/models.py | 5 +- moto/s3/responses.py | 15 +- moto/s3bucket_path/__init__.py | 3 - moto/ses/__init__.py | 1 + moto/sns/__init__.py | 3 +- moto/sqs/__init__.py | 3 +- moto/sqs/responses.py | 1 + moto/sts/__init__.py | 1 + moto/swf/__init__.py | 3 +- setup.py | 1 - tests/__init__.py | 7 + tests/test_apigateway/test_apigateway.py | 7 +- tests/test_autoscaling/test_autoscaling.py | 30 +- .../test_launch_configurations.py | 20 +- tests/test_autoscaling/test_policies.py | 18 +- .../test_cloudformation_stack_crud.py | 42 +- .../test_cloudformation_stack_crud_boto3.py | 4 +- .../test_cloudformation_stack_integration.py | 186 +-- tests/test_cloudwatch/test_cloudwatch.py | 12 +- 
tests/test_core/test_decorator_calls.py | 16 +- tests/test_core/test_nested.py | 6 +- tests/test_datapipeline/test_datapipeline.py | 12 +- tests/test_dynamodb/test_dynamodb.py | 10 +- .../test_dynamodb_table_with_range_key.py | 38 +- .../test_dynamodb_table_without_range_key.py | 36 +- tests/test_dynamodb2/test_dynamodb.py | 8 +- .../test_dynamodb_table_with_range_key.py | 64 +- .../test_dynamodb_table_without_range_key.py | 46 +- tests/test_ec2/test_amis.py | 28 +- .../test_availability_zones_and_regions.py | 6 +- tests/test_ec2/test_customer_gateways.py | 10 +- tests/test_ec2/test_dhcp_options.py | 32 +- tests/test_ec2/test_elastic_block_store.py | 32 +- tests/test_ec2/test_elastic_ip_addresses.py | 32 +- .../test_elastic_network_interfaces.py | 18 +- tests/test_ec2/test_general.py | 6 +- tests/test_ec2/test_instances.py | 78 +- tests/test_ec2/test_internet_gateways.py | 34 +- tests/test_ec2/test_key_pairs.py | 20 +- tests/test_ec2/test_network_acls.py | 16 +- tests/test_ec2/test_regions.py | 10 +- tests/test_ec2/test_route_tables.py | 28 +- tests/test_ec2/test_security_groups.py | 38 +- tests/test_ec2/test_spot_instances.py | 15 +- tests/test_ec2/test_subnets.py | 18 +- tests/test_ec2/test_tags.py | 34 +- .../test_ec2/test_virtual_private_gateways.py | 14 +- tests/test_ec2/test_vpc_peering.py | 12 +- tests/test_ec2/test_vpcs.py | 32 +- tests/test_ec2/test_vpn_connections.py | 10 +- tests/test_elb/test_elb.py | 58 +- tests/test_emr/test_emr.py | 34 +- tests/test_glacier/test_glacier_archives.py | 4 +- tests/test_glacier/test_glacier_jobs.py | 10 +- tests/test_glacier/test_glacier_vaults.py | 6 +- tests/test_iam/test_iam.py | 50 +- tests/test_iam/test_iam_groups.py | 14 +- tests/test_kinesis/test_kinesis.py | 34 +- tests/test_kms/test_kms.py | 82 +- tests/test_opsworks/test_layers.py | 2 +- tests/test_rds/test_rds.py | 50 +- tests/test_rds2/test_rds2.py | 2 - tests/test_redshift/test_redshift.py | 52 +- tests/test_route53/test_route53.py | 24 +- 
tests/test_s3/test_s3.py | 164 +-- tests/test_s3/test_s3_lifecycle.py | 10 +- .../test_s3bucket_path/test_s3bucket_path.py | 48 +- .../test_s3bucket_path_combo.py | 6 +- tests/test_ses/test_ses.py | 14 +- tests/test_sns/test_application.py | 28 +- tests/test_sns/test_publishing.py | 21 +- tests/test_sns/test_publishing_boto3.py | 8 +- tests/test_sns/test_subscriptions.py | 8 +- tests/test_sns/test_topics.py | 14 +- tests/test_sqs/test_sqs.py | 50 +- tests/test_sts/test_sts.py | 8 +- .../test_swf/responses/test_activity_tasks.py | 28 +- .../test_swf/responses/test_activity_types.py | 22 +- .../test_swf/responses/test_decision_tasks.py | 38 +- tests/test_swf/responses/test_domains.py | 22 +- tests/test_swf/responses/test_timeouts.py | 8 +- .../responses/test_workflow_executions.py | 28 +- .../test_swf/responses/test_workflow_types.py | 23 +- 123 files changed, 2740 insertions(+), 1114 deletions(-) create mode 100644 moto/packages/__init__.py create mode 100644 moto/packages/httpretty/__init__.py create mode 100644 moto/packages/httpretty/compat.py create mode 100644 moto/packages/httpretty/core.py create mode 100644 moto/packages/httpretty/errors.py create mode 100644 moto/packages/httpretty/http.py create mode 100644 moto/packages/httpretty/utils.py create mode 160000 moto/packages/responses diff --git a/CHANGELOG.md b/CHANGELOG.md index 66610511f..790f6de95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ Moto Changelog Latest ------ + BACKWARDS INCOMPATIBLE + * The normal @mock_ decorators will no longer work with boto. It is suggested that you upgrade to boto3 or use the standalone-server mode. If you would still like to use boto, you must use the @mock__deprecated decorators which will be removed in a future release. + * The @mock_s3bucket_path decorator is now deprecated. Use the @mock_s3 decorator instead. 
+ 0.4.31 ------ diff --git a/README.md b/README.md index c05f1dff4..ae161dc5c 100644 --- a/README.md +++ b/README.md @@ -263,7 +263,3 @@ boto3.resource( ```console $ pip install moto ``` - -## Thanks - -A huge thanks to [Gabriel Falcão](https://github.com/gabrielfalcao) and his [HTTPretty](https://github.com/gabrielfalcao/HTTPretty) library. Moto would not exist without it. diff --git a/moto/__init__.py b/moto/__init__.py index afc98d14a..4accf1d0c 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -6,33 +6,32 @@ __title__ = 'moto' __version__ = '0.4.31' from .apigateway import mock_apigateway # flake8: noqa -from .autoscaling import mock_autoscaling # flake8: noqa +from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8: noqa from .awslambda import mock_lambda # flake8: noqa -from .cloudformation import mock_cloudformation # flake8: noqa -from .cloudwatch import mock_cloudwatch # flake8: noqa -from .datapipeline import mock_datapipeline # flake8: noqa -from .dynamodb import mock_dynamodb # flake8: noqa -from .dynamodb2 import mock_dynamodb2 # flake8: noqa -from .ec2 import mock_ec2 # flake8: noqa +from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated # flake8: noqa +from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa +from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa +from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa +from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa +from .ec2 import mock_ec2, mock_ec2_deprecated # flake8: noqa from .ecs import mock_ecs # flake8: noqa -from .elb import mock_elb # flake8: noqa -from .emr import mock_emr # flake8: noqa -from .glacier import mock_glacier # flake8: noqa +from .elb import mock_elb, mock_elb_deprecated # flake8: noqa +from .emr import mock_emr, mock_emr_deprecated # flake8: noqa +from .glacier import mock_glacier, mock_glacier_deprecated # 
flake8: noqa from .opsworks import mock_opsworks # flake8: noqa -from .iam import mock_iam # flake8: noqa -from .kinesis import mock_kinesis # flake8: noqa -from .kms import mock_kms # flake8: noqa -from .rds import mock_rds # flake8: noqa -from .rds2 import mock_rds2 # flake8: noqa -from .redshift import mock_redshift # flake8: noqa -from .s3 import mock_s3 # flake8: noqa -from .s3bucket_path import mock_s3bucket_path # flake8: noqa -from .ses import mock_ses # flake8: noqa -from .sns import mock_sns # flake8: noqa -from .sqs import mock_sqs # flake8: noqa -from .sts import mock_sts # flake8: noqa -from .route53 import mock_route53 # flake8: noqa -from .swf import mock_swf # flake8: noqa +from .iam import mock_iam, mock_iam_deprecated # flake8: noqa +from .kinesis import mock_kinesis, mock_kinesis_deprecated # flake8: noqa +from .kms import mock_kms, mock_kms_deprecated # flake8: noqa +from .rds import mock_rds, mock_rds_deprecated # flake8: noqa +from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa +from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa +from .s3 import mock_s3, mock_s3_deprecated # flake8: noqa +from .ses import mock_ses, mock_ses_deprecated # flake8: noqa +from .sns import mock_sns, mock_sns_deprecated # flake8: noqa +from .sqs import mock_sqs, mock_sqs_deprecated # flake8: noqa +from .sts import mock_sts, mock_sts_deprecated # flake8: noqa +from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa +from .swf import mock_swf, mock_swf_deprecated # flake8: noqa try: diff --git a/moto/apigateway/__init__.py b/moto/apigateway/__init__.py index f75a72add..c6ea9a3bc 100644 --- a/moto/apigateway/__init__.py +++ b/moto/apigateway/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import apigateway_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator apigateway_backend = 
apigateway_backends['us-east-1'] mock_apigateway = base_decorator(apigateway_backends) +mock_apigateway_deprecated = deprecated_base_decorator(apigateway_backends) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 11d650e05..be0bfa434 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -1,9 +1,10 @@ +from __future__ import absolute_import from __future__ import unicode_literals import datetime -import httpretty import requests +from moto.packages.responses import responses from moto.core import BaseBackend from moto.core.utils import iso_8601_datetime_with_milliseconds from .utils import create_id @@ -315,8 +316,12 @@ class RestAPI(object): return resource # TODO deal with no matching resource - def resource_callback(self, request, full_url, headers): - path_after_stage_name = '/'.join(request.path.split("/")[2:]) + def resource_callback(self, request, full_url=None, headers=None): + if not headers: + headers = request.headers + + path = request.path if hasattr(request, 'path') else request.path_url + path_after_stage_name = '/'.join(path.split("/")[2:]) if not path_after_stage_name: path_after_stage_name = '/' @@ -325,11 +330,8 @@ class RestAPI(object): return status_code, headers, response def update_integration_mocks(self, stage_name): - httpretty.enable() - stage_url = STAGE_URL.format(api_id=self.id, region_name=self.region_name, stage_name=stage_name) - for method in httpretty.httpretty.METHODS: - httpretty.register_uri(method, stage_url, body=self.resource_callback) + responses.add_callback(responses.GET, stage_url, callback=self.resource_callback) def create_stage(self, name, deployment_id,variables=None,description='',cacheClusterEnabled=None,cacheClusterSize=None): if variables is None: diff --git a/moto/autoscaling/__init__.py b/moto/autoscaling/__init__.py index 1d3cc9b3e..9b5842788 100644 --- a/moto/autoscaling/__init__.py +++ b/moto/autoscaling/__init__.py @@ -1,6 +1,7 @@ from __future__ import 
unicode_literals from .models import autoscaling_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator autoscaling_backend = autoscaling_backends['us-east-1'] mock_autoscaling = base_decorator(autoscaling_backends) +mock_autoscaling_deprecated = deprecated_base_decorator(autoscaling_backends) diff --git a/moto/awslambda/__init__.py b/moto/awslambda/__init__.py index b27ff9a43..46bc90fbd 100644 --- a/moto/awslambda/__init__.py +++ b/moto/awslambda/__init__.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals from .models import lambda_backends -from ..core.models import MockAWS, base_decorator - +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator lambda_backend = lambda_backends['us-east-1'] mock_lambda = base_decorator(lambda_backends) +mock_lambda_deprecated = deprecated_base_decorator(lambda_backends) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 468a95766..708a8796e 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -9,7 +9,7 @@ from .models import lambda_backends class LambdaResponse(BaseResponse): - + @classmethod def root(cls, request, full_url, headers): if request.method == 'GET': @@ -38,11 +38,13 @@ class LambdaResponse(BaseResponse): def _invoke(self, request, full_url, headers): lambda_backend = self.get_lambda_backend(full_url) - function_name = request.path.split('/')[-2] + path = request.path if hasattr(request, 'path') else request.path_url + function_name = path.split('/')[-2] if lambda_backend.has_function(function_name): fn = lambda_backend.get_function(function_name) payload = fn.invoke(request, headers) + headers['Content-Length'] = str(len(payload)) return 202, headers, payload else: return 404, headers, "{}" @@ -68,7 +70,8 @@ class LambdaResponse(BaseResponse): def _delete_function(self, request, full_url, headers): lambda_backend = 
self.get_lambda_backend(full_url) - function_name = request.path.split('/')[-1] + path = request.path if hasattr(request, 'path') else request.path_url + function_name = path.split('/')[-1] if lambda_backend.has_function(function_name): lambda_backend.delete_function(function_name) @@ -79,7 +82,8 @@ class LambdaResponse(BaseResponse): def _get_function(self, request, full_url, headers): lambda_backend = self.get_lambda_backend(full_url) - function_name = request.path.split('/')[-1] + path = request.path if hasattr(request, 'path') else request.path_url + function_name = path.split('/')[-1] if lambda_backend.has_function(function_name): fn = lambda_backend.get_function(function_name) @@ -87,7 +91,7 @@ class LambdaResponse(BaseResponse): return 200, headers, json.dumps(code) else: return 404, headers, "{}" - + def get_lambda_backend(self, full_url): from moto.awslambda.models import lambda_backends region = self._get_aws_region(full_url) diff --git a/moto/cloudformation/__init__.py b/moto/cloudformation/__init__.py index 0e4bd28dd..47e840ec6 100644 --- a/moto/cloudformation/__init__.py +++ b/moto/cloudformation/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import cloudformation_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator cloudformation_backend = cloudformation_backends['us-east-1'] mock_cloudformation = base_decorator(cloudformation_backends) +mock_cloudformation_deprecated = deprecated_base_decorator(cloudformation_backends) diff --git a/moto/cloudwatch/__init__.py b/moto/cloudwatch/__init__.py index ea4bf7185..17d1c0c50 100644 --- a/moto/cloudwatch/__init__.py +++ b/moto/cloudwatch/__init__.py @@ -1,5 +1,6 @@ from .models import cloudwatch_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator cloudwatch_backend = 
cloudwatch_backends['us-east-1'] mock_cloudwatch = base_decorator(cloudwatch_backends) +mock_cloudwatch_deprecated = deprecated_base_decorator(cloudwatch_backends) diff --git a/moto/core/models.py b/moto/core/models.py index b6f4f541f..fa6b74834 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -1,22 +1,23 @@ from __future__ import unicode_literals +from __future__ import absolute_import import functools import inspect import re -from httpretty import HTTPretty +from moto.packages.responses import responses +from moto.packages.httpretty import HTTPretty from .responses import metadata_response -from .utils import convert_regex_to_flask_path +from .utils import convert_regex_to_flask_path, convert_flask_to_responses_response - -class MockAWS(object): +class BaseMockAWS(object): nested_count = 0 def __init__(self, backends): self.backends = backends if self.__class__.nested_count == 0: - HTTPretty.reset() + self.reset() def __call__(self, func, reset=True): if inspect.isclass(func): @@ -35,24 +36,7 @@ class MockAWS(object): for backend in self.backends.values(): backend.reset() - if not HTTPretty.is_enabled(): - HTTPretty.enable() - - for method in HTTPretty.METHODS: - backend = list(self.backends.values())[0] - for key, value in backend.urls.items(): - HTTPretty.register_uri( - method=method, - uri=re.compile(key), - body=value, - ) - - # Mock out localhost instance metadata - HTTPretty.register_uri( - method=method, - uri=re.compile('http://169.254.169.254/latest/meta-data/.*'), - body=metadata_response - ) + self.enable_patching() def stop(self): self.__class__.nested_count -= 1 @@ -60,9 +44,7 @@ class MockAWS(object): if self.__class__.nested_count < 0: raise RuntimeError('Called stop() before start().') - if self.__class__.nested_count == 0: - HTTPretty.disable() - HTTPretty.reset() + self.disable_patching() def decorate_callable(self, func, reset): def wrapper(*args, **kwargs): @@ -97,6 +79,73 @@ class MockAWS(object): return klass +class 
HttprettyMockAWS(BaseMockAWS): + def reset(self): + HTTPretty.reset() + + def enable_patching(self): + if not HTTPretty.is_enabled(): + HTTPretty.enable() + + for method in HTTPretty.METHODS: + backend = list(self.backends.values())[0] + for key, value in backend.urls.items(): + HTTPretty.register_uri( + method=method, + uri=re.compile(key), + body=value, + ) + + # Mock out localhost instance metadata + HTTPretty.register_uri( + method=method, + uri=re.compile('http://169.254.169.254/latest/meta-data/.*'), + body=metadata_response + ) + + def disable_patching(self): + if self.__class__.nested_count == 0: + HTTPretty.disable() + HTTPretty.reset() + + +RESPONSES_METHODS = [responses.GET, responses.DELETE, responses.HEAD, + responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT] + + +class ResponsesMockAWS(BaseMockAWS): + def reset(self): + responses.reset() + + def enable_patching(self): + responses.start() + for method in RESPONSES_METHODS: + backend = list(self.backends.values())[0] + for key, value in backend.urls.items(): + responses.add_callback( + method=method, + url=re.compile(key), + callback=convert_flask_to_responses_response(value), + ) + + # Mock out localhost instance metadata + responses.add_callback( + method=method, + url=re.compile('http://169.254.169.254/latest/meta-data/.*'), + callback=convert_flask_to_responses_response(metadata_response), + ) + for pattern in responses.mock._urls: + pattern['stream'] = True + + def disable_patching(self): + if self.__class__.nested_count == 0: + try: + responses.stop() + except AttributeError: + pass + responses.reset() +MockAWS = ResponsesMockAWS + class Model(type): def __new__(self, clsname, bases, namespace): cls = super(Model, self).__new__(self, clsname, bases, namespace) @@ -187,6 +236,12 @@ class BaseBackend(object): else: return MockAWS({'global': self}) + def deprecated_decorator(self, func=None): + if func: + return HttprettyMockAWS({'global': self})(func) + else: + return 
HttprettyMockAWS({'global': self}) + class base_decorator(object): mock_backend = MockAWS @@ -199,3 +254,7 @@ class base_decorator(object): return self.mock_backend(self.backends)(func) else: return self.mock_backend(self.backends) + + +class deprecated_base_decorator(base_decorator): + mock_backend = HttprettyMockAWS diff --git a/moto/core/responses.py b/moto/core/responses.py index af4217245..337227d3c 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -138,7 +138,7 @@ class BaseResponse(_TemplateEnvironmentMixin): flat = flatten_json_request_body('', decoded, input_spec) for key, value in flat.items(): querystring[key] = [value] - else: + elif self.body: querystring.update(parse_qs(self.body, keep_blank_values=True)) if not querystring: querystring.update(headers) @@ -152,6 +152,8 @@ class BaseResponse(_TemplateEnvironmentMixin): self.region = self.get_region_from_url(full_url) self.headers = request.headers + if 'host' not in self.headers: + self.headers['host'] = urlparse(full_url).netloc self.response_headers = headers def get_region_from_url(self, full_url): @@ -189,6 +191,9 @@ class BaseResponse(_TemplateEnvironmentMixin): body, new_headers = response status = new_headers.get('status', 200) headers.update(new_headers) + # Cast status to string + if "status" in headers: + headers['status'] = str(headers['status']) return status, headers, body raise NotImplementedError("The {0} action has not been implemented".format(action)) @@ -327,6 +332,7 @@ def metadata_response(request, full_url, headers): http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html """ + parsed_url = urlparse(full_url) tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1) credentials = dict( diff --git a/moto/core/utils.py b/moto/core/utils.py index 0b30556ac..0f4b20b6d 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -103,6 +103,28 @@ class convert_flask_to_httpretty_response(object): return response, status, headers 
+class convert_flask_to_responses_response(object): + + def __init__(self, callback): + self.callback = callback + + @property + def __name__(self): + # For instance methods, use class and method names. Otherwise + # use module and method name + if inspect.ismethod(self.callback): + outer = self.callback.__self__.__class__.__name__ + else: + outer = self.callback.__module__ + return "{0}.{1}".format(outer, self.callback.__name__) + + def __call__(self, request, *args, **kwargs): + result = self.callback(request, request.url, request.headers) + # result is a status, headers, response tuple + status, headers, response = result + return status, headers, response + + def iso_8601_datetime_with_milliseconds(datetime): return datetime.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z' diff --git a/moto/datapipeline/__init__.py b/moto/datapipeline/__init__.py index dd013526e..cebcf22bf 100644 --- a/moto/datapipeline/__init__.py +++ b/moto/datapipeline/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import datapipeline_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator datapipeline_backend = datapipeline_backends['us-east-1'] mock_datapipeline = base_decorator(datapipeline_backends) +mock_datapipeline_deprecated = deprecated_base_decorator(datapipeline_backends) diff --git a/moto/dynamodb/__init__.py b/moto/dynamodb/__init__.py index 6f2509f79..008050317 100644 --- a/moto/dynamodb/__init__.py +++ b/moto/dynamodb/__init__.py @@ -1,3 +1,4 @@ from __future__ import unicode_literals from .models import dynamodb_backend mock_dynamodb = dynamodb_backend.decorator +mock_dynamodb_deprecated = dynamodb_backend.deprecated_decorator diff --git a/moto/dynamodb2/__init__.py b/moto/dynamodb2/__init__.py index 8579c48df..f0892d13f 100644 --- a/moto/dynamodb2/__init__.py +++ b/moto/dynamodb2/__init__.py @@ -1,3 +1,4 @@ from __future__ import unicode_literals 
from .models import dynamodb_backend2 mock_dynamodb2 = dynamodb_backend2.decorator +mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator \ No newline at end of file diff --git a/moto/ec2/__init__.py b/moto/ec2/__init__.py index b269e933b..608173577 100644 --- a/moto/ec2/__init__.py +++ b/moto/ec2/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import ec2_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator ec2_backend = ec2_backends['us-east-1'] mock_ec2 = base_decorator(ec2_backends) +mock_ec2_deprecated = deprecated_base_decorator(ec2_backends) diff --git a/moto/ecs/__init__.py b/moto/ecs/__init__.py index 9c07a0d55..6864355ad 100644 --- a/moto/ecs/__init__.py +++ b/moto/ecs/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import ecs_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator ecs_backend = ecs_backends['us-east-1'] mock_ecs = base_decorator(ecs_backends) +mock_ecs_deprecated = deprecated_base_decorator(ecs_backends) diff --git a/moto/elb/__init__.py b/moto/elb/__init__.py index 376dfe0e1..a8e8dab8d 100644 --- a/moto/elb/__init__.py +++ b/moto/elb/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import elb_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator elb_backend = elb_backends['us-east-1'] mock_elb = base_decorator(elb_backends) +mock_elb_deprecated = deprecated_base_decorator(elb_backends) diff --git a/moto/emr/__init__.py b/moto/emr/__init__.py index f79df39fa..fc6b4d4ab 100644 --- a/moto/emr/__init__.py +++ b/moto/emr/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import emr_backends -from 
..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator emr_backend = emr_backends['us-east-1'] mock_emr = base_decorator(emr_backends) +mock_emr_deprecated = deprecated_base_decorator(emr_backends) diff --git a/moto/glacier/__init__.py b/moto/glacier/__init__.py index 3256462a3..49b3375e1 100644 --- a/moto/glacier/__init__.py +++ b/moto/glacier/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import glacier_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator glacier_backend = glacier_backends['us-east-1'] mock_glacier = base_decorator(glacier_backends) +mock_glacier_deprecated = deprecated_base_decorator(glacier_backends) diff --git a/moto/iam/__init__.py b/moto/iam/__init__.py index 483969e19..02519cbc9 100644 --- a/moto/iam/__init__.py +++ b/moto/iam/__init__.py @@ -1,3 +1,4 @@ from __future__ import unicode_literals from .models import iam_backend mock_iam = iam_backend.decorator +mock_iam_deprecated = iam_backend.deprecated_decorator \ No newline at end of file diff --git a/moto/kinesis/__init__.py b/moto/kinesis/__init__.py index 50bc07155..c3f06d5b1 100644 --- a/moto/kinesis/__init__.py +++ b/moto/kinesis/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import kinesis_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator kinesis_backend = kinesis_backends['us-east-1'] mock_kinesis = base_decorator(kinesis_backends) +mock_kinesis_deprecated = deprecated_base_decorator(kinesis_backends) diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index 264f53a2c..d0a90a61e 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -19,7 +19,10 @@ class KinesisResponse(BaseResponse): 
@property def is_firehose(self): - host = self.headers.get('host') or self.headers['Host'] + try: + host = self.headers.get('host') or self.headers['Host'] + except KeyError: + import pdb;pdb.set_trace() return host.startswith('firehose') def create_stream(self): diff --git a/moto/kms/__init__.py b/moto/kms/__init__.py index 4ee6dd2f4..b6bffa804 100644 --- a/moto/kms/__init__.py +++ b/moto/kms/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import kms_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator kms_backend = kms_backends['us-east-1'] mock_kms = base_decorator(kms_backends) +mock_kms_deprecated = deprecated_base_decorator(kms_backends) diff --git a/moto/opsworks/__init__.py b/moto/opsworks/__init__.py index ef5190997..75f49eba5 100644 --- a/moto/opsworks/__init__.py +++ b/moto/opsworks/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import opsworks_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator opsworks_backend = opsworks_backends['us-east-1'] mock_opsworks = base_decorator(opsworks_backends) diff --git a/moto/opsworks/urls.py b/moto/opsworks/urls.py index 6913de6bb..3d72bb0dd 100644 --- a/moto/opsworks/urls.py +++ b/moto/opsworks/urls.py @@ -4,7 +4,7 @@ from .responses import OpsWorksResponse # AWS OpsWorks has a single endpoint: opsworks.us-east-1.amazonaws.com # and only supports HTTPS requests. 
url_bases = [ - "opsworks.us-east-1.amazonaws.com" + "https?://opsworks.us-east-1.amazonaws.com" ] url_paths = { diff --git a/moto/packages/__init__.py b/moto/packages/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/packages/httpretty/__init__.py b/moto/packages/httpretty/__init__.py new file mode 100644 index 000000000..a752b452a --- /dev/null +++ b/moto/packages/httpretty/__init__.py @@ -0,0 +1,60 @@ +# #!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (C) <2011-2013> Gabriel Falcão +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+from __future__ import unicode_literals + +__version__ = version = '0.8.10' + +from .core import httpretty, httprettified, EmptyRequestHeaders +from .errors import HTTPrettyError, UnmockedError +from .core import URIInfo + +HTTPretty = httpretty +activate = httprettified + +enable = httpretty.enable +register_uri = httpretty.register_uri +disable = httpretty.disable +is_enabled = httpretty.is_enabled +reset = httpretty.reset +Response = httpretty.Response + +GET = httpretty.GET +PUT = httpretty.PUT +POST = httpretty.POST +DELETE = httpretty.DELETE +HEAD = httpretty.HEAD +PATCH = httpretty.PATCH +OPTIONS = httpretty.OPTIONS +CONNECT = httpretty.CONNECT + + +def last_request(): + """returns the last request""" + return httpretty.last_request + +def has_request(): + """returns a boolean indicating whether any request has been made""" + return not isinstance(httpretty.last_request.headers, EmptyRequestHeaders) diff --git a/moto/packages/httpretty/compat.py b/moto/packages/httpretty/compat.py new file mode 100644 index 000000000..6805cf638 --- /dev/null +++ b/moto/packages/httpretty/compat.py @@ -0,0 +1,100 @@ +# #!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# +# Copyright (C) <2011-2013> Gabriel Falcão +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +from __future__ import unicode_literals + +import sys +import types + +PY3 = sys.version_info[0] == 3 +if PY3: # pragma: no cover + text_type = str + byte_type = bytes + import io + StringIO = io.BytesIO + basestring = (str, bytes) + + class BaseClass(object): + def __repr__(self): + return self.__str__() +else: # pragma: no cover + text_type = unicode + byte_type = str + import StringIO + StringIO = StringIO.StringIO + basestring = basestring + + +class BaseClass(object): + def __repr__(self): + ret = self.__str__() + if PY3: # pragma: no cover + return ret + else: + return ret.encode('utf-8') + + +try: # pragma: no cover + from urllib.parse import urlsplit, urlunsplit, parse_qs, quote, quote_plus, unquote + unquote_utf8 = unquote +except ImportError: # pragma: no cover + from urlparse import urlsplit, urlunsplit, parse_qs, unquote + from urllib import quote, quote_plus + def unquote_utf8(qs): + if isinstance(qs, text_type): + qs = qs.encode('utf-8') + s = unquote(qs) + if isinstance(s, byte_type): + return s.decode("utf-8") + else: + return s + + +try: # pragma: no cover + from http.server import BaseHTTPRequestHandler +except ImportError: # pragma: no cover + from BaseHTTPServer import BaseHTTPRequestHandler + + +ClassTypes = (type,) +if not PY3: # pragma: no cover + ClassTypes = (type, types.ClassType) + + +__all__ = [ + 'PY3', + 'StringIO', + 'text_type', + 'byte_type', + 'BaseClass', + 'BaseHTTPRequestHandler', + 'quote', + 'quote_plus', + 'urlunsplit', + 'urlsplit', + 
'parse_qs', + 'ClassTypes', +] diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py new file mode 100644 index 000000000..4764cbba9 --- /dev/null +++ b/moto/packages/httpretty/core.py @@ -0,0 +1,1071 @@ +# #!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (C) <2011-2013> Gabriel Falcão +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+from __future__ import unicode_literals + +import re +import codecs +import inspect +import socket +import functools +import itertools +import warnings +import logging +import traceback +import json +import contextlib + + +from .compat import ( + PY3, + StringIO, + text_type, + BaseClass, + BaseHTTPRequestHandler, + quote, + quote_plus, + urlunsplit, + urlsplit, + parse_qs, + unquote, + unquote_utf8, + ClassTypes, + basestring +) +from .http import ( + STATUSES, + HttpBaseClass, + parse_requestline, + last_requestline, +) + +from .utils import ( + utf8, + decode_utf8, +) + +from .errors import HTTPrettyError, UnmockedError + +from datetime import datetime +from datetime import timedelta +from errno import EAGAIN + +old_socket = socket.socket +old_create_connection = socket.create_connection +old_gethostbyname = socket.gethostbyname +old_gethostname = socket.gethostname +old_getaddrinfo = socket.getaddrinfo +old_socksocket = None +old_ssl_wrap_socket = None +old_sslwrap_simple = None +old_sslsocket = None + +if PY3: # pragma: no cover + basestring = (bytes, str) +try: # pragma: no cover + import socks + old_socksocket = socks.socksocket +except ImportError: + socks = None + +try: # pragma: no cover + import ssl + old_ssl_wrap_socket = ssl.wrap_socket + if not PY3: + old_sslwrap_simple = ssl.sslwrap_simple + old_sslsocket = ssl.SSLSocket +except ImportError: # pragma: no cover + ssl = None + + +DEFAULT_HTTP_PORTS = frozenset([80]) +POTENTIAL_HTTP_PORTS = set(DEFAULT_HTTP_PORTS) +DEFAULT_HTTPS_PORTS = frozenset([443]) +POTENTIAL_HTTPS_PORTS = set(DEFAULT_HTTPS_PORTS) + + +class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass): + """Represents a HTTP request. It takes a valid multi-line, `\r\n` + separated string with HTTP headers and parse them out using the + internal `parse_request` method. + + It also replaces the `rfile` and `wfile` attributes with StringIO + instances so that we garantee that it won't make any I/O, neighter + for writing nor reading. 
+ + It has some convenience attributes: + + `headers` -> a mimetype object that can be cast into a dictionary, + contains all the request headers + + `method` -> the HTTP method used in this request + + `querystring` -> a dictionary containing lists with the + attributes. Please notice that if you need a single value from a + query string you will need to get it manually like: + + ```python + >>> request.querystring + {'name': ['Gabriel Falcao']} + >>> print request.querystring['name'][0] + ``` + + `parsed_body` -> a dictionary containing parsed request body or + None if HTTPrettyRequest doesn't know how to parse it. It + currently supports parsing body data that was sent under the + `content-type` headers values: 'application/json' or + 'application/x-www-form-urlencoded' + """ + def __init__(self, headers, body=''): + # first of all, lets make sure that if headers or body are + # unicode strings, it must be converted into a utf-8 encoded + # byte string + self.raw_headers = utf8(headers.strip()) + self.body = utf8(body) + + # Now let's concatenate the headers with the body, and create + # `rfile` based on it + self.rfile = StringIO(b'\r\n\r\n'.join([self.raw_headers, self.body])) + self.wfile = StringIO() # Creating `wfile` as an empty + # StringIO, just to avoid any real + # I/O calls + + # parsing the request line preemptively + self.raw_requestline = self.rfile.readline() + + # initiating the error attributes with None + self.error_code = None + self.error_message = None + + # Parse the request based on the attributes above + if not self.parse_request(): + return + + # making the HTTP method string available as the command + self.method = self.command + + # Now 2 convenient attributes for the HTTPretty API: + + # `querystring` holds a dictionary with the parsed query string + try: + self.path = self.path.encode('iso-8859-1') + except UnicodeDecodeError: + pass + + self.path = decode_utf8(self.path) + + qstring = self.path.split("?", 1)[-1] + self.querystring = 
self.parse_querystring(qstring) + + # And the body will be attempted to be parsed as + # `application/json` or `application/x-www-form-urlencoded` + self.parsed_body = self.parse_request_body(self.body) + + def __str__(self): + return ''.format( + self.headers.get('content-type', ''), + len(self.headers), + len(self.body), + ) + + def parse_querystring(self, qs): + expanded = unquote_utf8(qs) + parsed = parse_qs(expanded) + result = {} + for k in parsed: + result[k] = list(map(decode_utf8, parsed[k])) + + return result + + def parse_request_body(self, body): + """ Attempt to parse the post based on the content-type passed. Return the regular body if not """ + + PARSING_FUNCTIONS = { + 'application/json': json.loads, + 'text/json': json.loads, + 'application/x-www-form-urlencoded': self.parse_querystring, + } + FALLBACK_FUNCTION = lambda x: x + + content_type = self.headers.get('content-type', '') + + do_parse = PARSING_FUNCTIONS.get(content_type, FALLBACK_FUNCTION) + try: + body = decode_utf8(body) + return do_parse(body) + except: + return body + + +class EmptyRequestHeaders(dict): + pass + + +class HTTPrettyRequestEmpty(object): + body = '' + headers = EmptyRequestHeaders() + + +class FakeSockFile(StringIO): + def close(self): + self.socket.close() + StringIO.close(self) + + +class FakeSSLSocket(object): + def __init__(self, sock, *args, **kw): + self._httpretty_sock = sock + + def __getattr__(self, attr): + return getattr(self._httpretty_sock, attr) + + +class fakesock(object): + class socket(object): + _entry = None + debuglevel = 0 + _sent_data = [] + + def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, + protocol=0): + self.truesock = (old_socket(family, type, protocol) + if httpretty.allow_net_connect + else None) + self._closed = True + self.fd = FakeSockFile() + self.fd.socket = self + self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT + self._sock = self + self.is_http = False + self._bufsize = 1024 + + def getpeercert(self, *a, **kw): + now = 
datetime.now() + shift = now + timedelta(days=30 * 12) + return { + 'notAfter': shift.strftime('%b %d %H:%M:%S GMT'), + 'subjectAltName': ( + ('DNS', '*%s' % self._host), + ('DNS', self._host), + ('DNS', '*'), + ), + 'subject': ( + ( + ('organizationName', '*.%s' % self._host), + ), + ( + ('organizationalUnitName', + 'Domain Control Validated'), + ), + ( + ('commonName', '*.%s' % self._host), + ), + ), + } + + def ssl(self, sock, *args, **kw): + return sock + + def setsockopt(self, level, optname, value): + if self.truesock: + self.truesock.setsockopt(level, optname, value) + + def connect(self, address): + self._closed = False + + try: + self._address = (self._host, self._port) = address + except ValueError: + # We get here when the address is just a string pointing to a + # unix socket path/file + # + # See issue #206 + self.is_http = False + else: + self.is_http = self._port in POTENTIAL_HTTP_PORTS | POTENTIAL_HTTPS_PORTS + + if not self.is_http: + if self.truesock: + self.truesock.connect(self._address) + else: + raise UnmockedError() + + def close(self): + if not (self.is_http and self._closed): + if self.truesock: + self.truesock.close() + self._closed = True + + def makefile(self, mode='r', bufsize=-1): + """Returns this fake socket's own StringIO buffer. + + If there is an entry associated with the socket, the file + descriptor gets filled in with the entry data before being + returned. + """ + self._mode = mode + self._bufsize = bufsize + + if self._entry: + self._entry.fill_filekind(self.fd) + + return self.fd + + def real_sendall(self, data, *args, **kw): + """Sends data to the remote server. This method is called + when HTTPretty identifies that someone is trying to send + non-http data. + + The received bytes are written in this socket's StringIO + buffer so that HTTPretty can return it accordingly when + necessary. 
+ """ + + if not self.truesock: + raise UnmockedError() + + if not self.is_http: + return self.truesock.sendall(data, *args, **kw) + + self.truesock.connect(self._address) + + self.truesock.setblocking(1) + self.truesock.sendall(data, *args, **kw) + + should_continue = True + while should_continue: + try: + received = self.truesock.recv(self._bufsize) + self.fd.write(received) + should_continue = len(received) == self._bufsize + + except socket.error as e: + if e.errno == EAGAIN: + continue + break + + self.fd.seek(0) + + def sendall(self, data, *args, **kw): + self._sent_data.append(data) + self.fd = FakeSockFile() + self.fd.socket = self + try: + requestline, _ = data.split(b'\r\n', 1) + method, path, version = parse_requestline(decode_utf8(requestline)) + is_parsing_headers = True + except ValueError: + is_parsing_headers = False + + if not self._entry: + # If the previous request wasn't mocked, don't mock the subsequent sending of data + return self.real_sendall(data, *args, **kw) + + self.fd.seek(0) + + if not is_parsing_headers: + if len(self._sent_data) > 1: + headers = utf8(last_requestline(self._sent_data)) + meta = self._entry.request.headers + body = utf8(self._sent_data[-1]) + if meta.get('transfer-encoding', '') == 'chunked': + if not body.isdigit() and body != b'\r\n' and body != b'0\r\n\r\n': + self._entry.request.body += body + else: + self._entry.request.body += body + + httpretty.historify_request(headers, body, False) + return + + # path might come with + s = urlsplit(path) + POTENTIAL_HTTP_PORTS.add(int(s.port or 80)) + headers, body = list(map(utf8, data.split(b'\r\n\r\n', 1))) + + request = httpretty.historify_request(headers, body) + + info = URIInfo(hostname=self._host, port=self._port, + path=s.path, + query=s.query, + last_request=request) + + matcher, entries = httpretty.match_uriinfo(info) + + if not entries: + self._entry = None + self.real_sendall(data) + return + + self._entry = matcher.get_next_entry(method, info, request) + + def 
debug(self, truesock_func, *a, **kw): + if self.is_http: + frame = inspect.stack()[0][0] + lines = list(map(utf8, traceback.format_stack(frame))) + + message = [ + "HTTPretty intercepted and unexpected socket method call.", + ("Please open an issue at " + "'https://github.com/gabrielfalcao/HTTPretty/issues'"), + "And paste the following traceback:\n", + "".join(decode_utf8(lines)), + ] + raise RuntimeError("\n".join(message)) + if not self.truesock: + raise UnmockedError() + return getattr(self.truesock, truesock_func)(*a, **kw) + + def settimeout(self, new_timeout): + self.timeout = new_timeout + + def send(self, *args, **kwargs): + return self.debug('send', *args, **kwargs) + + def sendto(self, *args, **kwargs): + return self.debug('sendto', *args, **kwargs) + + def recvfrom_into(self, *args, **kwargs): + return self.debug('recvfrom_into', *args, **kwargs) + + def recv_into(self, *args, **kwargs): + return self.debug('recv_into', *args, **kwargs) + + def recvfrom(self, *args, **kwargs): + return self.debug('recvfrom', *args, **kwargs) + + def recv(self, *args, **kwargs): + return self.debug('recv', *args, **kwargs) + + def __getattr__(self, name): + if not self.truesock: + raise UnmockedError() + return getattr(self.truesock, name) + + +def fake_wrap_socket(s, *args, **kw): + return s + + +def create_fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): + s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) + if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + s.settimeout(timeout) + if source_address: + s.bind(source_address) + s.connect(address) + return s + + +def fake_gethostbyname(host): + return '127.0.0.1' + + +def fake_gethostname(): + return 'localhost' + + +def fake_getaddrinfo( + host, port, family=None, socktype=None, proto=None, flags=None): + return [(2, 1, 6, '', (host, port))] + + +class Entry(BaseClass): + def __init__(self, method, uri, body, + adding_headers=None, + forcing_headers=None, + 
status=200, + streaming=False, + **headers): + + self.method = method + self.uri = uri + self.info = None + self.request = None + + self.body_is_callable = False + if hasattr(body, "__call__"): + self.callable_body = body + self.body = None + self.body_is_callable = True + elif isinstance(body, text_type): + self.body = utf8(body) + else: + self.body = body + + self.streaming = streaming + if not streaming and not self.body_is_callable: + self.body_length = len(self.body or '') + else: + self.body_length = 0 + + self.adding_headers = adding_headers or {} + self.forcing_headers = forcing_headers or {} + self.status = int(status) + + for k, v in headers.items(): + name = "-".join(k.split("_")).title() + self.adding_headers[name] = v + + self.validate() + + def validate(self): + content_length_keys = 'Content-Length', 'content-length' + for key in content_length_keys: + got = self.adding_headers.get( + key, self.forcing_headers.get(key, None)) + + if got is None: + continue + + try: + igot = int(got) + except ValueError: + warnings.warn( + 'HTTPretty got to register the Content-Length header ' \ + 'with "%r" which is not a number' % got, + ) + + if igot > self.body_length: + raise HTTPrettyError( + 'HTTPretty got inconsistent parameters. The header ' \ + 'Content-Length you registered expects size "%d" but ' \ + 'the body you registered for that has actually length ' \ + '"%d".' 
% ( + igot, self.body_length, + ) + ) + + def __str__(self): + return r'' % ( + self.method, self.uri, self.status) + + def normalize_headers(self, headers): + new = {} + for k in headers: + new_k = '-'.join([s.lower() for s in k.split('-')]) + new[new_k] = headers[k] + + return new + + def fill_filekind(self, fk): + now = datetime.utcnow() + + headers = { + 'status': self.status, + 'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'), + 'server': 'Python/HTTPretty', + 'connection': 'close', + } + + if self.forcing_headers: + headers = self.forcing_headers + + if self.adding_headers: + headers.update(self.normalize_headers(self.adding_headers)) + + headers = self.normalize_headers(headers) + status = headers.get('status', self.status) + if self.body_is_callable: + status, headers, self.body = self.callable_body(self.request, self.info.full_url(), headers) + if self.request.method != "HEAD": + headers.update({ + 'content-length': len(self.body) + }) + + string_list = [ + 'HTTP/1.1 %d %s' % (status, STATUSES[status]), + ] + + if 'date' in headers: + string_list.append('date: %s' % headers.pop('date')) + + if not self.forcing_headers: + content_type = headers.pop('content-type', + 'text/plain; charset=utf-8') + + content_length = headers.pop('content-length', self.body_length) + + string_list.append('content-type: %s' % content_type) + if not self.streaming: + string_list.append('content-length: %s' % content_length) + + string_list.append('server: %s' % headers.pop('server')) + + for k, v in headers.items(): + string_list.append( + '{0}: {1}'.format(k, v), + ) + + for item in string_list: + fk.write(utf8(item) + b'\n') + + fk.write(b'\r\n') + + if self.streaming: + self.body, body = itertools.tee(self.body) + for chunk in body: + fk.write(utf8(chunk)) + else: + fk.write(utf8(self.body)) + + fk.seek(0) + + +def url_fix(s, charset='utf-8'): + scheme, netloc, path, querystring, fragment = urlsplit(s) + path = quote(path, b'/%') + querystring = quote_plus(querystring, 
b':&=') + return urlunsplit((scheme, netloc, path, querystring, fragment)) + + +class URIInfo(BaseClass): + def __init__(self, + username='', + password='', + hostname='', + port=80, + path='/', + query='', + fragment='', + scheme='', + last_request=None): + + self.username = username or '' + self.password = password or '' + self.hostname = hostname or '' + + if port: + port = int(port) + + elif scheme == 'https': + port = 443 + + self.port = port or 80 + self.path = path or '' + self.query = query or '' + if scheme: + self.scheme = scheme + elif self.port in POTENTIAL_HTTPS_PORTS: + self.scheme = 'https' + else: + self.scheme = 'http' + self.fragment = fragment or '' + self.last_request = last_request + + def __str__(self): + attrs = ( + 'username', + 'password', + 'hostname', + 'port', + 'path', + ) + fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs]) + return r'' % fmt + + def __hash__(self): + return hash(text_type(self)) + + def __eq__(self, other): + self_tuple = ( + self.port, + decode_utf8(self.hostname.lower()), + url_fix(decode_utf8(self.path)), + ) + other_tuple = ( + other.port, + decode_utf8(other.hostname.lower()), + url_fix(decode_utf8(other.path)), + ) + return self_tuple == other_tuple + + def full_url(self, use_querystring=True): + credentials = "" + if self.password: + credentials = "{0}:{1}@".format( + self.username, self.password) + + query = "" + if use_querystring and self.query: + query = "?{0}".format(decode_utf8(self.query)) + + result = "{scheme}://{credentials}{domain}{path}{query}".format( + scheme=self.scheme, + credentials=credentials, + domain=self.get_full_domain(), + path=decode_utf8(self.path), + query=query + ) + return result + + def get_full_domain(self): + hostname = decode_utf8(self.hostname) + # Port 80/443 should not be appended to the url + if self.port not in DEFAULT_HTTP_PORTS | DEFAULT_HTTPS_PORTS: + return ":".join([hostname, str(self.port)]) + + return hostname + + @classmethod + def from_uri(cls, 
uri, entry): + result = urlsplit(uri) + if result.scheme == 'https': + POTENTIAL_HTTPS_PORTS.add(int(result.port or 443)) + else: + POTENTIAL_HTTP_PORTS.add(int(result.port or 80)) + return cls(result.username, + result.password, + result.hostname, + result.port, + result.path, + result.query, + result.fragment, + result.scheme, + entry) + + +class URIMatcher(object): + regex = None + info = None + + def __init__(self, uri, entries, match_querystring=False): + self._match_querystring = match_querystring + if type(uri).__name__ == 'SRE_Pattern': + self.regex = uri + result = urlsplit(uri.pattern) + if result.scheme == 'https': + POTENTIAL_HTTPS_PORTS.add(int(result.port or 443)) + else: + POTENTIAL_HTTP_PORTS.add(int(result.port or 80)) + else: + self.info = URIInfo.from_uri(uri, entries) + + self.entries = entries + + #hash of current_entry pointers, per method. + self.current_entries = {} + + def matches(self, info): + if self.info: + return self.info == info + else: + return self.regex.search(info.full_url( + use_querystring=self._match_querystring)) + + def __str__(self): + wrap = 'URLMatcher({0})' + if self.info: + return wrap.format(text_type(self.info)) + else: + return wrap.format(self.regex.pattern) + + def get_next_entry(self, method, info, request): + """Cycle through available responses, but only once. 
+ Any subsequent requests will receive the last response""" + + if method not in self.current_entries: + self.current_entries[method] = 0 + + #restrict selection to entries that match the requested method + entries_for_method = [e for e in self.entries if e.method == method] + + if self.current_entries[method] >= len(entries_for_method): + self.current_entries[method] = -1 + + if not self.entries or not entries_for_method: + raise ValueError('I have no entries for method %s: %s' + % (method, self)) + + entry = entries_for_method[self.current_entries[method]] + if self.current_entries[method] != -1: + self.current_entries[method] += 1 + + # Attach more info to the entry + # So the callback can be more clever about what to do + # This does also fix the case where the callback + # would be handed a compiled regex as uri instead of the + # real uri + entry.info = info + entry.request = request + return entry + + def __hash__(self): + return hash(text_type(self)) + + def __eq__(self, other): + return text_type(self) == text_type(other) + + +class httpretty(HttpBaseClass): + """The URI registration class""" + _entries = {} + latest_requests = [] + + last_request = HTTPrettyRequestEmpty() + _is_enabled = False + allow_net_connect = True + + @classmethod + def match_uriinfo(cls, info): + for matcher, value in cls._entries.items(): + if matcher.matches(info): + return (matcher, info) + + return (None, []) + + @classmethod + @contextlib.contextmanager + def record(cls, filename, indentation=4, encoding='utf-8'): + try: + import urllib3 + except ImportError: + raise RuntimeError('HTTPretty requires urllib3 installed for recording actual requests.') + + + http = urllib3.PoolManager() + + cls.enable() + calls = [] + def record_request(request, uri, headers): + cls.disable() + + response = http.request(request.method, uri) + calls.append({ + 'request': { + 'uri': uri, + 'method': request.method, + 'headers': dict(request.headers), + 'body': decode_utf8(request.body), + 
'querystring': request.querystring + }, + 'response': { + 'status': response.status, + 'body': decode_utf8(response.data), + 'headers': dict(response.headers) + } + }) + cls.enable() + return response.status, response.headers, response.data + + for method in cls.METHODS: + cls.register_uri(method, re.compile(r'.*', re.M), body=record_request) + + yield + cls.disable() + with codecs.open(filename, 'w', encoding) as f: + f.write(json.dumps(calls, indent=indentation)) + + @classmethod + @contextlib.contextmanager + def playback(cls, origin): + cls.enable() + + data = json.loads(open(origin).read()) + for item in data: + uri = item['request']['uri'] + method = item['request']['method'] + cls.register_uri(method, uri, body=item['response']['body'], forcing_headers=item['response']['headers']) + + yield + cls.disable() + + @classmethod + def reset(cls): + POTENTIAL_HTTP_PORTS.intersection_update(DEFAULT_HTTP_PORTS) + POTENTIAL_HTTPS_PORTS.intersection_update(DEFAULT_HTTPS_PORTS) + cls._entries.clear() + cls.latest_requests = [] + cls.last_request = HTTPrettyRequestEmpty() + + @classmethod + def historify_request(cls, headers, body='', append=True): + request = HTTPrettyRequest(headers, body) + cls.last_request = request + if append or not cls.latest_requests: + cls.latest_requests.append(request) + else: + cls.latest_requests[-1] = request + return request + + @classmethod + def register_uri(cls, method, uri, body='HTTPretty :)', + adding_headers=None, + forcing_headers=None, + status=200, + responses=None, match_querystring=False, + **headers): + + uri_is_string = isinstance(uri, basestring) + + if uri_is_string and re.search(r'^\w+://[^/]+[.]\w{2,}$', uri): + uri += '/' + + if isinstance(responses, list) and len(responses) > 0: + for response in responses: + response.uri = uri + response.method = method + entries_for_this_uri = responses + else: + headers[str('body')] = body + headers[str('adding_headers')] = adding_headers + headers[str('forcing_headers')] = 
forcing_headers + headers[str('status')] = status + + entries_for_this_uri = [ + cls.Response(method=method, uri=uri, **headers), + ] + + matcher = URIMatcher(uri, entries_for_this_uri, + match_querystring) + if matcher in cls._entries: + matcher.entries.extend(cls._entries[matcher]) + del cls._entries[matcher] + + cls._entries[matcher] = entries_for_this_uri + + def __str__(self): + return '' % len(self._entries) + + @classmethod + def Response(cls, body, method=None, uri=None, adding_headers=None, forcing_headers=None, + status=200, streaming=False, **headers): + + headers[str('body')] = body + headers[str('adding_headers')] = adding_headers + headers[str('forcing_headers')] = forcing_headers + headers[str('status')] = int(status) + headers[str('streaming')] = streaming + return Entry(method, uri, **headers) + + @classmethod + def disable(cls): + cls._is_enabled = False + socket.socket = old_socket + socket.SocketType = old_socket + socket._socketobject = old_socket + + socket.create_connection = old_create_connection + socket.gethostname = old_gethostname + socket.gethostbyname = old_gethostbyname + socket.getaddrinfo = old_getaddrinfo + + socket.__dict__['socket'] = old_socket + socket.__dict__['_socketobject'] = old_socket + socket.__dict__['SocketType'] = old_socket + + socket.__dict__['create_connection'] = old_create_connection + socket.__dict__['gethostname'] = old_gethostname + socket.__dict__['gethostbyname'] = old_gethostbyname + socket.__dict__['getaddrinfo'] = old_getaddrinfo + + if socks: + socks.socksocket = old_socksocket + socks.__dict__['socksocket'] = old_socksocket + + if ssl: + ssl.wrap_socket = old_ssl_wrap_socket + ssl.SSLSocket = old_sslsocket + ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket + ssl.__dict__['SSLSocket'] = old_sslsocket + + if not PY3: + ssl.sslwrap_simple = old_sslwrap_simple + ssl.__dict__['sslwrap_simple'] = old_sslwrap_simple + + @classmethod + def is_enabled(cls): + return cls._is_enabled + + @classmethod + def 
enable(cls): + cls._is_enabled = True + # Some versions of python internally shadowed the + # SocketType variable incorrectly https://bugs.python.org/issue20386 + bad_socket_shadow = (socket.socket != socket.SocketType) + + socket.socket = fakesock.socket + socket._socketobject = fakesock.socket + if not bad_socket_shadow: + socket.SocketType = fakesock.socket + + socket.create_connection = create_fake_connection + socket.gethostname = fake_gethostname + socket.gethostbyname = fake_gethostbyname + socket.getaddrinfo = fake_getaddrinfo + + socket.__dict__['socket'] = fakesock.socket + socket.__dict__['_socketobject'] = fakesock.socket + if not bad_socket_shadow: + socket.__dict__['SocketType'] = fakesock.socket + + socket.__dict__['create_connection'] = create_fake_connection + socket.__dict__['gethostname'] = fake_gethostname + socket.__dict__['gethostbyname'] = fake_gethostbyname + socket.__dict__['getaddrinfo'] = fake_getaddrinfo + + if socks: + socks.socksocket = fakesock.socket + socks.__dict__['socksocket'] = fakesock.socket + + if ssl: + ssl.wrap_socket = fake_wrap_socket + ssl.SSLSocket = FakeSSLSocket + + ssl.__dict__['wrap_socket'] = fake_wrap_socket + ssl.__dict__['SSLSocket'] = FakeSSLSocket + + if not PY3: + ssl.sslwrap_simple = fake_wrap_socket + ssl.__dict__['sslwrap_simple'] = fake_wrap_socket + + +def httprettified(test): + "A decorator tests that use HTTPretty" + def decorate_class(klass): + for attr in dir(klass): + if not attr.startswith('test_'): + continue + + attr_value = getattr(klass, attr) + if not hasattr(attr_value, "__call__"): + continue + + setattr(klass, attr, decorate_callable(attr_value)) + return klass + + def decorate_callable(test): + @functools.wraps(test) + def wrapper(*args, **kw): + httpretty.reset() + httpretty.enable() + try: + return test(*args, **kw) + finally: + httpretty.disable() + return wrapper + + if isinstance(test, ClassTypes): + return decorate_class(test) + return decorate_callable(test) diff --git 
a/moto/packages/httpretty/errors.py b/moto/packages/httpretty/errors.py new file mode 100644 index 000000000..cb6479bf5 --- /dev/null +++ b/moto/packages/httpretty/errors.py @@ -0,0 +1,39 @@ +# #!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# +# Copyright (C) <2011-2013> Gabriel Falcão +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +from __future__ import unicode_literals + + +class HTTPrettyError(Exception): + pass + + +class UnmockedError(HTTPrettyError): + def __init__(self): + super(UnmockedError, self).__init__( + 'No mocking was registered, and real connections are ' + 'not allowed (httpretty.allow_net_connect = False).' 
+ ) diff --git a/moto/packages/httpretty/http.py b/moto/packages/httpretty/http.py new file mode 100644 index 000000000..7e9a56885 --- /dev/null +++ b/moto/packages/httpretty/http.py @@ -0,0 +1,155 @@ +# #!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (C) <2011-2013> Gabriel Falcão +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+from __future__ import unicode_literals + +import re +from .compat import BaseClass +from .utils import decode_utf8 + + +STATUSES = { + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Non-Authoritative Information", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request a Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Requested Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 424: "Method Failure", + 425: "Unordered Collection", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental Controls", + 451: "Unavailable For Legal Reasons", + 451: "Redirect", + 494: "Request Header Too Large", + 495: "Cert Error", + 496: "No Cert", + 497: "HTTP to HTTPS", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 
509: "Bandwidth Limit Exceeded", + 510: "Not Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} + + +class HttpBaseClass(BaseClass): + GET = 'GET' + PUT = 'PUT' + POST = 'POST' + DELETE = 'DELETE' + HEAD = 'HEAD' + PATCH = 'PATCH' + OPTIONS = 'OPTIONS' + CONNECT = 'CONNECT' + METHODS = (GET, PUT, POST, DELETE, HEAD, PATCH, OPTIONS, CONNECT) + + +def parse_requestline(s): + """ + http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5 + + >>> parse_requestline('GET / HTTP/1.0') + ('GET', '/', '1.0') + >>> parse_requestline('post /testurl htTP/1.1') + ('POST', '/testurl', '1.1') + >>> parse_requestline('Im not a RequestLine') + Traceback (most recent call last): + ... + ValueError: Not a Request-Line + """ + methods = '|'.join(HttpBaseClass.METHODS) + m = re.match(r'(' + methods + ')\s+(.*)\s+HTTP/(1.[0|1])', s, re.I) + if m: + return m.group(1).upper(), m.group(2), m.group(3) + else: + raise ValueError('Not a Request-Line') + + +def last_requestline(sent_data): + """ + Find the last line in sent_data that can be parsed with parse_requestline + """ + for line in reversed(sent_data): + try: + parse_requestline(decode_utf8(line)) + except ValueError: + pass + else: + return line diff --git a/moto/packages/httpretty/utils.py b/moto/packages/httpretty/utils.py new file mode 100644 index 000000000..caa8fa13b --- /dev/null +++ b/moto/packages/httpretty/utils.py @@ -0,0 +1,48 @@ +# #!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (C) <2011-2013> Gabriel Falcão +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, 
subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +from __future__ import unicode_literals + +from .compat import ( + byte_type, text_type +) + + +def utf8(s): + if isinstance(s, text_type): + s = s.encode('utf-8') + elif s is None: + return byte_type() + + return byte_type(s) + + +def decode_utf8(s): + if isinstance(s, byte_type): + s = s.decode("utf-8") + elif s is None: + return text_type() + + return text_type(s) diff --git a/moto/packages/responses b/moto/packages/responses new file mode 160000 index 000000000..8d500447e --- /dev/null +++ b/moto/packages/responses @@ -0,0 +1 @@ +Subproject commit 8d500447e3d5c2b96ace2eb7ab0f60158e921ed8 diff --git a/moto/rds/__init__.py b/moto/rds/__init__.py index d3cafc066..2c8c0ba97 100644 --- a/moto/rds/__init__.py +++ b/moto/rds/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import rds_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator rds_backend = rds_backends['us-east-1'] mock_rds = base_decorator(rds_backends) +mock_rds_deprecated = deprecated_base_decorator(rds_backends) diff --git a/moto/rds2/__init__.py b/moto/rds2/__init__.py index b200f9b11..0feecfac4 100644 --- a/moto/rds2/__init__.py +++ b/moto/rds2/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from 
.models import rds2_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, deprecated_base_decorator rds2_backend = rds2_backends['us-west-1'] mock_rds2 = base_decorator(rds2_backends) +mock_rds2_deprecated = deprecated_base_decorator(rds2_backends) diff --git a/moto/redshift/__init__.py b/moto/redshift/__init__.py index 821408493..58be5fc70 100644 --- a/moto/redshift/__init__.py +++ b/moto/redshift/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import redshift_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator redshift_backend = redshift_backends['us-east-1'] mock_redshift = base_decorator(redshift_backends) +mock_redshift_deprecated = deprecated_base_decorator(redshift_backends) diff --git a/moto/route53/__init__.py b/moto/route53/__init__.py index 2c6bd223f..df629880f 100644 --- a/moto/route53/__init__.py +++ b/moto/route53/__init__.py @@ -1,3 +1,4 @@ from __future__ import unicode_literals from .models import route53_backend mock_route53 = route53_backend.decorator +mock_route53_deprecated = route53_backend.deprecated_decorator diff --git a/moto/s3/__init__.py b/moto/s3/__init__.py index 6590d4324..7d0df53bd 100644 --- a/moto/s3/__init__.py +++ b/moto/s3/__init__.py @@ -1,3 +1,4 @@ from __future__ import unicode_literals from .models import s3_backend mock_s3 = s3_backend.decorator +mock_s3_deprecated = s3_backend.deprecated_decorator \ No newline at end of file diff --git a/moto/s3/models.py b/moto/s3/models.py index c41ff3901..40370b5dd 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -25,7 +25,7 @@ class FakeKey(object): self.value = value self.last_modified = datetime.datetime.utcnow() self.acl = get_canned_acl('private') - self._storage_class = storage + self._storage_class = storage if storage else "STANDARD" self._metadata = {} self._expiry = None 
self._etag = etag @@ -92,6 +92,7 @@ class FakeKey(object): r = { 'etag': self.etag, 'last-modified': self.last_modified_RFC1123, + 'content-length': str(len(self.value)), } if self._storage_class != 'STANDARD': r['x-amz-storage-class'] = self._storage_class @@ -100,7 +101,7 @@ class FakeKey(object): r['x-amz-restore'] = rhdr.format(self.expiry_date) if self._is_versioned: - r['x-amz-version-id'] = self._version_id + r['x-amz-version-id'] = str(self._version_id) return r diff --git a/moto/s3/responses.py b/moto/s3/responses.py index d6855265e..3fbd058f2 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -49,6 +49,8 @@ class ResponseObject(_TemplateEnvironmentMixin): def subdomain_based_buckets(self, request): host = request.headers.get('host', request.headers.get('Host')) + if not host: + host = urlparse(request.url).netloc if not host or host.startswith("localhost"): # For localhost, default to path-based buckets @@ -130,6 +132,8 @@ class ResponseObject(_TemplateEnvironmentMixin): else: # Flask server body = request.data + if body is None: + body = '' body = body.decode('utf-8') if method == 'HEAD': @@ -334,7 +338,8 @@ class ResponseObject(_TemplateEnvironmentMixin): return 409, headers, template.render(bucket=removed_bucket) def _bucket_response_post(self, request, body, bucket_name, headers): - if self.is_delete_keys(request, request.path, bucket_name): + path = request.path if hasattr(request, 'path') else request.path_url + if self.is_delete_keys(request, path, bucket_name): return self._bucket_response_delete_keys(request, body, bucket_name, headers) # POST to bucket-url should create file from form @@ -344,7 +349,7 @@ class ResponseObject(_TemplateEnvironmentMixin): else: # HTTPretty, build new form object form = {} - for kv in request.body.decode('utf-8').split('&'): + for kv in body.decode('utf-8').split('&'): k, v = kv.split('=') form[k] = v @@ -428,9 +433,13 @@ class ResponseObject(_TemplateEnvironmentMixin): if hasattr(request, 'body'): # 
Boto body = request.body + if hasattr(body, 'read'): + body = body.read() else: # Flask server body = request.data + if body is None: + body = b'' if method == 'GET': return self._key_response_get(bucket_name, query, key_name, headers) @@ -546,7 +555,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if key: headers.update(key.metadata) headers.update(key.response_dict) - return 200, headers, key.value + return 200, headers, "" else: return 404, headers, "" diff --git a/moto/s3bucket_path/__init__.py b/moto/s3bucket_path/__init__.py index 85031a06e..baffc4882 100644 --- a/moto/s3bucket_path/__init__.py +++ b/moto/s3bucket_path/__init__.py @@ -1,4 +1 @@ from __future__ import unicode_literals - -from moto import mock_s3 -mock_s3bucket_path = mock_s3 diff --git a/moto/ses/__init__.py b/moto/ses/__init__.py index 3b0e93c14..e1ec4b41a 100644 --- a/moto/ses/__init__.py +++ b/moto/ses/__init__.py @@ -1,3 +1,4 @@ from __future__ import unicode_literals from .models import ses_backend mock_ses = ses_backend.decorator +mock_ses_deprecated = ses_backend.deprecated_decorator \ No newline at end of file diff --git a/moto/sns/__init__.py b/moto/sns/__init__.py index 0ed85e813..a50911e3b 100644 --- a/moto/sns/__init__.py +++ b/moto/sns/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import sns_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator sns_backend = sns_backends['us-east-1'] mock_sns = base_decorator(sns_backends) +mock_sns_deprecated = deprecated_base_decorator(sns_backends) diff --git a/moto/sqs/__init__.py b/moto/sqs/__init__.py index 09b4ed9e9..946ba8f47 100644 --- a/moto/sqs/__init__.py +++ b/moto/sqs/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import sqs_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, 
deprecated_base_decorator sqs_backend = sqs_backends['us-east-1'] mock_sqs = base_decorator(sqs_backends) +mock_sqs_deprecated = deprecated_base_decorator(sqs_backends) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 15c067613..d57ec3430 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -122,6 +122,7 @@ class SQSResponse(BaseResponse): queue = self.sqs_backend.delete_queue(queue_name) if not queue: return "A queue with name {0} does not exist".format(queue_name), dict(status=404) + template = self.response_template(DELETE_QUEUE_RESPONSE) return template.render(queue=queue) diff --git a/moto/sts/__init__.py b/moto/sts/__init__.py index 04e93e2e7..57456c1b3 100644 --- a/moto/sts/__init__.py +++ b/moto/sts/__init__.py @@ -1,3 +1,4 @@ from __future__ import unicode_literals from .models import sts_backend mock_sts = sts_backend.decorator +mock_sts_deprecated = sts_backend.deprecated_decorator diff --git a/moto/swf/__init__.py b/moto/swf/__init__.py index 180919320..5ac59fbb6 100644 --- a/moto/swf/__init__.py +++ b/moto/swf/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import swf_backends -from ..core.models import MockAWS, base_decorator +from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator swf_backend = swf_backends['us-east-1'] mock_swf = base_decorator(swf_backends) +mock_swf_deprecated = deprecated_base_decorator(swf_backends) diff --git a/setup.py b/setup.py index bfd8bbb87..52635d00b 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,6 @@ from setuptools import setup, find_packages install_requires = [ "Jinja2>=2.8", "boto>=2.36.0", - "httpretty==0.8.10", "requests", "xmltodict", "six", diff --git a/tests/__init__.py b/tests/__init__.py index baffc4882..bf582e0b3 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1 +1,8 @@ from __future__ import unicode_literals + +import logging +# Disable extra logging for tests 
+logging.getLogger('boto').setLevel(logging.CRITICAL) +logging.getLogger('boto3').setLevel(logging.CRITICAL) +logging.getLogger('botocore').setLevel(logging.CRITICAL) +logging.getLogger('nose').setLevel(logging.CRITICAL) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index fc41d3bc0..6bd6eb5e5 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -5,11 +5,11 @@ from datetime import datetime from dateutil.tz import tzutc import boto3 from freezegun import freeze_time -import httpretty import requests import sure # noqa from botocore.exceptions import ClientError +from moto.packages.responses import responses from moto import mock_apigateway @@ -883,11 +883,10 @@ def test_deployment(): stage['description'].should.equal('_new_description_') -@httpretty.activate @mock_apigateway def test_http_proxying_integration(): - httpretty.register_uri( - httpretty.GET, "http://httpbin.org/robots.txt", body='a fake response' + responses.add( + responses.GET, "http://httpbin.org/robots.txt", body='a fake response' ) region_name = 'us-west-2' diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index a048e81f5..4d0905196 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -8,12 +8,12 @@ from boto.ec2.autoscale import Tag import boto.ec2.elb import sure # noqa -from moto import mock_autoscaling, mock_ec2, mock_elb +from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_autoscaling_deprecated from tests.helpers import requires_boto_gte -@mock_autoscaling -@mock_elb +@mock_autoscaling_deprecated +@mock_elb_deprecated def test_create_autoscaling_group(): elb_conn = boto.ec2.elb.connect_to_region('us-east-1') elb_conn.create_load_balancer('test_lb', zones=[], listeners=[(80, 8080, 'http')]) @@ -73,7 +73,7 @@ def test_create_autoscaling_group(): 
tag.propagate_at_launch.should.equal(True) -@mock_autoscaling +@mock_autoscaling_deprecated def test_create_autoscaling_groups_defaults(): """ Test with the minimum inputs and check that all of the proper defaults are assigned for the other attributes """ @@ -112,7 +112,7 @@ def test_create_autoscaling_groups_defaults(): list(group.tags).should.equal([]) -@mock_autoscaling +@mock_autoscaling_deprecated def test_autoscaling_group_describe_filter(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -138,7 +138,7 @@ def test_autoscaling_group_describe_filter(): conn.get_all_groups().should.have.length_of(3) -@mock_autoscaling +@mock_autoscaling_deprecated def test_autoscaling_update(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -169,7 +169,7 @@ def test_autoscaling_update(): group.vpc_zone_identifier.should.equal('subnet-5678efgh') -@mock_autoscaling +@mock_autoscaling_deprecated def test_autoscaling_tags_update(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -211,7 +211,7 @@ def test_autoscaling_tags_update(): group.tags.should.have.length_of(2) -@mock_autoscaling +@mock_autoscaling_deprecated def test_autoscaling_group_delete(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -235,8 +235,8 @@ def test_autoscaling_group_delete(): conn.get_all_groups().should.have.length_of(0) -@mock_ec2 -@mock_autoscaling +@mock_ec2_deprecated +@mock_autoscaling_deprecated def test_autoscaling_group_describe_instances(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -269,7 +269,7 @@ def test_autoscaling_group_describe_instances(): @requires_boto_gte("2.8") -@mock_autoscaling +@mock_autoscaling_deprecated def test_set_desired_capacity_up(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -304,7 +304,7 @@ def test_set_desired_capacity_up(): @requires_boto_gte("2.8") -@mock_autoscaling +@mock_autoscaling_deprecated def test_set_desired_capacity_down(): conn = 
boto.connect_autoscale() config = LaunchConfiguration( @@ -339,7 +339,7 @@ def test_set_desired_capacity_down(): @requires_boto_gte("2.8") -@mock_autoscaling +@mock_autoscaling_deprecated def test_set_desired_capacity_the_same(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -372,8 +372,8 @@ def test_set_desired_capacity_the_same(): instances = list(conn.get_all_autoscaling_instances()) instances.should.have.length_of(2) -@mock_autoscaling -@mock_elb +@mock_autoscaling_deprecated +@mock_elb_deprecated def test_autoscaling_group_with_elb(): elb_conn = boto.connect_elb() zones = ['us-east-1a', 'us-east-1b'] diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index 8020e46f6..b2e21b03e 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -5,11 +5,11 @@ from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping import sure # noqa -from moto import mock_autoscaling +from moto import mock_autoscaling_deprecated from tests.helpers import requires_boto_gte -@mock_autoscaling +@mock_autoscaling_deprecated def test_create_launch_configuration(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -38,7 +38,7 @@ def test_create_launch_configuration(): @requires_boto_gte("2.27.0") -@mock_autoscaling +@mock_autoscaling_deprecated def test_create_launch_configuration_with_block_device_mappings(): block_device_mapping = BlockDeviceMapping() @@ -101,7 +101,7 @@ def test_create_launch_configuration_with_block_device_mappings(): @requires_boto_gte("2.12") -@mock_autoscaling +@mock_autoscaling_deprecated def test_create_launch_configuration_for_2_12(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -116,7 +116,7 @@ def test_create_launch_configuration_for_2_12(): @requires_boto_gte("2.25.0") -@mock_autoscaling +@mock_autoscaling_deprecated def 
test_create_launch_configuration_using_ip_association(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -131,7 +131,7 @@ def test_create_launch_configuration_using_ip_association(): @requires_boto_gte("2.25.0") -@mock_autoscaling +@mock_autoscaling_deprecated def test_create_launch_configuration_using_ip_association_should_default_to_false(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -144,7 +144,7 @@ def test_create_launch_configuration_using_ip_association_should_default_to_fals launch_config.associate_public_ip_address.should.equal(False) -@mock_autoscaling +@mock_autoscaling_deprecated def test_create_launch_configuration_defaults(): """ Test with the minimum inputs and check that all of the proper defaults are assigned for the other attributes """ @@ -171,7 +171,7 @@ def test_create_launch_configuration_defaults(): @requires_boto_gte("2.12") -@mock_autoscaling +@mock_autoscaling_deprecated def test_create_launch_configuration_defaults_for_2_12(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -184,7 +184,7 @@ def test_create_launch_configuration_defaults_for_2_12(): launch_config.ebs_optimized.should.equal(False) -@mock_autoscaling +@mock_autoscaling_deprecated def test_launch_configuration_describe_filter(): conn = boto.connect_autoscale() config = LaunchConfiguration( @@ -202,7 +202,7 @@ def test_launch_configuration_describe_filter(): conn.get_all_launch_configurations().should.have.length_of(3) -@mock_autoscaling +@mock_autoscaling_deprecated def test_launch_configuration_delete(): conn = boto.connect_autoscale() config = LaunchConfiguration( diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py index 8ca585e89..54c64b749 100644 --- a/tests/test_autoscaling/test_policies.py +++ b/tests/test_autoscaling/test_policies.py @@ -5,7 +5,7 @@ from boto.ec2.autoscale.group import AutoScalingGroup from boto.ec2.autoscale.policy import ScalingPolicy import sure # noqa 
-from moto import mock_autoscaling +from moto import mock_autoscaling_deprecated def setup_autoscale_group(): @@ -27,7 +27,7 @@ def setup_autoscale_group(): return group -@mock_autoscaling +@mock_autoscaling_deprecated def test_create_policy(): setup_autoscale_group() conn = boto.connect_autoscale() @@ -48,7 +48,7 @@ def test_create_policy(): policy.cooldown.should.equal(60) -@mock_autoscaling +@mock_autoscaling_deprecated def test_create_policy_default_values(): setup_autoscale_group() conn = boto.connect_autoscale() @@ -67,7 +67,7 @@ def test_create_policy_default_values(): policy.cooldown.should.equal(300) -@mock_autoscaling +@mock_autoscaling_deprecated def test_update_policy(): setup_autoscale_group() conn = boto.connect_autoscale() @@ -94,7 +94,7 @@ def test_update_policy(): policy.scaling_adjustment.should.equal(2) -@mock_autoscaling +@mock_autoscaling_deprecated def test_delete_policy(): setup_autoscale_group() conn = boto.connect_autoscale() @@ -112,7 +112,7 @@ def test_delete_policy(): conn.get_all_policies().should.have.length_of(0) -@mock_autoscaling +@mock_autoscaling_deprecated def test_execute_policy_exact_capacity(): setup_autoscale_group() conn = boto.connect_autoscale() @@ -130,7 +130,7 @@ def test_execute_policy_exact_capacity(): instances.should.have.length_of(3) -@mock_autoscaling +@mock_autoscaling_deprecated def test_execute_policy_positive_change_in_capacity(): setup_autoscale_group() conn = boto.connect_autoscale() @@ -148,7 +148,7 @@ def test_execute_policy_positive_change_in_capacity(): instances.should.have.length_of(5) -@mock_autoscaling +@mock_autoscaling_deprecated def test_execute_policy_percent_change_in_capacity(): setup_autoscale_group() conn = boto.connect_autoscale() @@ -166,7 +166,7 @@ def test_execute_policy_percent_change_in_capacity(): instances.should.have.length_of(3) -@mock_autoscaling +@mock_autoscaling_deprecated def test_execute_policy_small_percent_change_in_capacity(): """ 
http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html If PercentChangeInCapacity returns a value between 0 and 1, diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index e45dafbfa..3d41c9d91 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -12,7 +12,7 @@ import sure # noqa import tests.backport_assert_raises # noqa from nose.tools import assert_raises -from moto import mock_cloudformation, mock_s3 +from moto import mock_cloudformation_deprecated, mock_s3_deprecated from moto.cloudformation import cloudformation_backends dummy_template = { @@ -46,7 +46,7 @@ dummy_template_json2 = json.dumps(dummy_template2) dummy_template_json3 = json.dumps(dummy_template3) -@mock_cloudformation +@mock_cloudformation_deprecated def test_create_stack(): conn = boto.connect_cloudformation() conn.create_stack( @@ -69,7 +69,7 @@ def test_create_stack(): }) -@mock_cloudformation +@mock_cloudformation_deprecated def test_creating_stacks_across_regions(): west1_conn = boto.cloudformation.connect_to_region("us-west-1") west1_conn.create_stack("test_stack", template_body=dummy_template_json) @@ -81,7 +81,7 @@ def test_creating_stacks_across_regions(): list(west2_conn.describe_stacks()).should.have.length_of(1) -@mock_cloudformation +@mock_cloudformation_deprecated def test_create_stack_with_notification_arn(): conn = boto.connect_cloudformation() conn.create_stack( @@ -94,8 +94,8 @@ def test_create_stack_with_notification_arn(): [n.value for n in stack.notification_arns].should.contain('arn:aws:sns:us-east-1:123456789012:fake-queue') -@mock_cloudformation -@mock_s3 +@mock_cloudformation_deprecated +@mock_s3_deprecated def test_create_stack_from_s3_url(): s3_conn = boto.s3.connect_to_region('us-west-1') bucket = s3_conn.create_bucket("foobar") @@ -123,7 +123,7 @@ def 
test_create_stack_from_s3_url(): }) -@mock_cloudformation +@mock_cloudformation_deprecated def test_describe_stack_by_name(): conn = boto.connect_cloudformation() conn.create_stack( @@ -135,7 +135,7 @@ def test_describe_stack_by_name(): stack.stack_name.should.equal('test_stack') -@mock_cloudformation +@mock_cloudformation_deprecated def test_describe_stack_by_stack_id(): conn = boto.connect_cloudformation() conn.create_stack( @@ -149,7 +149,7 @@ def test_describe_stack_by_stack_id(): stack_by_id.stack_name.should.equal("test_stack") -@mock_cloudformation +@mock_cloudformation_deprecated def test_describe_deleted_stack(): conn = boto.connect_cloudformation() conn.create_stack( @@ -166,7 +166,7 @@ def test_describe_deleted_stack(): stack_by_id.stack_status.should.equal("DELETE_COMPLETE") -@mock_cloudformation +@mock_cloudformation_deprecated def test_get_template_by_name(): conn = boto.connect_cloudformation() conn.create_stack( @@ -188,7 +188,7 @@ def test_get_template_by_name(): }) -@mock_cloudformation +@mock_cloudformation_deprecated def test_list_stacks(): conn = boto.connect_cloudformation() conn.create_stack( @@ -205,7 +205,7 @@ def test_list_stacks(): stacks[0].template_description.should.equal("Stack 1") -@mock_cloudformation +@mock_cloudformation_deprecated def test_delete_stack_by_name(): conn = boto.connect_cloudformation() conn.create_stack( @@ -218,7 +218,7 @@ def test_delete_stack_by_name(): conn.list_stacks().should.have.length_of(0) -@mock_cloudformation +@mock_cloudformation_deprecated def test_delete_stack_by_id(): conn = boto.connect_cloudformation() stack_id = conn.create_stack( @@ -235,7 +235,7 @@ def test_delete_stack_by_id(): conn.describe_stacks(stack_id).should.have.length_of(1) -@mock_cloudformation +@mock_cloudformation_deprecated def test_delete_stack_with_resource_missing_delete_attr(): conn = boto.connect_cloudformation() conn.create_stack( @@ -248,14 +248,14 @@ def test_delete_stack_with_resource_missing_delete_attr(): 
conn.list_stacks().should.have.length_of(0) -@mock_cloudformation +@mock_cloudformation_deprecated def test_bad_describe_stack(): conn = boto.connect_cloudformation() with assert_raises(BotoServerError): conn.describe_stacks("bad_stack") -@mock_cloudformation() +@mock_cloudformation_deprecated() def test_cloudformation_params(): dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -279,7 +279,7 @@ def test_cloudformation_params(): param.value.should.equal('testing123') -@mock_cloudformation +@mock_cloudformation_deprecated def test_stack_tags(): conn = boto.connect_cloudformation() conn.create_stack( @@ -292,7 +292,7 @@ def test_stack_tags(): dict(stack.tags).should.equal({"foo": "bar", "baz": "bleh"}) -@mock_cloudformation +@mock_cloudformation_deprecated def test_update_stack(): conn = boto.connect_cloudformation() conn.create_stack( @@ -316,7 +316,7 @@ def test_update_stack(): }) -@mock_cloudformation +@mock_cloudformation_deprecated def test_update_stack(): conn = boto.connect_cloudformation() conn.create_stack( @@ -339,7 +339,7 @@ def test_update_stack(): }) -@mock_cloudformation +@mock_cloudformation_deprecated def test_update_stack_when_rolled_back(): conn = boto.connect_cloudformation() stack_id = conn.create_stack("test_stack", template_body=dummy_template_json) @@ -355,7 +355,7 @@ def test_update_stack_when_rolled_back(): ex.reason.should.equal('Bad Request') ex.status.should.equal(400) -@mock_cloudformation +@mock_cloudformation_deprecated def test_describe_stack_events_shows_create_update_and_delete(): conn = boto.connect_cloudformation() stack_id = conn.create_stack("test_stack", template_body=dummy_template_json) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 97c3e864a..95ac6ede4 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -5,7 
+5,7 @@ import boto import boto.s3 import boto.s3.key from botocore.exceptions import ClientError -from moto import mock_cloudformation, mock_s3 +from moto import mock_cloudformation, mock_s3_deprecated import json import sure # noqa @@ -118,7 +118,7 @@ def test_create_stack_with_role_arn(): @mock_cloudformation -@mock_s3 +@mock_s3_deprecated def test_create_stack_from_s3_url(): s3_conn = boto.s3.connect_to_region('us-west-1') bucket = s3_conn.create_bucket("foobar") diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 4237bee19..1b9330a9f 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -18,20 +18,26 @@ import boto3 import sure # noqa from moto import ( - mock_autoscaling, + mock_autoscaling_deprecated, mock_cloudformation, - mock_datapipeline, + mock_cloudformation_deprecated, + mock_datapipeline_deprecated, mock_ec2, + mock_ec2_deprecated, mock_elb, - mock_iam, + mock_elb_deprecated, + mock_iam_deprecated, mock_kms, mock_lambda, - mock_rds, + mock_rds_deprecated, mock_rds2, + mock_rds2_deprecated, mock_redshift, - mock_route53, - mock_sns, + mock_redshift_deprecated, + mock_route53_deprecated, + mock_sns_deprecated, mock_sqs, + mock_sqs_deprecated, ) from .fixtures import ( @@ -49,7 +55,7 @@ from .fixtures import ( ) -@mock_cloudformation() +@mock_cloudformation_deprecated() def test_stack_sqs_integration(): sqs_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -79,7 +85,7 @@ def test_stack_sqs_integration(): queue.physical_resource_id.should.equal("my-queue") -@mock_cloudformation() +@mock_cloudformation_deprecated() def test_stack_list_resources(): sqs_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -110,8 +116,8 @@ def test_stack_list_resources(): queue.physical_resource_id.should.equal("my-queue") -@mock_cloudformation() -@mock_sqs() 
+@mock_cloudformation_deprecated() +@mock_sqs_deprecated() def test_update_stack(): sqs_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -148,8 +154,8 @@ def test_update_stack(): queues[0].get_attributes('VisibilityTimeout')['VisibilityTimeout'].should.equal('100') -@mock_cloudformation() -@mock_sqs() +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() def test_update_stack_and_remove_resource(): sqs_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -184,8 +190,8 @@ def test_update_stack_and_remove_resource(): queues.should.have.length_of(0) -@mock_cloudformation() -@mock_sqs() +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() def test_update_stack_and_add_resource(): sqs_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -223,8 +229,8 @@ def test_update_stack_and_add_resource(): queues.should.have.length_of(1) -@mock_ec2() -@mock_cloudformation() +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() def test_stack_ec2_integration(): ec2_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -257,9 +263,9 @@ def test_stack_ec2_integration(): instance.physical_resource_id.should.equal(ec2_instance.id) -@mock_ec2() -@mock_elb() -@mock_cloudformation() +@mock_ec2_deprecated() +@mock_elb_deprecated() +@mock_cloudformation_deprecated() def test_stack_elb_integration_with_attached_ec2_instances(): elb_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -307,8 +313,8 @@ def test_stack_elb_integration_with_attached_ec2_instances(): list(load_balancer.availability_zones).should.equal(['us-east-1']) -@mock_elb() -@mock_cloudformation() +@mock_elb_deprecated() +@mock_cloudformation_deprecated() def test_stack_elb_integration_with_health_check(): elb_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -355,8 +361,8 @@ def test_stack_elb_integration_with_health_check(): health_check.unhealthy_threshold.should.equal(2) -@mock_elb() -@mock_cloudformation() +@mock_elb_deprecated() +@mock_cloudformation_deprecated() def 
test_stack_elb_integration_with_update(): elb_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -399,9 +405,9 @@ def test_stack_elb_integration_with_update(): load_balancer.availability_zones[0].should.equal('us-west-1b') -@mock_ec2() -@mock_redshift() -@mock_cloudformation() +@mock_ec2_deprecated() +@mock_redshift_deprecated() +@mock_cloudformation_deprecated() def test_redshift_stack(): redshift_template_json = json.dumps(redshift.template) @@ -443,8 +449,8 @@ def test_redshift_stack(): group.rules[0].grants[0].cidr_ip.should.equal("10.0.0.1/16") -@mock_ec2() -@mock_cloudformation() +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() def test_stack_security_groups(): security_group_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -519,9 +525,9 @@ def test_stack_security_groups(): rule2.grants[0].group_id.should.equal(other_group.id) -@mock_autoscaling() -@mock_elb() -@mock_cloudformation() +@mock_autoscaling_deprecated() +@mock_elb_deprecated() +@mock_cloudformation_deprecated() def test_autoscaling_group_with_elb(): web_setup_template = { @@ -601,8 +607,8 @@ def test_autoscaling_group_with_elb(): elb_resource.physical_resource_id.should.contain("my-elb") -@mock_autoscaling() -@mock_cloudformation() +@mock_autoscaling_deprecated() +@mock_cloudformation_deprecated() def test_autoscaling_group_update(): asg_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -650,8 +656,8 @@ def test_autoscaling_group_update(): asg.max_size.should.equal(3) -@mock_ec2() -@mock_cloudformation() +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() def test_vpc_single_instance_in_subnet(): template_json = json.dumps(vpc_single_instance_in_subnet.template) @@ -695,8 +701,8 @@ def test_vpc_single_instance_in_subnet(): eip_resource = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] eip_resource.physical_resource_id.should.equal(eip.allocation_id) -@mock_cloudformation() -@mock_ec2() +@mock_cloudformation_deprecated() 
+@mock_ec2_deprecated() @mock_rds2() def test_rds_db_parameter_groups(): ec2_conn = boto.ec2.connect_to_region("us-west-1") @@ -734,9 +740,9 @@ def test_rds_db_parameter_groups(): -@mock_cloudformation() -@mock_ec2() -@mock_rds() +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +@mock_rds_deprecated() def test_rds_mysql_with_read_replica(): ec2_conn = boto.ec2.connect_to_region("us-west-1") ec2_conn.create_security_group('application', 'Our Application Group') @@ -776,9 +782,9 @@ def test_rds_mysql_with_read_replica(): security_group.ec2_groups[0].name.should.equal("application") -@mock_cloudformation() -@mock_ec2() -@mock_rds() +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +@mock_rds_deprecated() def test_rds_mysql_with_read_replica_in_vpc(): template_json = json.dumps(rds_mysql_with_read_replica.template) conn = boto.cloudformation.connect_to_region("eu-central-1") @@ -804,9 +810,9 @@ def test_rds_mysql_with_read_replica_in_vpc(): subnet_group.description.should.equal("my db subnet group") -@mock_autoscaling() -@mock_iam() -@mock_cloudformation() +@mock_autoscaling_deprecated() +@mock_iam_deprecated() +@mock_cloudformation_deprecated() def test_iam_roles(): iam_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -923,8 +929,8 @@ def test_iam_roles(): role_resource.physical_resource_id.should.equal(role.role_id) -@mock_ec2() -@mock_cloudformation() +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() def test_single_instance_with_ebs_volume(): template_json = json.dumps(single_instance_with_ebs_volume.template) @@ -951,7 +957,7 @@ def test_single_instance_with_ebs_volume(): ebs_volumes[0].physical_resource_id.should.equal(volume.id) -@mock_cloudformation() +@mock_cloudformation_deprecated() def test_create_template_without_required_param(): template_json = json.dumps(single_instance_with_ebs_volume.template) conn = boto.cloudformation.connect_to_region("us-west-1") @@ -961,8 +967,8 @@ def 
test_create_template_without_required_param(): ).should.throw(BotoServerError) -@mock_ec2() -@mock_cloudformation() +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() def test_classic_eip(): template_json = json.dumps(ec2_classic_eip.template) @@ -977,8 +983,8 @@ def test_classic_eip(): cfn_eip.physical_resource_id.should.equal(eip.public_ip) -@mock_ec2() -@mock_cloudformation() +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() def test_vpc_eip(): template_json = json.dumps(vpc_eip.template) @@ -993,8 +999,8 @@ def test_vpc_eip(): cfn_eip.physical_resource_id.should.equal(eip.allocation_id) -@mock_ec2() -@mock_cloudformation() +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() def test_fn_join(): template_json = json.dumps(fn_join.template) @@ -1008,8 +1014,8 @@ def test_fn_join(): fn_join_output.value.should.equal('test eip:{0}'.format(eip.public_ip)) -@mock_cloudformation() -@mock_sqs() +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() def test_conditional_resources(): sqs_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -1054,8 +1060,8 @@ def test_conditional_resources(): list(sqs_conn.get_all_queues()).should.have.length_of(1) -@mock_cloudformation() -@mock_ec2() +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() def test_conditional_if_handling(): dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -1110,8 +1116,8 @@ def test_conditional_if_handling(): ec2_instance.image_id.should.equal("ami-00000000") -@mock_cloudformation() -@mock_ec2() +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() def test_cloudformation_mapping(): dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -1155,8 +1161,8 @@ def test_cloudformation_mapping(): ec2_instance.image_id.should.equal("ami-c9c7978c") -@mock_cloudformation() -@mock_route53() +@mock_cloudformation_deprecated() +@mock_route53_deprecated() def test_route53_roundrobin(): route53_conn = boto.connect_route53() @@ -1198,9 +1204,9 @@ def 
test_route53_roundrobin(): output.value.should.equal('arn:aws:route53:::hostedzone/{0}'.format(zone_id)) -@mock_cloudformation() -@mock_ec2() -@mock_route53() +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +@mock_route53_deprecated() def test_route53_ec2_instance_with_public_ip(): route53_conn = boto.connect_route53() ec2_conn = boto.ec2.connect_to_region("us-west-1") @@ -1233,8 +1239,8 @@ def test_route53_ec2_instance_with_public_ip(): record_set1.resource_records[0].should.equal("10.0.0.25") -@mock_cloudformation() -@mock_route53() +@mock_cloudformation_deprecated() +@mock_route53_deprecated() def test_route53_associate_health_check(): route53_conn = boto.connect_route53() @@ -1270,8 +1276,8 @@ def test_route53_associate_health_check(): record_set.health_check.should.equal(health_check_id) -@mock_cloudformation() -@mock_route53() +@mock_cloudformation_deprecated() +@mock_route53_deprecated() def test_route53_with_update(): route53_conn = boto.connect_route53() @@ -1314,8 +1320,8 @@ def test_route53_with_update(): record_set.resource_records.should.equal(["my_other.example.com"]) -@mock_cloudformation() -@mock_sns() +@mock_cloudformation_deprecated() +@mock_sns_deprecated() def test_sns_topic(): dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -1367,8 +1373,8 @@ def test_sns_topic(): topic_arn_output.value.should.equal(topic_arn) -@mock_cloudformation -@mock_ec2 +@mock_cloudformation_deprecated +@mock_ec2_deprecated def test_vpc_gateway_attachment_creation_should_attach_itself_to_vpc(): template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -1415,8 +1421,8 @@ def test_vpc_gateway_attachment_creation_should_attach_itself_to_vpc(): igws.should.have.length_of(1) -@mock_cloudformation -@mock_ec2 +@mock_cloudformation_deprecated +@mock_ec2_deprecated def test_vpc_peering_creation(): vpc_conn = boto.vpc.connect_to_region("us-west-1") vpc_source = vpc_conn.create_vpc("10.0.0.0/16") @@ -1445,8 +1451,8 @@ def test_vpc_peering_creation(): 
peering_connections.should.have.length_of(1) -@mock_cloudformation -@mock_ec2 +@mock_cloudformation_deprecated +@mock_ec2_deprecated def test_multiple_security_group_ingress_separate_from_security_group_by_id(): template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -1507,8 +1513,8 @@ def test_multiple_security_group_ingress_separate_from_security_group_by_id(): security_group1.rules[0].to_port.should.equal('8080') -@mock_cloudformation -@mock_ec2 +@mock_cloudformation_deprecated +@mock_ec2_deprecated def test_security_group_ingress_separate_from_security_group_by_id(): ec2_conn = boto.ec2.connect_to_region("us-west-1") ec2_conn.create_security_group("test-security-group1", "test security group") @@ -1558,8 +1564,8 @@ def test_security_group_ingress_separate_from_security_group_by_id(): security_group1.rules[0].to_port.should.equal('8080') -@mock_cloudformation -@mock_ec2 +@mock_cloudformation_deprecated +@mock_ec2_deprecated def test_security_group_ingress_separate_from_security_group_by_id_using_vpc(): vpc_conn = boto.vpc.connect_to_region("us-west-1") vpc = vpc_conn.create_vpc("10.0.0.0/16") @@ -1624,8 +1630,8 @@ def test_security_group_ingress_separate_from_security_group_by_id_using_vpc(): security_group1.rules[0].to_port.should.equal('8080') -@mock_cloudformation -@mock_ec2 +@mock_cloudformation_deprecated +@mock_ec2_deprecated def test_security_group_with_update(): vpc_conn = boto.vpc.connect_to_region("us-west-1") vpc1 = vpc_conn.create_vpc("10.0.0.0/16") @@ -1669,8 +1675,8 @@ def test_security_group_with_update(): security_group.vpc_id.should.equal(vpc2.id) -@mock_cloudformation -@mock_ec2 +@mock_cloudformation_deprecated +@mock_ec2_deprecated def test_subnets_should_be_created_with_availability_zone(): vpc_conn = boto.vpc.connect_to_region('us-west-1') vpc = vpc_conn.create_vpc("10.0.0.0/16") @@ -1698,8 +1704,8 @@ def test_subnets_should_be_created_with_availability_zone(): subnet.availability_zone.should.equal('us-west-1b') -@mock_cloudformation 
-@mock_datapipeline +@mock_cloudformation_deprecated +@mock_datapipeline_deprecated def test_datapipeline(): dp_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -1796,11 +1802,10 @@ def lambda_handler(event, context): return _process_lamda(pfunc) -@mock_cloudformation +@mock_cloudformation_deprecated @mock_lambda def test_lambda_function(): # switch this to python as backend lambda only supports python execution. - conn = boto3.client('lambda', 'us-east-1') template = { "AWSTemplateFormatVersion": "2010-09-09", "Resources": { @@ -1827,6 +1832,7 @@ def test_lambda_function(): template_body=template_json, ) + conn = boto3.client('lambda', 'us-east-1') result = conn.list_functions() result['Functions'].should.have.length_of(1) result['Functions'][0]['Description'].should.equal('Test function') diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index 7354241f0..88a3190c6 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -2,7 +2,7 @@ import boto from boto.ec2.cloudwatch.alarm import MetricAlarm import sure # noqa -from moto import mock_cloudwatch +from moto import mock_cloudwatch_deprecated def alarm_fixture(name="tester", action=None): action = action or ['arn:alarm'] @@ -23,7 +23,7 @@ def alarm_fixture(name="tester", action=None): unit='Seconds', ) -@mock_cloudwatch +@mock_cloudwatch_deprecated def test_create_alarm(): conn = boto.connect_cloudwatch() @@ -49,7 +49,7 @@ def test_create_alarm(): alarm.unit.should.equal('Seconds') -@mock_cloudwatch +@mock_cloudwatch_deprecated def test_delete_alarm(): conn = boto.connect_cloudwatch() @@ -68,7 +68,7 @@ def test_delete_alarm(): alarms.should.have.length_of(0) -@mock_cloudwatch +@mock_cloudwatch_deprecated def test_put_metric_data(): conn = boto.connect_cloudwatch() @@ -87,7 +87,7 @@ def test_put_metric_data(): dict(metric.dimensions).should.equal({'InstanceId': ['i-0123456,i-0123457']}) -@mock_cloudwatch 
+@mock_cloudwatch_deprecated def test_describe_alarms(): conn = boto.connect_cloudwatch() @@ -114,7 +114,7 @@ def test_describe_alarms(): alarms = conn.describe_alarms() alarms.should.have.length_of(0) -@mock_cloudwatch +@mock_cloudwatch_deprecated def test_describe_state_value_unimplemented(): conn = boto.connect_cloudwatch() diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 7d32bc8b3..81dc0639a 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -7,19 +7,19 @@ import unittest import tests.backport_assert_raises # noqa from nose.tools import assert_raises -from moto import mock_ec2, mock_s3 +from moto import mock_ec2_deprecated, mock_s3_deprecated ''' Test the different ways that the decorator can be used ''' -@mock_ec2 +@mock_ec2_deprecated def test_basic_connect(): boto.connect_ec2() -@mock_ec2 +@mock_ec2_deprecated def test_basic_decorator(): conn = boto.connect_ec2('the_key', 'the_secret') list(conn.get_all_instances()).should.equal([]) @@ -30,7 +30,7 @@ def test_context_manager(): with assert_raises(EC2ResponseError): conn.get_all_instances() - with mock_ec2(): + with mock_ec2_deprecated(): conn = boto.connect_ec2('the_key', 'the_secret') list(conn.get_all_instances()).should.equal([]) @@ -44,7 +44,7 @@ def test_decorator_start_and_stop(): with assert_raises(EC2ResponseError): conn.get_all_instances() - mock = mock_ec2() + mock = mock_ec2_deprecated() mock.start() conn = boto.connect_ec2('the_key', 'the_secret') list(conn.get_all_instances()).should.equal([]) @@ -54,7 +54,7 @@ def test_decorator_start_and_stop(): conn.get_all_instances() -@mock_ec2 +@mock_ec2_deprecated def test_decorater_wrapped_gets_set(): """ Moto decorator's __wrapped__ should get set to the tests function @@ -62,7 +62,7 @@ def test_decorater_wrapped_gets_set(): test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal('test_decorater_wrapped_gets_set') -@mock_ec2 
+@mock_ec2_deprecated class Tester(object): def test_the_class(self): conn = boto.connect_ec2() @@ -73,7 +73,7 @@ class Tester(object): list(conn.get_all_instances()).should.have.length_of(0) -@mock_s3 +@mock_s3_deprecated class TesterWithSetup(unittest.TestCase): def setUp(self): self.conn = boto.connect_s3() diff --git a/tests/test_core/test_nested.py b/tests/test_core/test_nested.py index 09967d743..7c0b8f687 100644 --- a/tests/test_core/test_nested.py +++ b/tests/test_core/test_nested.py @@ -5,12 +5,12 @@ from boto.sqs.connection import SQSConnection from boto.sqs.message import Message from boto.ec2 import EC2Connection -from moto import mock_sqs, mock_ec2 +from moto import mock_sqs_deprecated, mock_ec2_deprecated class TestNestedDecorators(unittest.TestCase): - @mock_sqs + @mock_sqs_deprecated def setup_sqs_queue(self): conn = SQSConnection() q = conn.create_queue('some-queue') @@ -21,7 +21,7 @@ class TestNestedDecorators(unittest.TestCase): self.assertEqual(q.count(), 1) - @mock_ec2 + @mock_ec2_deprecated def test_nested(self): self.setup_sqs_queue() diff --git a/tests/test_datapipeline/test_datapipeline.py b/tests/test_datapipeline/test_datapipeline.py index 5a958492f..aaa9f7f77 100644 --- a/tests/test_datapipeline/test_datapipeline.py +++ b/tests/test_datapipeline/test_datapipeline.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import boto.datapipeline import sure # noqa -from moto import mock_datapipeline +from moto import mock_datapipeline_deprecated from moto.datapipeline.utils import remove_capitalization_of_dict_keys @@ -13,7 +13,7 @@ def get_value_from_fields(key, fields): return field['stringValue'] -@mock_datapipeline +@mock_datapipeline_deprecated def test_create_pipeline(): conn = boto.datapipeline.connect_to_region("us-west-2") @@ -78,7 +78,7 @@ PIPELINE_OBJECTS = [ ] -@mock_datapipeline +@mock_datapipeline_deprecated def test_creating_pipeline_definition(): conn = boto.datapipeline.connect_to_region("us-west-2") res = 
conn.create_pipeline("mypipeline", "some-unique-id") @@ -97,7 +97,7 @@ def test_creating_pipeline_definition(): }]) -@mock_datapipeline +@mock_datapipeline_deprecated def test_describing_pipeline_objects(): conn = boto.datapipeline.connect_to_region("us-west-2") res = conn.create_pipeline("mypipeline", "some-unique-id") @@ -116,7 +116,7 @@ def test_describing_pipeline_objects(): }]) -@mock_datapipeline +@mock_datapipeline_deprecated def test_activate_pipeline(): conn = boto.datapipeline.connect_to_region("us-west-2") @@ -133,7 +133,7 @@ def test_activate_pipeline(): get_value_from_fields('@pipelineState', fields).should.equal("SCHEDULED") -@mock_datapipeline +@mock_datapipeline_deprecated def test_listing_pipelines(): conn = boto.datapipeline.connect_to_region("us-west-2") res1 = conn.create_pipeline("mypipeline1", "some-unique-id1") diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index 1f85ce4d8..7ea56faa9 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -7,13 +7,13 @@ import requests import tests.backport_assert_raises from nose.tools import assert_raises -from moto import mock_dynamodb +from moto import mock_dynamodb, mock_dynamodb_deprecated from moto.dynamodb import dynamodb_backend from boto.exception import DynamoDBResponseError -@mock_dynamodb +@mock_dynamodb_deprecated def test_list_tables(): name = 'TestTable' dynamodb_backend.create_table(name, hash_key_attr="name", hash_key_type="S") @@ -21,7 +21,7 @@ def test_list_tables(): assert conn.list_tables() == ['TestTable'] -@mock_dynamodb +@mock_dynamodb_deprecated def test_list_tables_layer_1(): dynamodb_backend.create_table("test_1", hash_key_attr="name", hash_key_type="S") dynamodb_backend.create_table("test_2", hash_key_attr="name", hash_key_type="S") @@ -35,7 +35,7 @@ def test_list_tables_layer_1(): res.should.equal(expected) -@mock_dynamodb +@mock_dynamodb_deprecated def test_describe_missing_table(): conn = 
boto.connect_dynamodb('the_key', 'the_secret') with assert_raises(DynamoDBResponseError): @@ -49,7 +49,7 @@ def test_sts_handler(): res.text.should.contain("SecretAccessKey") -@mock_dynamodb +@mock_dynamodb_deprecated def test_dynamodb_with_connect_to_region(): # this will work if connected with boto.connect_dynamodb() dynamodb = boto.dynamodb.connect_to_region('us-west-2') diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index f6ba9b307..c7832b08f 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -4,7 +4,7 @@ import boto import sure # noqa from freezegun import freeze_time -from moto import mock_dynamodb +from moto import mock_dynamodb_deprecated from boto.dynamodb import condition from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError @@ -29,7 +29,7 @@ def create_table(conn): @freeze_time("2012-01-14") -@mock_dynamodb +@mock_dynamodb_deprecated def test_create_table(): conn = boto.connect_dynamodb() create_table(conn) @@ -60,7 +60,7 @@ def test_create_table(): conn.describe_table('messages').should.equal(expected) -@mock_dynamodb +@mock_dynamodb_deprecated def test_delete_table(): conn = boto.connect_dynamodb() create_table(conn) @@ -72,7 +72,7 @@ def test_delete_table(): conn.layer1.delete_table.when.called_with('messages').should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_update_table_throughput(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -86,7 +86,7 @@ def test_update_table_throughput(): table.write_units.should.equal(6) -@mock_dynamodb +@mock_dynamodb_deprecated def test_item_add_and_describe_and_update(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -133,7 +133,7 @@ def test_item_add_and_describe_and_update(): }) -@mock_dynamodb +@mock_dynamodb_deprecated def 
test_item_put_without_table(): conn = boto.connect_dynamodb() @@ -146,7 +146,7 @@ def test_item_put_without_table(): ).should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_get_missing_item(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -158,7 +158,7 @@ def test_get_missing_item(): table.has_item("foobar", "more").should.equal(False) -@mock_dynamodb +@mock_dynamodb_deprecated def test_get_item_with_undeclared_table(): conn = boto.connect_dynamodb() @@ -171,7 +171,7 @@ def test_get_item_with_undeclared_table(): ).should.throw(DynamoDBKeyNotFoundError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_get_item_without_range_key(): conn = boto.connect_dynamodb() message_table_schema = conn.create_schema( @@ -195,7 +195,7 @@ def test_get_item_without_range_key(): table.get_item.when.called_with(hash_key=hash_key).should.throw(DynamoDBValidationError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_delete_item(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -223,7 +223,7 @@ def test_delete_item(): item.delete.when.called_with().should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_delete_item_with_attribute_response(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -260,7 +260,7 @@ def test_delete_item_with_attribute_response(): item.delete.when.called_with().should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_delete_item_with_undeclared_table(): conn = boto.connect_dynamodb() @@ -273,7 +273,7 @@ def test_delete_item_with_undeclared_table(): ).should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_query(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -323,7 +323,7 @@ def test_query(): results.response['Items'].should.have.length_of(1) -@mock_dynamodb +@mock_dynamodb_deprecated def test_query_with_undeclared_table(): conn = boto.connect_dynamodb() @@ -339,7 +339,7 @@ 
def test_query_with_undeclared_table(): ).should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_scan(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -402,7 +402,7 @@ def test_scan(): results.response['Items'].should.have.length_of(1) -@mock_dynamodb +@mock_dynamodb_deprecated def test_scan_with_undeclared_table(): conn = boto.connect_dynamodb() @@ -419,7 +419,7 @@ def test_scan_with_undeclared_table(): ).should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_scan_after_has_item(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -430,7 +430,7 @@ def test_scan_after_has_item(): list(table.scan()).should.equal([]) -@mock_dynamodb +@mock_dynamodb_deprecated def test_write_batch(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -474,7 +474,7 @@ def test_write_batch(): table.item_count.should.equal(1) -@mock_dynamodb +@mock_dynamodb_deprecated def test_batch_read(): conn = boto.connect_dynamodb() table = create_table(conn) diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index fa8492620..18d353928 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -4,7 +4,7 @@ import boto import sure # noqa from freezegun import freeze_time -from moto import mock_dynamodb +from moto import mock_dynamodb_deprecated from boto.dynamodb import condition from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError @@ -27,7 +27,7 @@ def create_table(conn): @freeze_time("2012-01-14") -@mock_dynamodb +@mock_dynamodb_deprecated def test_create_table(): conn = boto.connect_dynamodb() create_table(conn) @@ -54,7 +54,7 @@ def test_create_table(): conn.describe_table('messages').should.equal(expected) -@mock_dynamodb +@mock_dynamodb_deprecated def test_delete_table(): conn = boto.connect_dynamodb() 
create_table(conn) @@ -66,7 +66,7 @@ def test_delete_table(): conn.layer1.delete_table.when.called_with('messages').should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_update_table_throughput(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -80,7 +80,7 @@ def test_update_table_throughput(): table.write_units.should.equal(6) -@mock_dynamodb +@mock_dynamodb_deprecated def test_item_add_and_describe_and_update(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -120,7 +120,7 @@ def test_item_add_and_describe_and_update(): }) -@mock_dynamodb +@mock_dynamodb_deprecated def test_item_put_without_table(): conn = boto.connect_dynamodb() @@ -132,7 +132,7 @@ def test_item_put_without_table(): ).should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_get_missing_item(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -142,7 +142,7 @@ def test_get_missing_item(): ).should.throw(DynamoDBKeyNotFoundError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_get_item_with_undeclared_table(): conn = boto.connect_dynamodb() @@ -154,7 +154,7 @@ def test_get_item_with_undeclared_table(): ).should.throw(DynamoDBKeyNotFoundError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_delete_item(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -181,7 +181,7 @@ def test_delete_item(): item.delete.when.called_with().should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_delete_item_with_attribute_response(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -216,7 +216,7 @@ def test_delete_item_with_attribute_response(): item.delete.when.called_with().should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_delete_item_with_undeclared_table(): conn = boto.connect_dynamodb() @@ -228,7 +228,7 @@ def test_delete_item_with_undeclared_table(): ).should.throw(DynamoDBResponseError) -@mock_dynamodb 
+@mock_dynamodb_deprecated def test_query(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -248,7 +248,7 @@ def test_query(): results.response['Items'].should.have.length_of(1) -@mock_dynamodb +@mock_dynamodb_deprecated def test_query_with_undeclared_table(): conn = boto.connect_dynamodb() @@ -258,7 +258,7 @@ def test_query_with_undeclared_table(): ).should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_scan(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -318,7 +318,7 @@ def test_scan(): results.response['Items'].should.have.length_of(1) -@mock_dynamodb +@mock_dynamodb_deprecated def test_scan_with_undeclared_table(): conn = boto.connect_dynamodb() @@ -335,7 +335,7 @@ def test_scan_with_undeclared_table(): ).should.throw(DynamoDBResponseError) -@mock_dynamodb +@mock_dynamodb_deprecated def test_scan_after_has_item(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -346,7 +346,7 @@ def test_scan_after_has_item(): list(table.scan()).should.equal([]) -@mock_dynamodb +@mock_dynamodb_deprecated def test_write_batch(): conn = boto.connect_dynamodb() table = create_table(conn) @@ -388,7 +388,7 @@ def test_write_batch(): table.item_count.should.equal(1) -@mock_dynamodb +@mock_dynamodb_deprecated def test_batch_read(): conn = boto.connect_dynamodb() table = create_table(conn) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 552611fa6..d66d36d9f 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4,7 +4,7 @@ import six import boto import sure # noqa import requests -from moto import mock_dynamodb2 +from moto import mock_dynamodb2, mock_dynamodb2_deprecated from moto.dynamodb2 import dynamodb_backend2 from boto.exception import JSONResponseError from tests.helpers import requires_boto_gte @@ -16,7 +16,7 @@ except ImportError: print("This boto version is not supported") @requires_boto_gte("2.9") 
-@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_list_tables(): name = 'TestTable' #{'schema': } @@ -32,7 +32,7 @@ def test_list_tables(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_list_tables_layer_1(): dynamodb_backend2.create_table("test_1",schema=[ {u'KeyType': u'HASH', u'AttributeName': u'name'} @@ -55,7 +55,7 @@ def test_list_tables_layer_1(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_describe_missing_table(): conn = boto.dynamodb2.connect_to_region( 'us-west-2', diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 7e4403daa..029506378 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -7,7 +7,7 @@ import boto3 from boto3.dynamodb.conditions import Key import sure # noqa from freezegun import freeze_time -from moto import mock_dynamodb2 +from moto import mock_dynamodb2, mock_dynamodb2_deprecated from boto.exception import JSONResponseError from tests.helpers import requires_boto_gte try: @@ -61,7 +61,7 @@ def iterate_results(res): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated @freeze_time("2012-01-14") def test_create_table(): table = create_table() @@ -90,7 +90,7 @@ def test_create_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated @freeze_time("2012-01-14") def test_create_table_with_local_index(): table = create_table_with_local_indexes() @@ -132,7 +132,7 @@ def test_create_table_with_local_index(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_delete_table(): conn = boto.dynamodb2.layer1.DynamoDBConnection() table = create_table() @@ -144,7 +144,7 @@ def test_delete_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_update_table_throughput(): table = create_table() 
table.throughput["read"].should.equal(10) @@ -169,7 +169,7 @@ def test_update_table_throughput(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_item_add_and_describe_and_update(): table = create_table() ok = table.put_item(data={ @@ -212,7 +212,7 @@ def test_item_add_and_describe_and_update(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_item_partial_save(): table = create_table() @@ -242,7 +242,7 @@ def test_item_partial_save(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_item_put_without_table(): table = Table('undeclared-table') item_data = { @@ -256,7 +256,7 @@ def test_item_put_without_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_get_missing_item(): table = create_table() @@ -267,14 +267,14 @@ def test_get_missing_item(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_get_item_with_undeclared_table(): table = Table('undeclared-table') table.get_item.when.called_with(test_hash=3241526475).should.throw(JSONResponseError) @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_get_item_without_range_key(): table = Table.create('messages', schema=[ HashKey('test_hash'), @@ -291,7 +291,7 @@ def test_get_item_without_range_key(): @requires_boto_gte("2.30.0") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_delete_item(): table = create_table() item_data = { @@ -313,7 +313,7 @@ def test_delete_item(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_delete_item_with_undeclared_table(): table = Table("undeclared-table") item_data = { @@ -327,7 +327,7 @@ def test_delete_item_with_undeclared_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_query(): table = create_table() @@ -384,7 +384,7 @@ def test_query(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def 
test_query_with_undeclared_table(): table = Table('undeclared') results = table.query( @@ -396,7 +396,7 @@ def test_query_with_undeclared_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_scan(): table = create_table() item_data = { @@ -451,7 +451,7 @@ def test_scan(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_scan_with_undeclared_table(): conn = boto.dynamodb2.layer1.DynamoDBConnection() conn.scan.when.called_with( @@ -468,7 +468,7 @@ def test_scan_with_undeclared_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_write_batch(): table = create_table() with table.batch_write() as batch: @@ -498,7 +498,7 @@ def test_write_batch(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_batch_read(): table = create_table() item_data = { @@ -542,14 +542,14 @@ def test_batch_read(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_get_key_fields(): table = create_table() kf = table.get_key_fields() kf.should.equal(['forum_name', 'subject']) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_create_with_global_indexes(): conn = boto.dynamodb2.layer1.DynamoDBConnection() @@ -594,7 +594,7 @@ def test_create_with_global_indexes(): ]) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_query_with_global_indexes(): table = Table.create('messages', schema=[ HashKey('subject'), @@ -638,7 +638,7 @@ def test_query_with_global_indexes(): list(results).should.have.length_of(0) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_query_with_local_indexes(): table = create_table_with_local_indexes() item_data = { @@ -658,7 +658,7 @@ def test_query_with_local_indexes(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_query_filter_eq(): table = create_table_with_local_indexes() item_data = [ @@ -691,7 +691,7 @@ def test_query_filter_eq(): @requires_boto_gte("2.9") -@mock_dynamodb2 
+@mock_dynamodb2_deprecated def test_query_filter_lt(): table = create_table_with_local_indexes() item_data = [ @@ -726,7 +726,7 @@ def test_query_filter_lt(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_query_filter_gt(): table = create_table_with_local_indexes() item_data = [ @@ -760,7 +760,7 @@ def test_query_filter_gt(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_query_filter_lte(): table = create_table_with_local_indexes() item_data = [ @@ -794,7 +794,7 @@ def test_query_filter_lte(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_query_filter_gte(): table = create_table_with_local_indexes() item_data = [ @@ -827,7 +827,7 @@ def test_query_filter_gte(): list(results).should.have.length_of(2) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_reverse_query(): conn = boto.dynamodb2.layer1.DynamoDBConnection() @@ -851,7 +851,7 @@ def test_reverse_query(): [r['created_at'] for r in results].should.equal(expected) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_lookup(): from decimal import Decimal table = Table.create('messages', schema=[ @@ -871,7 +871,7 @@ def test_lookup(): message.get('test_range').should.equal(Decimal(range_key)) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_failed_overwrite(): table = Table.create('messages', schema=[ HashKey('id'), @@ -900,7 +900,7 @@ def test_failed_overwrite(): dict(returned_item).should.equal(data4) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_conflicting_writes(): table = Table.create('messages', schema=[ HashKey('id'), diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 691e14818..83eff6519 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -6,7 +6,7 @@ from boto3.dynamodb.conditions import Key import 
sure # noqa from freezegun import freeze_time from boto.exception import JSONResponseError -from moto import mock_dynamodb2 +from moto import mock_dynamodb2, mock_dynamodb2_deprecated from tests.helpers import requires_boto_gte import botocore try: @@ -29,7 +29,7 @@ def create_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated @freeze_time("2012-01-14") def test_create_table(): create_table() @@ -62,7 +62,7 @@ def test_create_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_delete_table(): create_table() conn = boto.dynamodb2.layer1.DynamoDBConnection() @@ -75,7 +75,7 @@ def test_delete_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_update_table_throughput(): table = create_table() table.throughput["read"].should.equal(10) @@ -91,7 +91,7 @@ def test_update_table_throughput(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_item_add_and_describe_and_update(): table = create_table() @@ -125,7 +125,7 @@ def test_item_add_and_describe_and_update(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_item_partial_save(): table = create_table() @@ -152,7 +152,7 @@ def test_item_partial_save(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_item_put_without_table(): conn = boto.dynamodb2.layer1.DynamoDBConnection() @@ -167,7 +167,7 @@ def test_item_put_without_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_get_item_with_undeclared_table(): conn = boto.dynamodb2.layer1.DynamoDBConnection() @@ -178,7 +178,7 @@ def test_get_item_with_undeclared_table(): @requires_boto_gte("2.30.0") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_delete_item(): table = create_table() @@ -202,7 +202,7 @@ def test_delete_item(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_delete_item_with_undeclared_table(): 
conn = boto.dynamodb2.layer1.DynamoDBConnection() @@ -213,7 +213,7 @@ def test_delete_item_with_undeclared_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_query(): table = create_table() @@ -233,7 +233,7 @@ def test_query(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_query_with_undeclared_table(): conn = boto.dynamodb2.layer1.DynamoDBConnection() @@ -244,7 +244,7 @@ def test_query_with_undeclared_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_scan(): table = create_table() @@ -295,7 +295,7 @@ def test_scan(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_scan_with_undeclared_table(): conn = boto.dynamodb2.layer1.DynamoDBConnection() @@ -313,7 +313,7 @@ def test_scan_with_undeclared_table(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_write_batch(): table = create_table() @@ -344,7 +344,7 @@ def test_write_batch(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_batch_read(): table = create_table() @@ -385,7 +385,7 @@ def test_batch_read(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_get_key_fields(): table = create_table() kf = table.get_key_fields() @@ -393,14 +393,14 @@ def test_get_key_fields(): @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_get_missing_item(): table = create_table() table.get_item.when.called_with(forum_name='missing').should.throw(ItemNotFound) @requires_boto_gte("2.9") -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_get_special_item(): table = Table.create('messages', schema=[ HashKey('date-joined') @@ -418,7 +418,7 @@ def test_get_special_item(): dict(returned_item).should.equal(data) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_update_item_remove(): conn = boto.dynamodb2.connect_to_region("us-west-2") table = Table.create('messages', 
schema=[ @@ -444,7 +444,7 @@ def test_update_item_remove(): }) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_update_item_set(): conn = boto.dynamodb2.connect_to_region("us-west-2") table = Table.create('messages', schema=[ @@ -471,7 +471,7 @@ def test_update_item_set(): -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_failed_overwrite(): table = Table.create('messages', schema=[ HashKey('id'), @@ -499,7 +499,7 @@ def test_failed_overwrite(): dict(returned_item).should.equal(data4) -@mock_dynamodb2 +@mock_dynamodb2_deprecated def test_conflicting_writes(): table = Table.create('messages', schema=[ HashKey('id'), diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 095979f74..9c3fbd40d 100755 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -9,11 +9,11 @@ from boto.exception import EC2ResponseError, JSONResponseError import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2_deprecated from tests.helpers import requires_boto_gte -@mock_ec2 +@mock_ec2_deprecated def test_ami_create_and_delete(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -69,7 +69,7 @@ def test_ami_create_and_delete(): @requires_boto_gte("2.14.0") -@mock_ec2 +@mock_ec2_deprecated def test_ami_copy(): conn = boto.ec2.connect_to_region("us-west-1") reservation = conn.run_instances('ami-1234abcd') @@ -119,7 +119,7 @@ def test_ami_copy(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_ami_tagging(): conn = boto.connect_vpc('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -145,7 +145,7 @@ def test_ami_tagging(): image.tags["a key"].should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_ami_create_from_missing_instance(): conn = boto.connect_ec2('the_key', 'the_secret') args = ["i-abcdefg", "test-ami", "this is a test ami"] @@ -157,7 +157,7 @@ def test_ami_create_from_missing_instance():
cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_ami_pulls_attributes_from_instance(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -169,7 +169,7 @@ def test_ami_pulls_attributes_from_instance(): image.kernel_id.should.equal('test-kernel') -@mock_ec2 +@mock_ec2_deprecated def test_ami_filters(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -220,7 +220,7 @@ def test_ami_filters(): set([ami.id for ami in amis_by_nonpublic]).should.equal(set([imageA.id])) -@mock_ec2 +@mock_ec2_deprecated def test_ami_filtering_via_tag(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -243,7 +243,7 @@ def test_ami_filtering_via_tag(): set([ami.id for ami in amis_by_tagB]).should.equal(set([imageB.id])) -@mock_ec2 +@mock_ec2_deprecated def test_getting_missing_ami(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -254,7 +254,7 @@ def test_getting_missing_ami(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_getting_malformed_ami(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -265,7 +265,7 @@ def test_getting_malformed_ami(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_ami_attribute_group_permissions(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -318,7 +318,7 @@ def test_ami_attribute_group_permissions(): conn.modify_image_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) -@mock_ec2 +@mock_ec2_deprecated def test_ami_attribute_user_permissions(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -383,7 +383,7 @@ def test_ami_attribute_user_permissions(): conn.modify_image_attribute.when.called_with(**REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) -@mock_ec2 +@mock_ec2_deprecated def test_ami_attribute_user_and_group_permissions(): """ Boto supports
adding/removing both users and groups at the same time. @@ -435,7 +435,7 @@ def test_ami_attribute_user_and_group_permissions(): image.is_public.should.equal(False) -@mock_ec2 +@mock_ec2_deprecated def test_ami_attribute_error_cases(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index 88453e10b..7226cacaf 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -4,10 +4,10 @@ import boto.ec2 import boto3 import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2, mock_ec2_deprecated -@mock_ec2 +@mock_ec2_deprecated def test_describe_regions(): conn = boto.connect_ec2('the_key', 'the_secret') regions = conn.get_all_regions() @@ -16,7 +16,7 @@ def test_describe_regions(): region.endpoint.should.contain(region.name) -@mock_ec2 +@mock_ec2_deprecated def test_availability_zones(): conn = boto.connect_ec2('the_key', 'the_secret') regions = conn.get_all_regions() diff --git a/tests/test_ec2/test_customer_gateways.py b/tests/test_ec2/test_customer_gateways.py index efd6ce993..93e35dc6a 100644 --- a/tests/test_ec2/test_customer_gateways.py +++ b/tests/test_ec2/test_customer_gateways.py @@ -5,10 +5,10 @@ from nose.tools import assert_raises from nose.tools import assert_false from boto.exception import EC2ResponseError -from moto import mock_ec2 +from moto import mock_ec2_deprecated -@mock_ec2 +@mock_ec2_deprecated def test_create_customer_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -19,7 +19,7 @@ def test_create_customer_gateways(): customer_gateway.bgp_asn.should.equal(65534) customer_gateway.ip_address.should.equal('205.251.242.54') -@mock_ec2 +@mock_ec2_deprecated def test_describe_customer_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') customer_gateway = 
conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534) @@ -27,7 +27,7 @@ def test_describe_customer_gateways(): cgws.should.have.length_of(1) cgws[0].id.should.match(customer_gateway.id) -@mock_ec2 +@mock_ec2_deprecated def test_delete_customer_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -39,7 +39,7 @@ def test_delete_customer_gateways(): cgws = conn.get_all_customer_gateways() cgws.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_delete_customer_gateways_bad_id(): conn = boto.connect_vpc('the_key', 'the_secret') with assert_raises(EC2ResponseError) as cm: diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py index ef671ec11..0279a3d54 100644 --- a/tests/test_ec2/test_dhcp_options.py +++ b/tests/test_ec2/test_dhcp_options.py @@ -9,13 +9,13 @@ from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2, mock_ec2_deprecated SAMPLE_DOMAIN_NAME = u'example.com' SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] -@mock_ec2 +@mock_ec2_deprecated def test_dhcp_options_associate(): """ associate dhcp option """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -26,7 +26,7 @@ def test_dhcp_options_associate(): rval.should.be.equal(True) -@mock_ec2 +@mock_ec2_deprecated def test_dhcp_options_associate_invalid_dhcp_id(): """ associate dhcp option bad dhcp options id """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -39,7 +39,7 @@ def test_dhcp_options_associate_invalid_dhcp_id(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_dhcp_options_associate_invalid_vpc_id(): """ associate dhcp option invalid vpc id """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -52,7 +52,7 @@ def test_dhcp_options_associate_invalid_vpc_id(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_dhcp_options_delete_with_vpc(): """Test deletion of dhcp options with vpc""" conn = 
boto.connect_vpc('the_key', 'the_secret') @@ -78,7 +78,7 @@ def test_dhcp_options_delete_with_vpc(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_create_dhcp_options(): """Create most basic dhcp option""" conn = boto.connect_vpc('the_key', 'the_secret') @@ -89,7 +89,7 @@ def test_create_dhcp_options(): dhcp_option.options[u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1]) -@mock_ec2 +@mock_ec2_deprecated def test_create_dhcp_options_invalid_options(): """Create invalid dhcp options""" conn = boto.connect_vpc('the_key', 'the_secret') @@ -108,7 +108,7 @@ def test_create_dhcp_options_invalid_options(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_describe_dhcp_options(): """Test dhcp options lookup by id""" conn = boto.connect_vpc('the_key', 'the_secret') @@ -121,7 +121,7 @@ def test_describe_dhcp_options(): dhcp_options.should.be.length_of(1) -@mock_ec2 +@mock_ec2_deprecated def test_describe_dhcp_options_invalid_id(): """get error on invalid dhcp_option_id lookup""" conn = boto.connect_vpc('the_key', 'the_secret') @@ -133,7 +133,7 @@ def test_describe_dhcp_options_invalid_id(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_delete_dhcp_options(): """delete dhcp option""" conn = boto.connect_vpc('the_key', 'the_secret') @@ -151,7 +151,7 @@ def test_delete_dhcp_options(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_delete_dhcp_options_invalid_id(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -164,7 +164,7 @@ def test_delete_dhcp_options_invalid_id(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_delete_dhcp_options_malformed_id(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -177,7 +177,7 @@ def test_delete_dhcp_options_malformed_id(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_dhcp_tagging(): conn = 
boto.connect_vpc('the_key', 'the_secret') dhcp_option = conn.create_dhcp_options() @@ -194,7 +194,7 @@ def test_dhcp_tagging(): dhcp_option.tags["a key"].should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_dhcp_options_get_by_tag(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -230,7 +230,7 @@ def test_dhcp_options_get_by_tag(): dhcp_options_sets.should.have.length_of(2) -@mock_ec2 +@mock_ec2_deprecated def test_dhcp_options_get_by_id(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -308,7 +308,7 @@ def test_dhcp_options_get_by_key_filter(): dhcp_options_sets.should.have.length_of(3) -@mock_ec2 +@mock_ec2_deprecated def test_dhcp_options_get_by_invalid_filter(): conn = boto.connect_vpc('the_key', 'the_secret') diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index f99cef5e4..c4794b1c8 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -8,10 +8,10 @@ import boto from boto.exception import EC2ResponseError, JSONResponseError import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2_deprecated -@mock_ec2 +@mock_ec2_deprecated def test_create_and_delete_volume(): conn = boto.connect_ec2('the_key', 'the_secret') volume = conn.create_volume(80, "us-east-1a") @@ -43,7 +43,7 @@ def test_create_and_delete_volume(): -@mock_ec2 +@mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(JSONResponseError) as ex: @@ -53,7 +53,7 @@ def test_create_encrypted_volume_dryrun(): ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') -@mock_ec2 +@mock_ec2_deprecated def test_create_encrypted_volume(): conn = boto.connect_ec2('the_key', 'the_secret') conn.create_volume(80, "us-east-1a", encrypted=True) @@ -68,7 +68,7 @@ def 
test_create_encrypted_volume(): all_volumes[0].encrypted.should.be(True) -@mock_ec2 +@mock_ec2_deprecated def test_filter_volume_by_id(): conn = boto.connect_ec2('the_key', 'the_secret') volume1 = conn.create_volume(80, "us-east-1a") @@ -82,7 +82,7 @@ def test_filter_volume_by_id(): vol2.should.have.length_of(2) -@mock_ec2 +@mock_ec2_deprecated def test_volume_filters(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -155,7 +155,7 @@ def test_volume_filters(): ) -@mock_ec2 +@mock_ec2_deprecated def test_volume_attach_and_detach(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -209,7 +209,7 @@ def test_volume_attach_and_detach(): cm3.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_create_snapshot(): conn = boto.connect_ec2('the_key', 'the_secret') volume = conn.create_volume(80, "us-east-1a") @@ -245,7 +245,7 @@ def test_create_snapshot(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_create_encrypted_snapshot(): conn = boto.connect_ec2('the_key', 'the_secret') volume = conn.create_volume(80, "us-east-1a", encrypted=True) @@ -260,7 +260,7 @@ def test_create_encrypted_snapshot(): snapshots[0].encrypted.should.be(True) -@mock_ec2 +@mock_ec2_deprecated def test_filter_snapshot_by_id(): conn = boto.connect_ec2('the_key', 'the_secret') volume1 = conn.create_volume(36, "us-east-1a") @@ -281,7 +281,7 @@ def test_filter_snapshot_by_id(): s.region.name.should.equal(conn.region.name) -@mock_ec2 +@mock_ec2_deprecated def test_snapshot_filters(): conn = boto.connect_ec2('the_key', 'the_secret') volume1 = conn.create_volume(20, "us-east-1a", encrypted=False) @@ -322,7 +322,7 @@ def test_snapshot_filters(): set([snap.id for snap in snapshots_by_encrypted]).should.equal(set([snapshot3.id])) -@mock_ec2 +@mock_ec2_deprecated def test_snapshot_attribute(): import copy @@ -418,7 +418,7 @@ def test_snapshot_attribute(): 
user_ids=['user']).should.throw(NotImplementedError) -@mock_ec2 +@mock_ec2_deprecated def test_create_volume_from_snapshot(): conn = boto.connect_ec2('the_key', 'the_secret') volume = conn.create_volume(80, "us-east-1a") @@ -439,7 +439,7 @@ def test_create_volume_from_snapshot(): new_volume.snapshot_id.should.equal(snapshot.id) -@mock_ec2 +@mock_ec2_deprecated def test_create_volume_from_encrypted_snapshot(): conn = boto.connect_ec2('the_key', 'the_secret') volume = conn.create_volume(80, "us-east-1a", encrypted=True) @@ -454,7 +454,7 @@ def test_create_volume_from_encrypted_snapshot(): new_volume.encrypted.should.be(True) -@mock_ec2 +@mock_ec2_deprecated def test_modify_attribute_blockDeviceMapping(): """ Reproduces the missing feature explained at [0], where we want to mock a @@ -481,7 +481,7 @@ def test_modify_attribute_blockDeviceMapping(): instance.block_device_mapping['/dev/sda1'].delete_on_termination.should.be(True) -@mock_ec2 +@mock_ec2_deprecated def test_volume_tag_escaping(): conn = boto.connect_ec2('the_key', 'the_secret') vol = conn.create_volume(10, 'us-east-1a') diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index df939a313..dc7910379 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -10,12 +10,12 @@ import six import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2, mock_ec2_deprecated import logging -@mock_ec2 +@mock_ec2_deprecated def test_eip_allocate_classic(): """Allocate/release Classic EIP""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -42,7 +42,7 @@ def test_eip_allocate_classic(): standard.should_not.be.within(conn.get_all_addresses()) -@mock_ec2 +@mock_ec2_deprecated def test_eip_allocate_vpc(): """Allocate/release VPC EIP""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -60,7 +60,7 @@ def test_eip_allocate_vpc(): vpc.release() -@mock_ec2 +@mock_ec2_deprecated def 
test_eip_allocate_invalid_domain(): """Allocate EIP invalid domain""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -72,7 +72,7 @@ def test_eip_allocate_invalid_domain(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_eip_associate_classic(): """Associate/Disassociate EIP to classic instance""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -114,7 +114,7 @@ def test_eip_associate_classic(): instance.terminate() -@mock_ec2 +@mock_ec2_deprecated def test_eip_associate_vpc(): """Associate/Disassociate EIP to VPC instance""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -176,7 +176,7 @@ def test_eip_boto3_vpc_association(): instance.public_dns_name.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_eip_associate_network_interface(): """Associate/Disassociate EIP to NIC""" conn = boto.connect_vpc('the_key', 'the_secret') @@ -204,7 +204,7 @@ def test_eip_associate_network_interface(): eip.release() eip = None -@mock_ec2 +@mock_ec2_deprecated def test_eip_reassociate(): """reassociate EIP""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -233,7 +233,7 @@ def test_eip_reassociate(): instance1.terminate() instance2.terminate() -@mock_ec2 +@mock_ec2_deprecated def test_eip_reassociate_nic(): """reassociate EIP""" conn = boto.connect_vpc('the_key', 'the_secret') @@ -261,7 +261,7 @@ def test_eip_reassociate_nic(): eip.release() eip = None -@mock_ec2 +@mock_ec2_deprecated def test_eip_associate_invalid_args(): """Associate EIP, invalid args """ conn = boto.connect_ec2('the_key', 'the_secret') @@ -280,7 +280,7 @@ def test_eip_associate_invalid_args(): instance.terminate() -@mock_ec2 +@mock_ec2_deprecated def test_eip_disassociate_bogus_association(): """Disassociate bogus EIP""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -291,7 +291,7 @@ def test_eip_disassociate_bogus_association(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def 
test_eip_release_bogus_eip(): """Release bogus EIP""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -303,7 +303,7 @@ def test_eip_release_bogus_eip(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_eip_disassociate_arg_error(): """Invalid arguments disassociate address""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -315,7 +315,7 @@ def test_eip_disassociate_arg_error(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_eip_release_arg_error(): """Invalid arguments release address""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -327,7 +327,7 @@ def test_eip_release_arg_error(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_eip_describe(): """Listing of allocated Elastic IP Addresses.""" conn = boto.connect_ec2('the_key', 'the_secret') @@ -363,7 +363,7 @@ def test_eip_describe(): len(conn.get_all_addresses()).should.be.equal(0) -@mock_ec2 +@mock_ec2_deprecated def test_eip_describe_none(): """Error when search for bogus IP""" conn = boto.connect_ec2('the_key', 'the_secret') diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 9b3f88a45..6f60c85a8 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -10,13 +10,13 @@ import boto.ec2 from boto.exception import EC2ResponseError, JSONResponseError import sure # noqa -from moto import mock_ec2, mock_cloudformation +from moto import mock_ec2, mock_cloudformation_deprecated, mock_ec2_deprecated from tests.helpers import requires_boto_gte from tests.test_cloudformation.fixtures import vpc_eni import json -@mock_ec2 +@mock_ec2_deprecated def test_elastic_network_interfaces(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -54,7 +54,7 @@ def test_elastic_network_interfaces(): cm.exception.request_id.should_not.be.none 
-@mock_ec2 +@mock_ec2_deprecated def test_elastic_network_interfaces_subnet_validation(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -65,7 +65,7 @@ def test_elastic_network_interfaces_subnet_validation(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_elastic_network_interfaces_with_private_ip(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -83,7 +83,7 @@ def test_elastic_network_interfaces_with_private_ip(): eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) -@mock_ec2 +@mock_ec2_deprecated def test_elastic_network_interfaces_with_groups(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -101,7 +101,7 @@ def test_elastic_network_interfaces_with_groups(): @requires_boto_gte("2.12.0") -@mock_ec2 +@mock_ec2_deprecated def test_elastic_network_interfaces_modify_attribute(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -133,7 +133,7 @@ def test_elastic_network_interfaces_modify_attribute(): eni.groups[0].id.should.equal(security_group2.id) -@mock_ec2 +@mock_ec2_deprecated def test_elastic_network_interfaces_filtering(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -281,8 +281,8 @@ def test_elastic_network_interfaces_get_by_subnet_id(): enis.should.have.length_of(0) -@mock_ec2 -@mock_cloudformation +@mock_ec2_deprecated +@mock_cloudformation_deprecated def test_elastic_network_interfaces_cloudformation(): template = vpc_eni.template template_json = json.dumps(template) diff --git a/tests/test_ec2/test_general.py b/tests/test_ec2/test_general.py index 83225bc0e..1dc77df82 100644 --- a/tests/test_ec2/test_general.py +++ b/tests/test_ec2/test_general.py @@ -7,10 +7,10 @@ import boto from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2_deprecated -@mock_ec2 
+@mock_ec2_deprecated def test_console_output(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -20,7 +20,7 @@ def test_console_output(): output.output.should_not.equal(None) -@mock_ec2 +@mock_ec2_deprecated def test_console_output_without_instance(): conn = boto.connect_ec2('the_key', 'the_secret') diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 7b0d734ea..a310c05a4 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -12,7 +12,7 @@ from boto.exception import EC2ResponseError, JSONResponseError from freezegun import freeze_time import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2_deprecated from tests.helpers import requires_boto_gte @@ -23,7 +23,7 @@ def add_servers(ami_id, count): conn.run_instances(ami_id) -@mock_ec2 +@mock_ec2_deprecated def test_add_servers(): add_servers('ami-1234abcd', 2) @@ -37,7 +37,7 @@ def test_add_servers(): @freeze_time("2014-01-01 05:00:00") -@mock_ec2 +@mock_ec2_deprecated def test_instance_launch_and_terminate(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -87,14 +87,14 @@ def test_instance_launch_and_terminate(): instance.state.should.equal('terminated') -@mock_ec2 +@mock_ec2_deprecated def test_terminate_empty_instances(): conn = boto.connect_ec2('the_key', 'the_secret') conn.terminate_instances.when.called_with([]).should.throw(EC2ResponseError) @freeze_time("2014-01-01 05:00:00") -@mock_ec2 +@mock_ec2_deprecated def test_instance_attach_volume(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -123,7 +123,7 @@ def test_instance_attach_volume(): v.status.should.equal('in-use') -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_by_id(): conn = boto.connect_ec2() reservation = conn.run_instances('ami-1234abcd', min_count=2) @@ -150,7 +150,7 @@ def test_get_instances_by_id(): cm.exception.request_id.should_not.be.none 
-@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_state(): conn = boto.connect_ec2() reservation = conn.run_instances('ami-1234abcd', min_count=3) @@ -178,7 +178,7 @@ def test_get_instances_filtering_by_state(): conn.get_all_instances.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_instance_id(): conn = boto.connect_ec2() reservation = conn.run_instances('ami-1234abcd', min_count=3) @@ -197,7 +197,7 @@ def test_get_instances_filtering_by_instance_id(): reservations.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_instance_type(): conn = boto.connect_ec2() reservation1 = conn.run_instances('ami-1234abcd', instance_type='m1.small') @@ -238,7 +238,7 @@ def test_get_instances_filtering_by_instance_type(): #bogus instance-type should return none reservations.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_reason_code(): conn = boto.connect_ec2() reservation = conn.run_instances('ami-1234abcd', min_count=3) @@ -257,7 +257,7 @@ def test_get_instances_filtering_by_reason_code(): reservations[0].instances[0].id.should.equal(instance3.id) -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_source_dest_check(): conn = boto.connect_ec2() reservation = conn.run_instances('ami-1234abcd', min_count=2) @@ -274,7 +274,7 @@ def test_get_instances_filtering_by_source_dest_check(): source_dest_check_true[0].instances[0].id.should.equal(instance2.id) -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_vpc_id(): conn = boto.connect_vpc('the_key', 'the_secret') vpc1 = conn.create_vpc("10.0.0.0/16") @@ -298,7 +298,7 @@ def test_get_instances_filtering_by_vpc_id(): reservations2[0].instances[0].id.should.equal(instance2.id) -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_architecture(): conn = boto.connect_ec2() 
reservation = conn.run_instances('ami-1234abcd', min_count=1) @@ -309,7 +309,7 @@ def test_get_instances_filtering_by_architecture(): reservations[0].instances.should.have.length_of(1) -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_tag(): conn = boto.connect_ec2() reservation = conn.run_instances('ami-1234abcd', min_count=3) @@ -351,7 +351,7 @@ def test_get_instances_filtering_by_tag(): reservations[0].instances[1].id.should.equal(instance3.id) -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_tag_value(): conn = boto.connect_ec2() reservation = conn.run_instances('ami-1234abcd', min_count=3) @@ -388,7 +388,7 @@ def test_get_instances_filtering_by_tag_value(): reservations[0].instances[0].id.should.equal(instance1.id) reservations[0].instances[1].id.should.equal(instance3.id) -@mock_ec2 +@mock_ec2_deprecated def test_get_instances_filtering_by_tag_name(): conn = boto.connect_ec2() reservation = conn.run_instances('ami-1234abcd', min_count=3) @@ -418,7 +418,7 @@ def test_get_instances_filtering_by_tag_name(): reservations[0].instances[1].id.should.equal(instance2.id) reservations[0].instances[2].id.should.equal(instance3.id) -@mock_ec2 +@mock_ec2_deprecated def test_instance_start_and_stop(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd', min_count=2) @@ -448,7 +448,7 @@ def test_instance_start_and_stop(): started_instances[0].state.should.equal('pending') -@mock_ec2 +@mock_ec2_deprecated def test_instance_reboot(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -464,7 +464,7 @@ def test_instance_reboot(): instance.state.should.equal('pending') -@mock_ec2 +@mock_ec2_deprecated def test_instance_attribute_instance_type(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -482,7 +482,7 @@ def test_instance_attribute_instance_type(): 
instance_attribute.should.be.a(InstanceAttribute) instance_attribute.get('instanceType').should.equal("m1.small") -@mock_ec2 +@mock_ec2_deprecated def test_modify_instance_attribute_security_groups(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -506,7 +506,7 @@ def test_modify_instance_attribute_security_groups(): any(g.id == sg_id2 for g in group_list).should.be.ok -@mock_ec2 +@mock_ec2_deprecated def test_instance_attribute_user_data(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -525,7 +525,7 @@ def test_instance_attribute_user_data(): instance_attribute.get("userData").should.equal("this is my user data") -@mock_ec2 +@mock_ec2_deprecated def test_instance_attribute_source_dest_check(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -566,7 +566,7 @@ def test_instance_attribute_source_dest_check(): instance_attribute.get("sourceDestCheck").should.equal(True) -@mock_ec2 +@mock_ec2_deprecated def test_user_data_with_run_instance(): user_data = b"some user data" conn = boto.connect_ec2('the_key', 'the_secret') @@ -580,7 +580,7 @@ def test_user_data_with_run_instance(): decoded_user_data.should.equal(b"some user data") -@mock_ec2 +@mock_ec2_deprecated def test_run_instance_with_security_group_name(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -600,7 +600,7 @@ def test_run_instance_with_security_group_name(): instance.groups[0].name.should.equal("group1") -@mock_ec2 +@mock_ec2_deprecated def test_run_instance_with_security_group_id(): conn = boto.connect_ec2('the_key', 'the_secret') group = conn.create_security_group('group1', "some description") @@ -612,7 +612,7 @@ def test_run_instance_with_security_group_id(): instance.groups[0].name.should.equal("group1") -@mock_ec2 +@mock_ec2_deprecated def test_run_instance_with_instance_type(): conn = boto.connect_ec2('the_key', 'the_secret') reservation 
= conn.run_instances('ami-1234abcd', instance_type="t1.micro") @@ -621,7 +621,7 @@ def test_run_instance_with_instance_type(): instance.instance_type.should.equal("t1.micro") -@mock_ec2 +@mock_ec2_deprecated def test_run_instance_with_default_placement(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -630,7 +630,7 @@ def test_run_instance_with_default_placement(): instance.placement.should.equal("us-east-1a") -@mock_ec2 +@mock_ec2_deprecated def test_run_instance_with_placement(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd', placement="us-east-1b") @@ -639,7 +639,7 @@ def test_run_instance_with_placement(): instance.placement.should.equal("us-east-1b") -@mock_ec2 +@mock_ec2_deprecated def test_run_instance_with_subnet(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -653,7 +653,7 @@ def test_run_instance_with_subnet(): all_enis.should.have.length_of(1) -@mock_ec2 +@mock_ec2_deprecated def test_run_instance_with_nic_autocreated(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -686,7 +686,7 @@ def test_run_instance_with_nic_autocreated(): eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) -@mock_ec2 +@mock_ec2_deprecated def test_run_instance_with_nic_preexisting(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -724,7 +724,7 @@ def test_run_instance_with_nic_preexisting(): @requires_boto_gte("2.32.0") -@mock_ec2 +@mock_ec2_deprecated def test_instance_with_nic_attach_detach(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -790,7 +790,7 @@ def test_instance_with_nic_attach_detach(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_ec2_classic_has_public_ip_address(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = 
conn.run_instances('ami-1234abcd', key_name="keypair_name") @@ -802,7 +802,7 @@ def test_ec2_classic_has_public_ip_address(): instance.private_dns_name.should.contain(instance.private_ip_address) -@mock_ec2 +@mock_ec2_deprecated def test_run_instance_with_keypair(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") @@ -811,14 +811,14 @@ def test_run_instance_with_keypair(): instance.key_name.should.equal("keypair_name") -@mock_ec2 +@mock_ec2_deprecated def test_describe_instance_status_no_instances(): conn = boto.connect_ec2('the_key', 'the_secret') all_status = conn.get_all_instance_status() len(all_status).should.equal(0) -@mock_ec2 +@mock_ec2_deprecated def test_describe_instance_status_with_instances(): conn = boto.connect_ec2('the_key', 'the_secret') conn.run_instances('ami-1234abcd', key_name="keypair_name") @@ -829,7 +829,7 @@ def test_describe_instance_status_with_instances(): all_status[0].system_status.status.should.equal('ok') -@mock_ec2 +@mock_ec2_deprecated def test_describe_instance_status_with_instance_filter(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -852,7 +852,7 @@ def test_describe_instance_status_with_instance_filter(): cm.exception.request_id.should_not.be.none @requires_boto_gte("2.32.0") -@mock_ec2 +@mock_ec2_deprecated def test_describe_instance_status_with_non_running_instances(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd', min_count=3) @@ -877,7 +877,7 @@ def test_describe_instance_status_with_non_running_instances(): status3 = next((s for s in all_status if s.id == instance3.id), None) status3.state_name.should.equal('running') -@mock_ec2 +@mock_ec2_deprecated def test_get_instance_by_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index 4a08fe108..12b37860e 100644 --- 
a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -10,14 +10,14 @@ from boto.exception import EC2ResponseError, JSONResponseError import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2_deprecated VPC_CIDR="10.0.0.0/16" BAD_VPC="vpc-deadbeef" BAD_IGW="igw-deadbeef" -@mock_ec2 +@mock_ec2_deprecated def test_igw_create(): """ internet gateway create """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -37,7 +37,7 @@ def test_igw_create(): igw = conn.get_all_internet_gateways()[0] igw.attachments.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_igw_attach(): """ internet gateway attach """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -55,7 +55,7 @@ def test_igw_attach(): igw = conn.get_all_internet_gateways()[0] igw.attachments[0].vpc_id.should.be.equal(vpc.id) -@mock_ec2 +@mock_ec2_deprecated def test_igw_attach_bad_vpc(): """ internet gateway fail to attach w/ bad vpc """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -67,7 +67,7 @@ def test_igw_attach_bad_vpc(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_igw_attach_twice(): """ internet gateway fail to attach twice """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -82,7 +82,7 @@ def test_igw_attach_twice(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_igw_detach(): """ internet gateway detach""" conn = boto.connect_vpc('the_key', 'the_secret') @@ -100,7 +100,7 @@ def test_igw_detach(): igw = conn.get_all_internet_gateways()[0] igw.attachments.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_igw_detach_wrong_vpc(): """ internet gateway fail to detach w/ wrong vpc """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -115,7 +115,7 @@ def test_igw_detach_wrong_vpc(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none 
-@mock_ec2 +@mock_ec2_deprecated def test_igw_detach_invalid_vpc(): """ internet gateway fail to detach w/ invalid vpc """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -129,7 +129,7 @@ def test_igw_detach_invalid_vpc(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_igw_detach_unattached(): """ internet gateway fail to detach unattached """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -142,7 +142,7 @@ def test_igw_detach_unattached(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_igw_delete(): """ internet gateway delete""" conn = boto.connect_vpc('the_key', 'the_secret') @@ -160,7 +160,7 @@ def test_igw_delete(): conn.delete_internet_gateway(igw.id) conn.get_all_internet_gateways().should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_igw_delete_attached(): """ internet gateway fail to delete attached """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -174,7 +174,7 @@ def test_igw_delete_attached(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_igw_desribe(): """ internet gateway fetch by id """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -182,7 +182,7 @@ def test_igw_desribe(): igw_by_search = conn.get_all_internet_gateways([igw.id])[0] igw.id.should.equal(igw_by_search.id) -@mock_ec2 +@mock_ec2_deprecated def test_igw_desribe_bad_id(): """ internet gateway fail to fetch by bad id """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -193,7 +193,7 @@ def test_igw_desribe_bad_id(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_igw_filter_by_vpc_id(): """ internet gateway filter by vpc id """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -208,7 +208,7 @@ def test_igw_filter_by_vpc_id(): result[0].id.should.equal(igw1.id) -@mock_ec2 +@mock_ec2_deprecated 
def test_igw_filter_by_tags(): """ internet gateway filter by vpc id """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -222,7 +222,7 @@ def test_igw_filter_by_tags(): result[0].id.should.equal(igw1.id) -@mock_ec2 +@mock_ec2_deprecated def test_igw_filter_by_internet_gateway_id(): """ internet gateway filter by internet gateway id """ conn = boto.connect_vpc('the_key', 'the_secret') @@ -235,7 +235,7 @@ def test_igw_filter_by_internet_gateway_id(): result[0].id.should.equal(igw1.id) -@mock_ec2 +@mock_ec2_deprecated def test_igw_filter_by_attachment_state(): """ internet gateway filter by attachment state """ conn = boto.connect_vpc('the_key', 'the_secret') diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 7d45e79db..a35f0b962 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -8,16 +8,16 @@ import six import sure # noqa from boto.exception import EC2ResponseError, JSONResponseError -from moto import mock_ec2 +from moto import mock_ec2_deprecated -@mock_ec2 +@mock_ec2_deprecated def test_key_pairs_empty(): conn = boto.connect_ec2('the_key', 'the_secret') assert len(conn.get_all_key_pairs()) == 0 -@mock_ec2 +@mock_ec2_deprecated def test_key_pairs_invalid_id(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -28,7 +28,7 @@ def test_key_pairs_invalid_id(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_key_pairs_create(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -45,7 +45,7 @@ def test_key_pairs_create(): assert kps[0].name == 'foo' -@mock_ec2 +@mock_ec2_deprecated def test_key_pairs_create_two(): conn = boto.connect_ec2('the_key', 'the_secret') kp = conn.create_key_pair('foo') @@ -60,7 +60,7 @@ def test_key_pairs_create_two(): kps[0].name.should.equal('foo') -@mock_ec2 +@mock_ec2_deprecated def test_key_pairs_create_exist(): conn = boto.connect_ec2('the_key', 'the_secret') kp = conn.create_key_pair('foo') @@ -74,7 +74,7 @@ def 
test_key_pairs_create_exist(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_key_pairs_delete_no_exist(): conn = boto.connect_ec2('the_key', 'the_secret') assert len(conn.get_all_key_pairs()) == 0 @@ -82,7 +82,7 @@ def test_key_pairs_delete_no_exist(): r.should.be.ok -@mock_ec2 +@mock_ec2_deprecated def test_key_pairs_delete_exist(): conn = boto.connect_ec2('the_key', 'the_secret') conn.create_key_pair('foo') @@ -98,7 +98,7 @@ def test_key_pairs_delete_exist(): assert len(conn.get_all_key_pairs()) == 0 -@mock_ec2 +@mock_ec2_deprecated def test_key_pairs_import(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -115,7 +115,7 @@ def test_key_pairs_import(): assert kps[0].name == 'foo' -@mock_ec2 +@mock_ec2_deprecated def test_key_pairs_import_exist(): conn = boto.connect_ec2('the_key', 'the_secret') kp = conn.import_key_pair('foo', b'content') diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index 5ab16b51b..91158e0bf 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -2,10 +2,10 @@ from __future__ import unicode_literals import boto import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2_deprecated -@mock_ec2 +@mock_ec2_deprecated def test_default_network_acl_created_with_vpc(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -13,7 +13,7 @@ def test_default_network_acl_created_with_vpc(): all_network_acls.should.have.length_of(2) -@mock_ec2 +@mock_ec2_deprecated def test_network_acls(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -22,7 +22,7 @@ def test_network_acls(): all_network_acls.should.have.length_of(3) -@mock_ec2 +@mock_ec2_deprecated def test_new_subnet_associates_with_default_network_acl(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.get_all_vpcs()[0] @@ -36,7 +36,7 @@ def 
test_new_subnet_associates_with_default_network_acl(): [a.subnet_id for a in acl.associations].should.contain(subnet.id) -@mock_ec2 +@mock_ec2_deprecated def test_network_acl_entries(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -62,7 +62,7 @@ def test_network_acl_entries(): entries[0].rule_action.should.equal('ALLOW') -@mock_ec2 +@mock_ec2_deprecated def test_associate_new_network_acl_with_subnet(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -81,7 +81,7 @@ def test_associate_new_network_acl_with_subnet(): test_network_acl.associations[0].subnet_id.should.equal(subnet.id) -@mock_ec2 +@mock_ec2_deprecated def test_delete_network_acl(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -101,7 +101,7 @@ def test_delete_network_acl(): any(acl.id == network_acl.id for acl in updated_network_acls).shouldnt.be.ok -@mock_ec2 +@mock_ec2_deprecated def test_network_acl_tagging(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py index 9375314d4..07e02c526 100644 --- a/tests/test_ec2/test_regions.py +++ b/tests/test_ec2/test_regions.py @@ -3,7 +3,7 @@ import boto.ec2 import boto.ec2.autoscale import boto.ec2.elb import sure -from moto import mock_ec2, mock_autoscaling, mock_elb +from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated def add_servers_to_region(ami_id, count, region): @@ -12,7 +12,7 @@ def add_servers_to_region(ami_id, count, region): conn.run_instances(ami_id) -@mock_ec2 +@mock_ec2_deprecated def test_add_servers_to_a_single_region(): region = 'ap-northeast-1' add_servers_to_region('ami-1234abcd', 1, region) @@ -27,7 +27,7 @@ def test_add_servers_to_a_single_region(): reservations[1].instances[0].image_id.should.equal('ami-5678efgh') -@mock_ec2 +@mock_ec2_deprecated def 
test_add_servers_to_multiple_regions(): region1 = 'us-east-1' region2 = 'ap-northeast-1' @@ -46,8 +46,8 @@ def test_add_servers_to_multiple_regions(): ap_reservations[0].instances[0].image_id.should.equal('ami-5678efgh') -@mock_autoscaling -@mock_elb +@mock_autoscaling_deprecated +@mock_elb_deprecated def test_create_autoscaling_group(): elb_conn = boto.ec2.elb.connect_to_region('us-east-1') elb_conn.create_load_balancer('us_test_lb', zones=[], listeners=[(80, 8080, 'http')]) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 41e5786e6..3aa4b460a 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -8,11 +8,11 @@ import boto3 from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2, mock_ec2_deprecated from tests.helpers import requires_boto_gte -@mock_ec2 +@mock_ec2_deprecated def test_route_tables_defaults(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -37,7 +37,7 @@ def test_route_tables_defaults(): all_route_tables.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_route_tables_additional(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -77,7 +77,7 @@ def test_route_tables_additional(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_route_tables_filters_standard(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -115,7 +115,7 @@ def test_route_tables_filters_standard(): conn.get_all_route_tables.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) -@mock_ec2 +@mock_ec2_deprecated def test_route_tables_filters_associations(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -152,7 +152,7 @@ def test_route_tables_filters_associations(): association1_route_tables[0].associations.should.have.length_of(2) -@mock_ec2 
+@mock_ec2_deprecated def test_route_table_associations(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -219,7 +219,7 @@ def test_route_table_associations(): @requires_boto_gte("2.16.0") -@mock_ec2 +@mock_ec2_deprecated def test_route_table_replace_route_table_association(): """ Note: Boto has deprecated replace_route_table_assocation (which returns status) @@ -289,7 +289,7 @@ def test_route_table_replace_route_table_association(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_route_table_get_by_tag(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -326,7 +326,7 @@ def test_route_table_get_by_tag_boto3(): route_tables[0].tags[0].should.equal({'Key': 'Name', 'Value': 'TestRouteTable'}) -@mock_ec2 +@mock_ec2_deprecated def test_routes_additional(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -364,7 +364,7 @@ def test_routes_additional(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_routes_replace(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -412,7 +412,7 @@ def test_routes_replace(): @requires_boto_gte("2.19.0") -@mock_ec2 +@mock_ec2_deprecated def test_routes_not_supported(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -431,7 +431,7 @@ def test_routes_not_supported(): @requires_boto_gte("2.34.0") -@mock_ec2 +@mock_ec2_deprecated def test_routes_vpc_peering_connection(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -458,7 +458,7 @@ def test_routes_vpc_peering_connection(): @requires_boto_gte("2.34.0") -@mock_ec2 +@mock_ec2_deprecated def test_routes_vpn_gateway(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -480,7 +480,7 @@ def test_routes_vpn_gateway(): new_route.vpc_peering_connection_id.should.be.none -@mock_ec2 +@mock_ec2_deprecated def 
test_network_acl_tagging(): conn = boto.connect_vpc('the_key', 'the secret') diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 19f43862d..3968d9151 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -12,10 +12,10 @@ from botocore.exceptions import ClientError from boto.exception import EC2ResponseError, JSONResponseError import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2, mock_ec2_deprecated -@mock_ec2 +@mock_ec2_deprecated def test_create_and_describe_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -43,7 +43,7 @@ def test_create_and_describe_security_group(): set(group_names).should.equal(set(["default", "test security group"])) -@mock_ec2 +@mock_ec2_deprecated def test_create_security_group_without_description_raises_error(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -54,7 +54,7 @@ def test_create_security_group_without_description_raises_error(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_default_security_group(): conn = boto.ec2.connect_to_region('us-east-1') groups = conn.get_all_security_groups() @@ -62,7 +62,7 @@ def test_default_security_group(): groups[0].name.should.equal("default") -@mock_ec2 +@mock_ec2_deprecated def test_create_and_describe_vpc_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') vpc_id = 'vpc-5300000c' @@ -88,7 +88,7 @@ def test_create_and_describe_vpc_security_group(): all_groups[0].name.should.equal('test security group') -@mock_ec2 +@mock_ec2_deprecated def test_create_two_security_groups_with_same_name_in_different_vpc(): conn = boto.connect_ec2('the_key', 'the_secret') vpc_id = 'vpc-5300000c' @@ -105,7 +105,7 @@ def test_create_two_security_groups_with_same_name_in_different_vpc(): set(group_names).should.equal(set(["default", "test security group"])) -@mock_ec2 +@mock_ec2_deprecated def test_deleting_security_groups(): conn 
= boto.connect_ec2('the_key', 'the_secret') security_group1 = conn.create_security_group('test1', 'test1') @@ -135,7 +135,7 @@ def test_deleting_security_groups(): conn.get_all_security_groups().should.have.length_of(2) -@mock_ec2 +@mock_ec2_deprecated def test_delete_security_group_in_vpc(): conn = boto.connect_ec2('the_key', 'the_secret') vpc_id = "vpc-12345" @@ -145,7 +145,7 @@ def test_delete_security_group_in_vpc(): conn.delete_security_group(group_id=security_group1.id) -@mock_ec2 +@mock_ec2_deprecated def test_authorize_ip_range_and_revoke(): conn = boto.connect_ec2('the_key', 'the_secret') security_group = conn.create_security_group('test', 'test') @@ -216,7 +216,7 @@ def test_authorize_ip_range_and_revoke(): egress_security_group.rules_egress.should.have.length_of(1) -@mock_ec2 +@mock_ec2_deprecated def test_authorize_other_group_and_revoke(): conn = boto.connect_ec2('the_key', 'the_secret') security_group = conn.create_security_group('test', 'test') @@ -269,7 +269,7 @@ def test_authorize_other_group_egress_and_revoke(): sg01.ip_permissions_egress.should.have.length_of(1) -@mock_ec2 +@mock_ec2_deprecated def test_authorize_group_in_vpc(): conn = boto.connect_ec2('the_key', 'the_secret') vpc_id = "vpc-12345" @@ -295,7 +295,7 @@ def test_authorize_group_in_vpc(): security_group.rules.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_get_all_security_groups(): conn = boto.connect_ec2() sg1 = conn.create_security_group(name='test1', description='test1', vpc_id='vpc-mjm05d27') @@ -321,7 +321,7 @@ def test_get_all_security_groups(): resp.should.have.length_of(4) -@mock_ec2 +@mock_ec2_deprecated def test_authorize_bad_cidr_throws_invalid_parameter_value(): conn = boto.connect_ec2('the_key', 'the_secret') security_group = conn.create_security_group('test', 'test') @@ -332,7 +332,7 @@ def test_authorize_bad_cidr_throws_invalid_parameter_value(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def 
test_security_group_tagging(): conn = boto.connect_vpc() vpc = conn.create_vpc("10.0.0.0/16") @@ -356,7 +356,7 @@ def test_security_group_tagging(): group.tags["Test"].should.equal("Tag") -@mock_ec2 +@mock_ec2_deprecated def test_security_group_tag_filtering(): conn = boto.connect_ec2() sg = conn.create_security_group("test-sg", "Test SG") @@ -366,7 +366,7 @@ def test_security_group_tag_filtering(): groups.should.have.length_of(1) -@mock_ec2 +@mock_ec2_deprecated def test_authorize_all_protocols_with_no_port_specification(): conn = boto.connect_ec2() sg = conn.create_security_group('test', 'test') @@ -379,7 +379,7 @@ def test_authorize_all_protocols_with_no_port_specification(): sg.rules[0].to_port.should.equal(None) -@mock_ec2 +@mock_ec2_deprecated def test_sec_group_rule_limit(): ec2_conn = boto.connect_ec2() sg = ec2_conn.create_security_group('test', 'test') @@ -441,7 +441,7 @@ def test_sec_group_rule_limit(): cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') -@mock_ec2 +@mock_ec2_deprecated def test_sec_group_rule_limit_vpc(): ec2_conn = boto.connect_ec2() vpc_conn = boto.connect_vpc() @@ -611,7 +611,7 @@ def test_authorize_and_revoke_in_bulk(): for ip_permission in expected_ip_permissions: sg01.ip_permissions_egress.shouldnt.contain(ip_permission) -@mock_ec2 +@mock_ec2_deprecated def test_get_all_security_groups_filter_with_same_vpc_id(): conn = boto.connect_ec2('the_key', 'the_secret') vpc_id = 'vpc-5300000c' diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 790ffeb65..1933613e8 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -7,12 +7,13 @@ import boto3 import sure # noqa from boto.exception import JSONResponseError -from moto import mock_ec2 +from moto import mock_ec2, mock_ec2_deprecated from moto.backends import get_model from moto.core.utils import iso_8601_datetime_with_milliseconds @mock_ec2 +@mock_ec2_deprecated def 
test_request_spot_instances(): conn = boto3.client('ec2', 'us-east-1') vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] @@ -73,7 +74,7 @@ def test_request_spot_instances(): request.launch_specification.subnet_id.should.equal(subnet_id) -@mock_ec2 +@mock_ec2_deprecated def test_request_spot_instances_default_arguments(): """ Test that moto set the correct default arguments @@ -106,7 +107,7 @@ def test_request_spot_instances_default_arguments(): request.launch_specification.subnet_id.should.equal(None) -@mock_ec2 +@mock_ec2_deprecated def test_cancel_spot_instance_request(): conn = boto.connect_ec2() @@ -130,7 +131,7 @@ def test_cancel_spot_instance_request(): requests.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_request_spot_instances_fulfilled(): """ Test that moto correctly fullfills a spot instance request @@ -156,7 +157,7 @@ def test_request_spot_instances_fulfilled(): request.state.should.equal("active") -@mock_ec2 +@mock_ec2_deprecated def test_tag_spot_instance_request(): """ Test that moto correctly tags a spot instance request @@ -177,7 +178,7 @@ def test_tag_spot_instance_request(): tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'}) -@mock_ec2 +@mock_ec2_deprecated def test_get_all_spot_instance_requests_filtering(): """ Test that moto correctly filters spot instance requests @@ -211,7 +212,7 @@ def test_get_all_spot_instance_requests_filtering(): requests.should.have.length_of(1) -@mock_ec2 +@mock_ec2_deprecated def test_request_spot_instances_setting_instance_id(): conn = boto.ec2.connect_to_region("us-east-1") request = conn.request_spot_instances( diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 8e6a2a4ea..0a9b41b8e 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -11,10 +11,10 @@ from botocore.exceptions import ParamValidationError import json import sure # noqa -from moto import mock_cloudformation, mock_ec2 +from moto import 
mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated -@mock_ec2 +@mock_ec2_deprecated def test_subnets(): ec2 = boto.connect_ec2('the_key', 'the_secret') conn = boto.connect_vpc('the_key', 'the_secret') @@ -36,7 +36,7 @@ def test_subnets(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_subnet_create_vpc_validation(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -47,7 +47,7 @@ def test_subnet_create_vpc_validation(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_subnet_tagging(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -65,7 +65,7 @@ def test_subnet_tagging(): subnet.tags["a key"].should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_subnet_should_have_proper_availability_zone_set(): conn = boto.vpc.connect_to_region('us-west-1') vpcA = conn.create_vpc("10.0.0.0/16") @@ -87,7 +87,7 @@ def test_default_subnet(): subnet.map_public_ip_on_launch.shouldnt.be.ok -@mock_ec2 +@mock_ec2_deprecated def test_non_default_subnet(): vpc_cli = boto.vpc.connect_to_region('us-west-1') @@ -150,7 +150,7 @@ def test_modify_subnet_attribute_validation(): client.modify_subnet_attribute(SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) -@mock_ec2 +@mock_ec2_deprecated def test_get_subnets_filtering(): ec2 = boto.ec2.connect_to_region('us-west-1') conn = boto.vpc.connect_to_region('us-west-1') @@ -205,8 +205,8 @@ def test_get_subnets_filtering(): conn.get_all_subnets.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) -@mock_ec2 -@mock_cloudformation +@mock_ec2_deprecated +@mock_cloudformation_deprecated def test_subnet_tags_through_cloudformation(): vpc_conn = boto.vpc.connect_to_region('us-west-1') vpc = vpc_conn.create_vpc("10.0.0.0/16") diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 4a85eb6e1..1084e44c4 100644 --- a/tests/test_ec2/test_tags.py 
+++ b/tests/test_ec2/test_tags.py @@ -8,11 +8,11 @@ from boto.exception import EC2ResponseError, JSONResponseError from boto.ec2.instance import Reservation import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2_deprecated from nose.tools import assert_raises -@mock_ec2 +@mock_ec2_deprecated def test_add_tag(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -32,7 +32,7 @@ def test_add_tag(): existing_instance.tags["a key"].should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_remove_tag(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -59,7 +59,7 @@ def test_remove_tag(): instance.remove_tag("a key", "some value") -@mock_ec2 +@mock_ec2_deprecated def test_get_all_tags(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -73,7 +73,7 @@ def test_get_all_tags(): tag.value.should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_get_all_tags_with_special_characters(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -87,7 +87,7 @@ def test_get_all_tags_with_special_characters(): tag.value.should.equal("some<> value") -@mock_ec2 +@mock_ec2_deprecated def test_create_tags(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -108,7 +108,7 @@ def test_create_tags(): set([tag_dict[key] for key in tag_dict]).should.equal(set([tag.value for tag in tags])) -@mock_ec2 +@mock_ec2_deprecated def test_tag_limit_exceeded(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -137,7 +137,7 @@ def test_tag_limit_exceeded(): tag.value.should.equal("a value") -@mock_ec2 +@mock_ec2_deprecated def test_invalid_parameter_tag_null(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -150,7 
+150,7 @@ def test_invalid_parameter_tag_null(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_invalid_id(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(EC2ResponseError) as cm: @@ -166,7 +166,7 @@ def test_invalid_id(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_get_all_tags_resource_id_filter(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -193,7 +193,7 @@ def test_get_all_tags_resource_id_filter(): tag.value.should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_get_all_tags_resource_type_filter(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -220,7 +220,7 @@ def test_get_all_tags_resource_type_filter(): tag.value.should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_get_all_tags_key_filter(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -239,7 +239,7 @@ def test_get_all_tags_key_filter(): tag.value.should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_get_all_tags_value_filter(): conn = boto.connect_ec2('the_key', 'the_secret') reservation = conn.run_instances('ami-1234abcd') @@ -283,7 +283,7 @@ def test_get_all_tags_value_filter(): tags.should.have.length_of(1) -@mock_ec2 +@mock_ec2_deprecated def test_retrieved_instances_must_contain_their_tags(): tag_key = 'Tag name' tag_value = 'Tag value' @@ -314,7 +314,7 @@ def test_retrieved_instances_must_contain_their_tags(): retrieved_tags[tag_key].should.equal(tag_value) -@mock_ec2 +@mock_ec2_deprecated def test_retrieved_volumes_must_contain_their_tags(): tag_key = 'Tag name' tag_value = 'Tag value' @@ -337,7 +337,7 @@ def test_retrieved_volumes_must_contain_their_tags(): retrieved_tags[tag_key].should.equal(tag_value) -@mock_ec2 +@mock_ec2_deprecated def 
test_retrieved_snapshots_must_contain_their_tags(): tag_key = 'Tag name' tag_value = 'Tag value' @@ -359,7 +359,7 @@ def test_retrieved_snapshots_must_contain_their_tags(): retrieved_tags[tag_key].should.equal(tag_value) -@mock_ec2 +@mock_ec2_deprecated def test_filter_instances_by_wildcard_tags(): conn = boto.connect_ec2(aws_access_key_id='the_key', aws_secret_access_key='the_secret') reservation = conn.run_instances('ami-1234abcd') diff --git a/tests/test_ec2/test_virtual_private_gateways.py b/tests/test_ec2/test_virtual_private_gateways.py index 8050559f1..0a7e34ea5 100644 --- a/tests/test_ec2/test_virtual_private_gateways.py +++ b/tests/test_ec2/test_virtual_private_gateways.py @@ -2,10 +2,10 @@ from __future__ import unicode_literals import boto import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2_deprecated -@mock_ec2 +@mock_ec2_deprecated def test_virtual_private_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -16,7 +16,7 @@ def test_virtual_private_gateways(): vpn_gateway.state.should.equal('available') vpn_gateway.availability_zone.should.equal('us-east-1a') -@mock_ec2 +@mock_ec2_deprecated def test_describe_vpn_gateway(): conn = boto.connect_vpc('the_key', 'the_secret') vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') @@ -32,7 +32,7 @@ def test_describe_vpn_gateway(): vpn_gateway.availability_zone.should.equal('us-east-1a') -@mock_ec2 +@mock_ec2_deprecated def test_vpn_gateway_vpc_attachment(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -50,7 +50,7 @@ def test_vpn_gateway_vpc_attachment(): attachments[0].state.should.equal('attached') -@mock_ec2 +@mock_ec2_deprecated def test_delete_vpn_gateway(): conn = boto.connect_vpc('the_key', 'the_secret') vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') @@ -60,7 +60,7 @@ def test_delete_vpn_gateway(): vgws.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_vpn_gateway_tagging(): conn = 
boto.connect_vpc('the_key', 'the_secret') vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') @@ -76,7 +76,7 @@ def test_vpn_gateway_tagging(): vpn_gateway.tags["a key"].should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_detach_vpn_gateway(): conn = boto.connect_vpc('the_key', 'the_secret') diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index d41c3ab7b..c6a2feffb 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -7,12 +7,12 @@ import boto from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2_deprecated from tests.helpers import requires_boto_gte @requires_boto_gte("2.32.0") -@mock_ec2 +@mock_ec2_deprecated def test_vpc_peering_connections(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -25,7 +25,7 @@ def test_vpc_peering_connections(): @requires_boto_gte("2.32.0") -@mock_ec2 +@mock_ec2_deprecated def test_vpc_peering_connections_get_all(): conn = boto.connect_vpc('the_key', 'the_secret') vpc_pcx = test_vpc_peering_connections() @@ -37,7 +37,7 @@ def test_vpc_peering_connections_get_all(): @requires_boto_gte("2.32.0") -@mock_ec2 +@mock_ec2_deprecated def test_vpc_peering_connections_accept(): conn = boto.connect_vpc('the_key', 'the_secret') vpc_pcx = test_vpc_peering_connections() @@ -57,7 +57,7 @@ def test_vpc_peering_connections_accept(): @requires_boto_gte("2.32.0") -@mock_ec2 +@mock_ec2_deprecated def test_vpc_peering_connections_reject(): conn = boto.connect_vpc('the_key', 'the_secret') vpc_pcx = test_vpc_peering_connections() @@ -77,7 +77,7 @@ def test_vpc_peering_connections_reject(): @requires_boto_gte("2.32.1") -@mock_ec2 +@mock_ec2_deprecated def test_vpc_peering_connections_delete(): conn = boto.connect_vpc('the_key', 'the_secret') vpc_pcx = test_vpc_peering_connections() diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py 
index 513238001..c4dbf788e 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -8,13 +8,13 @@ import boto from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2 +from moto import mock_ec2, mock_ec2_deprecated SAMPLE_DOMAIN_NAME = u'example.com' SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] -@mock_ec2 +@mock_ec2_deprecated def test_vpcs(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -35,7 +35,7 @@ def test_vpcs(): cm.exception.request_id.should_not.be.none -@mock_ec2 +@mock_ec2_deprecated def test_vpc_defaults(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -50,7 +50,7 @@ def test_vpc_defaults(): conn.get_all_route_tables().should.have.length_of(1) conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_vpc_isdefault_filter(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -59,7 +59,7 @@ def test_vpc_isdefault_filter(): conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) -@mock_ec2 +@mock_ec2_deprecated def test_multiple_vpcs_default_filter(): conn = boto.connect_vpc('the_key', 'the_secret') conn.create_vpc("10.8.0.0/16") @@ -71,7 +71,7 @@ def test_multiple_vpcs_default_filter(): vpc[0].cidr_block.should.equal('172.31.0.0/16') -@mock_ec2 +@mock_ec2_deprecated def test_vpc_state_available_filter(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") @@ -80,7 +80,7 @@ def test_vpc_state_available_filter(): vpc.delete() conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) -@mock_ec2 +@mock_ec2_deprecated def test_vpc_tagging(): conn = boto.connect_vpc() vpc = conn.create_vpc("10.0.0.0/16") @@ -96,7 +96,7 @@ def test_vpc_tagging(): vpc.tags["a key"].should.equal("some value") -@mock_ec2 +@mock_ec2_deprecated def test_vpc_get_by_id(): 
conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") @@ -110,7 +110,7 @@ def test_vpc_get_by_id(): vpc2.id.should.be.within(vpc_ids) -@mock_ec2 +@mock_ec2_deprecated def test_vpc_get_by_cidr_block(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") @@ -124,7 +124,7 @@ def test_vpc_get_by_cidr_block(): vpc2.id.should.be.within(vpc_ids) -@mock_ec2 +@mock_ec2_deprecated def test_vpc_get_by_dhcp_options_id(): conn = boto.connect_vpc() dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) @@ -142,7 +142,7 @@ def test_vpc_get_by_dhcp_options_id(): vpc2.id.should.be.within(vpc_ids) -@mock_ec2 +@mock_ec2_deprecated def test_vpc_get_by_tag(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") @@ -160,7 +160,7 @@ def test_vpc_get_by_tag(): vpc2.id.should.be.within(vpc_ids) -@mock_ec2 +@mock_ec2_deprecated def test_vpc_get_by_tag_key_superset(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") @@ -180,7 +180,7 @@ def test_vpc_get_by_tag_key_superset(): vpc2.id.should.be.within(vpc_ids) -@mock_ec2 +@mock_ec2_deprecated def test_vpc_get_by_tag_key_subset(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") @@ -200,7 +200,7 @@ def test_vpc_get_by_tag_key_subset(): vpc2.id.should.be.within(vpc_ids) -@mock_ec2 +@mock_ec2_deprecated def test_vpc_get_by_tag_value_superset(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") @@ -220,7 +220,7 @@ def test_vpc_get_by_tag_value_superset(): vpc2.id.should.be.within(vpc_ids) -@mock_ec2 +@mock_ec2_deprecated def test_vpc_get_by_tag_value_subset(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") @@ -339,7 +339,7 @@ def test_vpc_modify_enable_dns_hostnames(): attr = response.get('EnableDnsHostnames') attr.get('Value').should.be.ok -@mock_ec2 +@mock_ec2_deprecated def test_vpc_associate_dhcp_options(): conn = boto.connect_vpc() dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) diff --git 
a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py index dd96e7b65..864c1c3ee 100644 --- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -4,10 +4,10 @@ from nose.tools import assert_raises import sure # noqa from boto.exception import EC2ResponseError -from moto import mock_ec2 +from moto import mock_ec2_deprecated -@mock_ec2 +@mock_ec2_deprecated def test_create_vpn_connections(): conn = boto.connect_vpc('the_key', 'the_secret') vpn_connection = conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') @@ -15,7 +15,7 @@ def test_create_vpn_connections(): vpn_connection.id.should.match(r'vpn-\w+') vpn_connection.type.should.equal('ipsec.1') -@mock_ec2 +@mock_ec2_deprecated def test_delete_vpn_connections(): conn = boto.connect_vpc('the_key', 'the_secret') vpn_connection = conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') @@ -25,13 +25,13 @@ def test_delete_vpn_connections(): list_of_vpn_connections = conn.get_all_vpn_connections() list_of_vpn_connections.should.have.length_of(0) -@mock_ec2 +@mock_ec2_deprecated def test_delete_vpn_connections_bad_id(): conn = boto.connect_vpc('the_key', 'the_secret') with assert_raises(EC2ResponseError): conn.delete_vpn_connection('vpn-0123abcd') -@mock_ec2 +@mock_ec2_deprecated def test_describe_vpn_connections(): conn = boto.connect_vpc('the_key', 'the_secret') list_of_vpn_connections = conn.get_all_vpn_connections() diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 4f7687941..fa13fc23b 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -18,9 +18,9 @@ from boto.ec2.elb.policies import ( from boto.exception import BotoServerError import sure # noqa -from moto import mock_elb, mock_ec2 +from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated -@mock_elb +@mock_elb_deprecated def test_create_load_balancer(): conn = boto.connect_elb() @@ -43,13 +43,13 @@ def 
test_create_load_balancer(): listener2.protocol.should.equal("TCP") -@mock_elb +@mock_elb_deprecated def test_getting_missing_elb(): conn = boto.connect_elb() conn.get_all_load_balancers.when.called_with(load_balancer_names='aaa').should.throw(BotoServerError) -@mock_elb +@mock_elb_deprecated def test_create_elb_in_multiple_region(): zones = ['us-east-1a', 'us-east-1b'] ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -63,7 +63,7 @@ def test_create_elb_in_multiple_region(): list(west1_conn.get_all_load_balancers()).should.have.length_of(1) list(west2_conn.get_all_load_balancers()).should.have.length_of(1) -@mock_elb +@mock_elb_deprecated def test_create_load_balancer_with_certificate(): conn = boto.connect_elb() @@ -99,7 +99,7 @@ def test_create_and_delete_boto3_support(): ) list(client.describe_load_balancers()['LoadBalancerDescriptions']).should.have.length_of(0) -@mock_elb +@mock_elb_deprecated def test_add_listener(): conn = boto.connect_elb() zones = ['us-east-1a', 'us-east-1b'] @@ -119,7 +119,7 @@ def test_add_listener(): listener2.protocol.should.equal("TCP") -@mock_elb +@mock_elb_deprecated def test_delete_listener(): conn = boto.connect_elb() @@ -161,7 +161,7 @@ def test_create_and_delete_listener_boto3_support(): balancer['ListenerDescriptions'][1]['Listener']['InstancePort'].should.equal(8443) -@mock_elb +@mock_elb_deprecated def test_set_sslcertificate(): conn = boto.connect_elb() @@ -178,7 +178,7 @@ def test_set_sslcertificate(): listener1.ssl_certificate_id.should.equal("arn:certificate") -@mock_elb +@mock_elb_deprecated def test_get_load_balancers_by_name(): conn = boto.connect_elb() @@ -193,7 +193,7 @@ def test_get_load_balancers_by_name(): conn.get_all_load_balancers(load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) -@mock_elb +@mock_elb_deprecated def test_delete_load_balancer(): conn = boto.connect_elb() @@ -209,7 +209,7 @@ def test_delete_load_balancer(): balancers.should.have.length_of(0) -@mock_elb +@mock_elb_deprecated def 
test_create_health_check(): conn = boto.connect_elb() @@ -262,8 +262,8 @@ def test_create_health_check_boto3(): balancer['HealthCheck']['UnhealthyThreshold'].should.equal(5) -@mock_ec2 -@mock_elb +@mock_ec2_deprecated +@mock_elb_deprecated def test_register_instances(): ec2_conn = boto.connect_ec2() reservation = ec2_conn.run_instances('ami-1234abcd', 2) @@ -307,8 +307,8 @@ def test_register_instances_boto3(): set(instance_ids).should.equal(set([instance_id1, instance_id2])) -@mock_ec2 -@mock_elb +@mock_ec2_deprecated +@mock_elb_deprecated def test_deregister_instances(): ec2_conn = boto.connect_ec2() reservation = ec2_conn.run_instances('ami-1234abcd', 2) @@ -365,7 +365,7 @@ def test_deregister_instances_boto3(): balancer['Instances'][0]['InstanceId'].should.equal(instance_id2) -@mock_elb +@mock_elb_deprecated def test_default_attributes(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -378,7 +378,7 @@ def test_default_attributes(): attributes.connecting_settings.idle_timeout.should.equal(60) -@mock_elb +@mock_elb_deprecated def test_cross_zone_load_balancing_attribute(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -393,7 +393,7 @@ def test_cross_zone_load_balancing_attribute(): attributes.cross_zone_load_balancing.enabled.should.be.false -@mock_elb +@mock_elb_deprecated def test_connection_draining_attribute(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -419,7 +419,7 @@ def test_connection_draining_attribute(): attributes.connection_draining.enabled.should.be.false -@mock_elb +@mock_elb_deprecated def test_access_log_attribute(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -444,7 +444,7 @@ def test_access_log_attribute(): attributes.access_log.enabled.should.be.false -@mock_elb +@mock_elb_deprecated def test_connection_settings_attribute(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -462,7 +462,7 
@@ def test_connection_settings_attribute(): attributes = lb.get_attributes(force=True) attributes.connecting_settings.idle_timeout.should.equal(60) -@mock_elb +@mock_elb_deprecated def test_create_lb_cookie_stickiness_policy(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -482,7 +482,7 @@ def test_create_lb_cookie_stickiness_policy(): int(cookie_expiration_period_response_str).should.equal(cookie_expiration_period) lb.policies.lb_cookie_stickiness_policies[0].policy_name.should.equal(policy_name) -@mock_elb +@mock_elb_deprecated def test_create_lb_cookie_stickiness_policy_no_expiry(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -495,7 +495,7 @@ def test_create_lb_cookie_stickiness_policy_no_expiry(): lb.policies.lb_cookie_stickiness_policies[0].cookie_expiration_period.should.be.none lb.policies.lb_cookie_stickiness_policies[0].policy_name.should.equal(policy_name) -@mock_elb +@mock_elb_deprecated def test_create_app_cookie_stickiness_policy(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -509,7 +509,7 @@ def test_create_app_cookie_stickiness_policy(): lb.policies.app_cookie_stickiness_policies[0].cookie_name.should.equal(cookie_name) lb.policies.app_cookie_stickiness_policies[0].policy_name.should.equal(policy_name) -@mock_elb +@mock_elb_deprecated def test_create_lb_policy(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -521,7 +521,7 @@ def test_create_lb_policy(): lb = conn.get_all_load_balancers()[0] lb.policies.other_policies[0].policy_name.should.equal(policy_name) -@mock_elb +@mock_elb_deprecated def test_set_policies_of_listener(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -543,7 +543,7 @@ def test_set_policies_of_listener(): # by contrast to a backend, a listener stores only policy name strings listener.policy_names[0].should.equal(policy_name) -@mock_elb +@mock_elb_deprecated def 
test_set_policies_of_backend_server(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -562,8 +562,8 @@ def test_set_policies_of_backend_server(): # by contrast to a listener, a backend stores OtherPolicy objects backend.policies[0].policy_name.should.equal(policy_name) -@mock_ec2 -@mock_elb +@mock_ec2_deprecated +@mock_elb_deprecated def test_describe_instance_health(): ec2_conn = boto.connect_ec2() reservation = ec2_conn.run_instances('ami-1234abcd', 2) @@ -765,7 +765,7 @@ def test_subnets(): lb.should.have.key('VPCId').which.should.equal(vpc.id) -@mock_elb +@mock_elb_deprecated def test_create_load_balancer_duplicate(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py index 71b3b8ec5..a24aa4bd4 100644 --- a/tests/test_emr/test_emr.py +++ b/tests/test_emr/test_emr.py @@ -11,7 +11,7 @@ from boto.emr.step import StreamingStep import six import sure # noqa -from moto import mock_emr +from moto import mock_emr_deprecated from tests.helpers import requires_boto_gte @@ -35,7 +35,7 @@ input_instance_groups = [ ] -@mock_emr +@mock_emr_deprecated def test_describe_cluster(): conn = boto.connect_emr() args = run_jobflow_args.copy() @@ -106,7 +106,7 @@ def test_describe_cluster(): cluster.visibletoallusers.should.equal('true') -@mock_emr +@mock_emr_deprecated def test_describe_jobflows(): conn = boto.connect_emr() args = run_jobflow_args.copy() @@ -158,7 +158,7 @@ def test_describe_jobflows(): resp.should.have.length_of(200) -@mock_emr +@mock_emr_deprecated def test_describe_jobflow(): conn = boto.connect_emr() args = run_jobflow_args.copy() @@ -241,7 +241,7 @@ def test_describe_jobflow(): jf.visibletoallusers.should.equal('true') -@mock_emr +@mock_emr_deprecated def test_list_clusters(): conn = boto.connect_emr() args = run_jobflow_args.copy() @@ -309,7 +309,7 @@ def test_list_clusters(): resp.clusters.should.have.length_of(30) -@mock_emr 
+@mock_emr_deprecated def test_run_jobflow(): conn = boto.connect_emr() args = run_jobflow_args.copy() @@ -326,7 +326,7 @@ def test_run_jobflow(): job_flow.steps.should.have.length_of(0) -@mock_emr +@mock_emr_deprecated def test_run_jobflow_in_multiple_regions(): regions = {} for region in ['us-east-1', 'eu-west-1']: @@ -343,7 +343,7 @@ def test_run_jobflow_in_multiple_regions(): @requires_boto_gte("2.8") -@mock_emr +@mock_emr_deprecated def test_run_jobflow_with_new_params(): # Test that run_jobflow works with newer params conn = boto.connect_emr() @@ -351,7 +351,7 @@ def test_run_jobflow_with_new_params(): @requires_boto_gte("2.8") -@mock_emr +@mock_emr_deprecated def test_run_jobflow_with_visible_to_all_users(): conn = boto.connect_emr() for expected in (True, False): @@ -364,7 +364,7 @@ def test_run_jobflow_with_visible_to_all_users(): @requires_boto_gte("2.8") -@mock_emr +@mock_emr_deprecated def test_run_jobflow_with_instance_groups(): input_groups = dict((g.name, g) for g in input_instance_groups) conn = boto.connect_emr() @@ -384,7 +384,7 @@ def test_run_jobflow_with_instance_groups(): @requires_boto_gte("2.8") -@mock_emr +@mock_emr_deprecated def test_set_termination_protection(): conn = boto.connect_emr() job_id = conn.run_jobflow(**run_jobflow_args) @@ -401,7 +401,7 @@ def test_set_termination_protection(): @requires_boto_gte("2.8") -@mock_emr +@mock_emr_deprecated def test_set_visible_to_all_users(): conn = boto.connect_emr() args = run_jobflow_args.copy() @@ -419,7 +419,7 @@ def test_set_visible_to_all_users(): job_flow.visibletoallusers.should.equal('false') -@mock_emr +@mock_emr_deprecated def test_terminate_jobflow(): conn = boto.connect_emr() job_id = conn.run_jobflow(**run_jobflow_args) @@ -433,7 +433,7 @@ def test_terminate_jobflow(): # testing multiple end points for each feature -@mock_emr +@mock_emr_deprecated def test_bootstrap_actions(): bootstrap_actions = [ BootstrapAction( @@ -466,7 +466,7 @@ def test_bootstrap_actions(): list(arg.value 
for arg in x.args).should.equal(y.args()) -@mock_emr +@mock_emr_deprecated def test_instance_groups(): input_groups = dict((g.name, g) for g in input_instance_groups) @@ -536,7 +536,7 @@ def test_instance_groups(): int(igs['task-2'].instancerunningcount).should.equal(3) -@mock_emr +@mock_emr_deprecated def test_steps(): input_steps = [ StreamingStep( @@ -633,7 +633,7 @@ def test_steps(): test_list_steps_with_states() -@mock_emr +@mock_emr_deprecated def test_tags(): input_tags = {"tag1": "val1", "tag2": "val2"} diff --git a/tests/test_glacier/test_glacier_archives.py b/tests/test_glacier/test_glacier_archives.py index 6a139a91c..e8fa6045e 100644 --- a/tests/test_glacier/test_glacier_archives.py +++ b/tests/test_glacier/test_glacier_archives.py @@ -4,10 +4,10 @@ from tempfile import NamedTemporaryFile import boto.glacier import sure # noqa -from moto import mock_glacier +from moto import mock_glacier_deprecated -@mock_glacier +@mock_glacier_deprecated def test_create_and_delete_archive(): the_file = NamedTemporaryFile(delete=False) the_file.write(b"some stuff") diff --git a/tests/test_glacier/test_glacier_jobs.py b/tests/test_glacier/test_glacier_jobs.py index 7eff3566e..ef4a00b75 100644 --- a/tests/test_glacier/test_glacier_jobs.py +++ b/tests/test_glacier/test_glacier_jobs.py @@ -5,10 +5,10 @@ import json from boto.glacier.layer1 import Layer1 import sure # noqa -from moto import mock_glacier +from moto import mock_glacier_deprecated -@mock_glacier +@mock_glacier_deprecated def test_init_glacier_job(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" @@ -23,7 +23,7 @@ def test_init_glacier_job(): job_response['Location'].should.equal("//vaults/my_vault/jobs/{0}".format(job_id)) -@mock_glacier +@mock_glacier_deprecated def test_describe_job(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" @@ -56,7 +56,7 @@ def test_describe_job(): }) -@mock_glacier +@mock_glacier_deprecated def test_list_glacier_jobs(): conn = 
Layer1(region_name="us-west-2") vault_name = "my_vault" @@ -77,7 +77,7 @@ def test_list_glacier_jobs(): len(jobs['JobList']).should.equal(2) -@mock_glacier +@mock_glacier_deprecated def test_get_job_output(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" diff --git a/tests/test_glacier/test_glacier_vaults.py b/tests/test_glacier/test_glacier_vaults.py index 40f20e58e..e64f40a90 100644 --- a/tests/test_glacier/test_glacier_vaults.py +++ b/tests/test_glacier/test_glacier_vaults.py @@ -3,10 +3,10 @@ from __future__ import unicode_literals import boto.glacier import sure # noqa -from moto import mock_glacier +from moto import mock_glacier_deprecated -@mock_glacier +@mock_glacier_deprecated def test_create_vault(): conn = boto.glacier.connect_to_region("us-west-2") @@ -17,7 +17,7 @@ def test_create_vault(): vaults[0].name.should.equal("my_vault") -@mock_glacier +@mock_glacier_deprecated def test_delete_vault(): conn = boto.glacier.connect_to_region("us-west-2") diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index de8f89a59..a51240b2f 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -6,7 +6,7 @@ import boto3 import sure # noqa from boto.exception import BotoServerError from botocore.exceptions import ClientError -from moto import mock_iam +from moto import mock_iam, mock_iam_deprecated from moto.iam.models import aws_managed_policies from nose.tools import assert_raises, assert_equals, assert_not_equals from nose.tools import raises @@ -14,7 +14,7 @@ from nose.tools import raises from tests.helpers import requires_boto_gte -@mock_iam() +@mock_iam_deprecated() def test_get_all_server_certs(): conn = boto.connect_iam() @@ -26,7 +26,7 @@ def test_get_all_server_certs(): cert1.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname") -@mock_iam() +@mock_iam_deprecated() def test_get_server_cert_doesnt_exist(): conn = boto.connect_iam() @@ -34,7 +34,7 @@ def test_get_server_cert_doesnt_exist(): 
conn.get_server_certificate("NonExistant") -@mock_iam() +@mock_iam_deprecated() def test_get_server_cert(): conn = boto.connect_iam() @@ -44,7 +44,7 @@ def test_get_server_cert(): cert.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname") -@mock_iam() +@mock_iam_deprecated() def test_upload_server_cert(): conn = boto.connect_iam() @@ -54,7 +54,7 @@ def test_upload_server_cert(): cert.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname") -@mock_iam() +@mock_iam_deprecated() @raises(BotoServerError) def test_get_role__should_throw__when_role_does_not_exist(): conn = boto.connect_iam() @@ -62,7 +62,7 @@ def test_get_role__should_throw__when_role_does_not_exist(): conn.get_role('unexisting_role') -@mock_iam() +@mock_iam_deprecated() @raises(BotoServerError) def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist(): conn = boto.connect_iam() @@ -70,7 +70,7 @@ def test_get_instance_profile__should_throw__when_instance_profile_does_not_exis conn.get_instance_profile('unexisting_instance_profile') -@mock_iam() +@mock_iam_deprecated() def test_create_role_and_instance_profile(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") @@ -91,7 +91,7 @@ def test_create_role_and_instance_profile(): conn.list_roles().roles[0].role_name.should.equal('my-role') -@mock_iam() +@mock_iam_deprecated() def test_remove_role_from_instance_profile(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") @@ -108,7 +108,7 @@ def test_remove_role_from_instance_profile(): dict(profile.roles).should.be.empty -@mock_iam() +@mock_iam_deprecated() def test_list_instance_profiles(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") @@ -123,7 +123,7 @@ def test_list_instance_profiles(): profiles[0].roles.role_name.should.equal("my-role") -@mock_iam() +@mock_iam_deprecated() def test_list_instance_profiles_for_role(): conn = 
boto.connect_iam() @@ -153,7 +153,7 @@ def test_list_instance_profiles_for_role(): len(profile_list).should.equal(0) -@mock_iam() +@mock_iam_deprecated() def test_list_role_policies(): conn = boto.connect_iam() conn.create_role("my-role") @@ -162,7 +162,7 @@ def test_list_role_policies(): role.policy_names[0].should.equal("test policy") -@mock_iam() +@mock_iam_deprecated() def test_put_role_policy(): conn = boto.connect_iam() conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path") @@ -171,7 +171,7 @@ def test_put_role_policy(): policy.should.equal("test policy") -@mock_iam() +@mock_iam_deprecated() def test_update_assume_role_policy(): conn = boto.connect_iam() role = conn.create_role("my-role") @@ -180,7 +180,7 @@ def test_update_assume_role_policy(): role.assume_role_policy_document.should.equal("my-policy") -@mock_iam() +@mock_iam_deprecated() def test_create_user(): conn = boto.connect_iam() conn.create_user('my-user') @@ -188,7 +188,7 @@ def test_create_user(): conn.create_user('my-user') -@mock_iam() +@mock_iam_deprecated() def test_get_user(): conn = boto.connect_iam() with assert_raises(BotoServerError): @@ -210,7 +210,7 @@ def test_list_users(): user['Arn'].should.equal('arn:aws:iam::123456789012:user/my-user') -@mock_iam() +@mock_iam_deprecated() def test_create_login_profile(): conn = boto.connect_iam() with assert_raises(BotoServerError): @@ -221,7 +221,7 @@ def test_create_login_profile(): conn.create_login_profile('my-user', 'my-pass') -@mock_iam() +@mock_iam_deprecated() def test_delete_login_profile(): conn = boto.connect_iam() conn.create_user('my-user') @@ -231,7 +231,7 @@ def test_delete_login_profile(): conn.delete_login_profile('my-user') -@mock_iam() +@mock_iam_deprecated() def test_create_access_key(): conn = boto.connect_iam() with assert_raises(BotoServerError): @@ -240,7 +240,7 @@ def test_create_access_key(): conn.create_access_key('my-user') -@mock_iam() +@mock_iam_deprecated() def 
test_get_all_access_keys(): conn = boto.connect_iam() conn.create_user('my-user') @@ -257,7 +257,7 @@ def test_get_all_access_keys(): ) -@mock_iam() +@mock_iam_deprecated() def test_delete_access_key(): conn = boto.connect_iam() conn.create_user('my-user') @@ -265,7 +265,7 @@ def test_delete_access_key(): conn.delete_access_key(access_key_id, 'my-user') -@mock_iam() +@mock_iam_deprecated() def test_delete_user(): conn = boto.connect_iam() with assert_raises(BotoServerError): @@ -274,7 +274,7 @@ def test_delete_user(): conn.delete_user('my-user') -@mock_iam() +@mock_iam_deprecated() def test_generate_credential_report(): conn = boto.connect_iam() result = conn.generate_credential_report() @@ -283,7 +283,7 @@ def test_generate_credential_report(): result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('COMPLETE') -@mock_iam() +@mock_iam_deprecated() def test_get_credential_report(): conn = boto.connect_iam() conn.create_user('my-user') @@ -298,7 +298,7 @@ def test_get_credential_report(): @requires_boto_gte('2.39') -@mock_iam() +@mock_iam_deprecated() def test_managed_policy(): conn = boto.connect_iam() diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 412484a70..6fd0f47dd 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -4,10 +4,10 @@ import sure # noqa from nose.tools import assert_raises from boto.exception import BotoServerError -from moto import mock_iam +from moto import mock_iam, mock_iam_deprecated -@mock_iam() +@mock_iam_deprecated() def test_create_group(): conn = boto.connect_iam() conn.create_group('my-group') @@ -15,7 +15,7 @@ def test_create_group(): conn.create_group('my-group') -@mock_iam() +@mock_iam_deprecated() def test_get_group(): conn = boto.connect_iam() conn.create_group('my-group') @@ -24,7 +24,7 @@ def test_get_group(): conn.get_group('not-group') -@mock_iam() +@mock_iam_deprecated() def test_get_all_groups(): 
conn = boto.connect_iam() conn.create_group('my-group1') @@ -33,7 +33,7 @@ def test_get_all_groups(): groups.should.have.length_of(2) -@mock_iam() +@mock_iam_deprecated() def test_add_user_to_group(): conn = boto.connect_iam() with assert_raises(BotoServerError): @@ -45,7 +45,7 @@ def test_add_user_to_group(): conn.add_user_to_group('my-group', 'my-user') -@mock_iam() +@mock_iam_deprecated() def test_remove_user_from_group(): conn = boto.connect_iam() with assert_raises(BotoServerError): @@ -58,7 +58,7 @@ def test_remove_user_from_group(): conn.remove_user_from_group('my-group', 'my-user') -@mock_iam() +@mock_iam_deprecated() def test_get_groups_for_user(): conn = boto.connect_iam() conn.create_group('my-group1') diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index 0e4f29625..a86bce44c 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -4,10 +4,10 @@ import boto.kinesis from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException import sure # noqa -from moto import mock_kinesis +from moto import mock_kinesis_deprecated -@mock_kinesis +@mock_kinesis_deprecated def test_create_cluster(): conn = boto.kinesis.connect_to_region("us-west-2") @@ -25,13 +25,13 @@ def test_create_cluster(): shards.should.have.length_of(2) -@mock_kinesis +@mock_kinesis_deprecated def test_describe_non_existant_stream(): conn = boto.kinesis.connect_to_region("us-east-1") conn.describe_stream.when.called_with("not-a-stream").should.throw(ResourceNotFoundException) -@mock_kinesis +@mock_kinesis_deprecated def test_list_and_delete_stream(): conn = boto.kinesis.connect_to_region("us-west-2") @@ -48,7 +48,7 @@ def test_list_and_delete_stream(): conn.delete_stream.when.called_with("not-a-stream").should.throw(ResourceNotFoundException) -@mock_kinesis +@mock_kinesis_deprecated def test_basic_shard_iterator(): conn = boto.kinesis.connect_to_region("us-west-2") @@ -66,7 +66,7 @@ def 
test_basic_shard_iterator(): response['Records'].should.equal([]) -@mock_kinesis +@mock_kinesis_deprecated def test_get_invalid_shard_iterator(): conn = boto.kinesis.connect_to_region("us-west-2") @@ -76,7 +76,7 @@ def test_get_invalid_shard_iterator(): conn.get_shard_iterator.when.called_with(stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) -@mock_kinesis +@mock_kinesis_deprecated def test_put_records(): conn = boto.kinesis.connect_to_region("us-west-2") @@ -107,7 +107,7 @@ def test_put_records(): record["SequenceNumber"].should.equal("1") -@mock_kinesis +@mock_kinesis_deprecated def test_get_records_limit(): conn = boto.kinesis.connect_to_region("us-west-2") @@ -136,7 +136,7 @@ def test_get_records_limit(): response['Records'].should.have.length_of(2) -@mock_kinesis +@mock_kinesis_deprecated def test_get_records_at_sequence_number(): # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by a specific sequence number. conn = boto.kinesis.connect_to_region("us-west-2") @@ -167,7 +167,7 @@ def test_get_records_at_sequence_number(): response['Records'][0]['Data'].should.equal('2') -@mock_kinesis +@mock_kinesis_deprecated def test_get_records_after_sequence_number(): # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a specific sequence number. conn = boto.kinesis.connect_to_region("us-west-2") @@ -197,7 +197,7 @@ def test_get_records_after_sequence_number(): response['Records'][0]['Data'].should.equal('3') -@mock_kinesis +@mock_kinesis_deprecated def test_get_records_latest(): # LATEST - Start reading just after the most recent record in the shard, so that you always read the most recent data in the shard. 
conn = boto.kinesis.connect_to_region("us-west-2") @@ -232,7 +232,7 @@ def test_get_records_latest(): response['Records'][0]['Data'].should.equal('last_record') -@mock_kinesis +@mock_kinesis_deprecated def test_invalid_shard_iterator_type(): conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" @@ -244,7 +244,7 @@ def test_invalid_shard_iterator_type(): stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException) -@mock_kinesis +@mock_kinesis_deprecated def test_add_tags(): conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" @@ -257,7 +257,7 @@ def test_add_tags(): conn.add_tags_to_stream(stream_name, {'tag2':'val4'}) -@mock_kinesis +@mock_kinesis_deprecated def test_list_tags(): conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" @@ -278,7 +278,7 @@ def test_list_tags(): tags.get('tag2').should.equal('val4') -@mock_kinesis +@mock_kinesis_deprecated def test_remove_tags(): conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" @@ -300,7 +300,7 @@ def test_remove_tags(): tags.get('tag2').should.equal(None) -@mock_kinesis +@mock_kinesis_deprecated def test_split_shard(): conn = boto.kinesis.connect_to_region("us-west-2") stream_name = 'my_stream' @@ -341,7 +341,7 @@ def test_split_shard(): sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) -@mock_kinesis +@mock_kinesis_deprecated def test_merge_shards(): conn = boto.kinesis.connect_to_region("us-west-2") stream_name = 'my_stream' diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 04e6fbb4b..27850d4ad 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -5,10 +5,10 @@ import boto.kms from boto.exception import JSONResponseError from boto.kms.exceptions import AlreadyExistsException, NotFoundException import sure # noqa -from moto import mock_kms +from moto import mock_kms_deprecated from nose.tools import 
assert_raises -@mock_kms +@mock_kms_deprecated def test_create_key(): conn = boto.kms.connect_to_region("us-west-2") @@ -19,7 +19,7 @@ def test_create_key(): key['KeyMetadata']['Enabled'].should.equal(True) -@mock_kms +@mock_kms_deprecated def test_describe_key(): conn = boto.kms.connect_to_region("us-west-2") key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') @@ -30,7 +30,7 @@ def test_describe_key(): key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") -@mock_kms +@mock_kms_deprecated def test_describe_key_via_alias(): conn = boto.kms.connect_to_region("us-west-2") key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') @@ -42,7 +42,7 @@ def test_describe_key_via_alias(): alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) -@mock_kms +@mock_kms_deprecated def test_describe_key_via_alias_not_found(): conn = boto.kms.connect_to_region("us-west-2") key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') @@ -51,7 +51,7 @@ def test_describe_key_via_alias_not_found(): conn.describe_key.when.called_with('alias/not-found-alias').should.throw(JSONResponseError) -@mock_kms +@mock_kms_deprecated def test_describe_key_via_arn(): conn = boto.kms.connect_to_region("us-west-2") key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') @@ -63,13 +63,13 @@ def test_describe_key_via_arn(): the_key['KeyMetadata']['KeyId'].should.equal(key['KeyMetadata']['KeyId']) -@mock_kms +@mock_kms_deprecated def test_describe_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.describe_key.when.called_with("not-a-key").should.throw(JSONResponseError) -@mock_kms +@mock_kms_deprecated def test_list_keys(): conn = boto.kms.connect_to_region("us-west-2") @@ -80,7 +80,7 @@ def test_list_keys(): keys['Keys'].should.have.length_of(2) -@mock_kms +@mock_kms_deprecated def test_enable_key_rotation(): conn = 
boto.kms.connect_to_region("us-west-2") @@ -91,7 +91,7 @@ def test_enable_key_rotation(): conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(True) -@mock_kms +@mock_kms_deprecated def test_enable_key_rotation_via_arn(): conn = boto.kms.connect_to_region("us-west-2") @@ -104,13 +104,13 @@ def test_enable_key_rotation_via_arn(): -@mock_kms +@mock_kms_deprecated def test_enable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.enable_key_rotation.when.called_with("not-a-key").should.throw(JSONResponseError) -@mock_kms +@mock_kms_deprecated def test_enable_key_rotation_with_alias_name_should_fail(): conn = boto.kms.connect_to_region("us-west-2") key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') @@ -122,7 +122,7 @@ def test_enable_key_rotation_with_alias_name_should_fail(): conn.enable_key_rotation.when.called_with('alias/my-alias').should.throw(JSONResponseError) -@mock_kms +@mock_kms_deprecated def test_disable_key_rotation(): conn = boto.kms.connect_to_region("us-west-2") @@ -136,7 +136,7 @@ def test_disable_key_rotation(): conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) -@mock_kms +@mock_kms_deprecated def test_encrypt(): """ test_encrypt @@ -147,26 +147,26 @@ def test_encrypt(): response['CiphertextBlob'].should.equal(b'ZW5jcnlwdG1l') -@mock_kms +@mock_kms_deprecated def test_decrypt(): conn = boto.kms.connect_to_region('us-west-2') response = conn.decrypt('ZW5jcnlwdG1l'.encode('utf-8')) response['Plaintext'].should.equal(b'encryptme') -@mock_kms +@mock_kms_deprecated def test_disable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.disable_key_rotation.when.called_with("not-a-key").should.throw(JSONResponseError) -@mock_kms +@mock_kms_deprecated def test_get_key_rotation_status_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") 
conn.get_key_rotation_status.when.called_with("not-a-key").should.throw(JSONResponseError) -@mock_kms +@mock_kms_deprecated def test_get_key_rotation_status(): conn = boto.kms.connect_to_region("us-west-2") @@ -176,7 +176,7 @@ def test_get_key_rotation_status(): conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) -@mock_kms +@mock_kms_deprecated def test_create_key_defaults_key_rotation(): conn = boto.kms.connect_to_region("us-west-2") @@ -186,7 +186,7 @@ def test_create_key_defaults_key_rotation(): conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) -@mock_kms +@mock_kms_deprecated def test_get_key_policy(): conn = boto.kms.connect_to_region('us-west-2') @@ -196,7 +196,7 @@ def test_get_key_policy(): policy = conn.get_key_policy(key_id, 'default') policy['Policy'].should.equal('my policy') -@mock_kms +@mock_kms_deprecated def test_get_key_policy_via_arn(): conn = boto.kms.connect_to_region('us-west-2') @@ -205,7 +205,7 @@ def test_get_key_policy_via_arn(): policy['Policy'].should.equal('my policy') -@mock_kms +@mock_kms_deprecated def test_put_key_policy(): conn = boto.kms.connect_to_region('us-west-2') @@ -217,7 +217,7 @@ def test_put_key_policy(): policy['Policy'].should.equal('new policy') -@mock_kms +@mock_kms_deprecated def test_put_key_policy_via_arn(): conn = boto.kms.connect_to_region('us-west-2') @@ -229,7 +229,7 @@ def test_put_key_policy_via_arn(): policy['Policy'].should.equal('new policy') -@mock_kms +@mock_kms_deprecated def test_put_key_policy_via_alias_should_not_update(): conn = boto.kms.connect_to_region('us-west-2') @@ -242,7 +242,7 @@ def test_put_key_policy_via_alias_should_not_update(): policy['Policy'].should.equal('my policy') -@mock_kms +@mock_kms_deprecated def test_put_key_policy(): conn = boto.kms.connect_to_region('us-west-2') @@ -253,7 +253,7 @@ def test_put_key_policy(): policy['Policy'].should.equal('new policy') -@mock_kms +@mock_kms_deprecated def test_list_key_policies(): conn 
= boto.kms.connect_to_region('us-west-2') @@ -264,7 +264,7 @@ def test_list_key_policies(): policies['PolicyNames'].should.equal(['default']) -@mock_kms +@mock_kms_deprecated def test__create_alias__returns_none_if_correct(): kms = boto.connect_kms() create_resp = kms.create_key() @@ -275,7 +275,7 @@ def test__create_alias__returns_none_if_correct(): resp.should.be.none -@mock_kms +@mock_kms_deprecated def test__create_alias__raises_if_reserved_alias(): kms = boto.connect_kms() create_resp = kms.create_key() @@ -300,7 +300,7 @@ def test__create_alias__raises_if_reserved_alias(): ex.status.should.equal(400) -@mock_kms +@mock_kms_deprecated def test__create_alias__can_create_multiple_aliases_for_same_key_id(): kms = boto.connect_kms() create_resp = kms.create_key() @@ -311,7 +311,7 @@ def test__create_alias__can_create_multiple_aliases_for_same_key_id(): kms.create_alias('alias/my-alias5', key_id).should.be.none -@mock_kms +@mock_kms_deprecated def test__create_alias__raises_if_wrong_prefix(): kms = boto.connect_kms() create_resp = kms.create_key() @@ -328,7 +328,7 @@ def test__create_alias__raises_if_wrong_prefix(): ex.status.should.equal(400) -@mock_kms +@mock_kms_deprecated def test__create_alias__raises_if_duplicate(): region = 'us-west-2' kms = boto.kms.connect_to_region(region) @@ -354,7 +354,7 @@ def test__create_alias__raises_if_duplicate(): ex.status.should.equal(400) -@mock_kms +@mock_kms_deprecated def test__create_alias__raises_if_alias_has_restricted_characters(): kms = boto.connect_kms() create_resp = kms.create_key() @@ -378,7 +378,7 @@ def test__create_alias__raises_if_alias_has_restricted_characters(): ex.status.should.equal(400) -@mock_kms +@mock_kms_deprecated def test__create_alias__raises_if_alias_has_colon_character(): # For some reason, colons are not accepted for an alias, even though they are accepted by regex ^[a-zA-Z0-9:/_-]+$ kms = boto.connect_kms() @@ -401,7 +401,7 @@ def test__create_alias__raises_if_alias_has_colon_character(): 
ex.status.should.equal(400) -@mock_kms +@mock_kms_deprecated def test__create_alias__accepted_characters(): kms = boto.connect_kms() create_resp = kms.create_key() @@ -416,7 +416,7 @@ def test__create_alias__accepted_characters(): kms.create_alias(alias_name, key_id) -@mock_kms +@mock_kms_deprecated def test__create_alias__raises_if_target_key_id_is_existing_alias(): kms = boto.connect_kms() create_resp = kms.create_key() @@ -437,7 +437,7 @@ def test__create_alias__raises_if_target_key_id_is_existing_alias(): ex.status.should.equal(400) -@mock_kms +@mock_kms_deprecated def test__delete_alias(): kms = boto.connect_kms() create_resp = kms.create_key() @@ -454,7 +454,7 @@ def test__delete_alias(): kms.create_alias(alias, key_id) -@mock_kms +@mock_kms_deprecated def test__delete_alias__raises_if_wrong_prefix(): kms = boto.connect_kms() @@ -470,7 +470,7 @@ def test__delete_alias__raises_if_wrong_prefix(): ex.status.should.equal(400) -@mock_kms +@mock_kms_deprecated def test__delete_alias__raises_if_alias_is_not_found(): region = 'us-west-2' kms = boto.kms.connect_to_region(region) @@ -490,7 +490,7 @@ def test__delete_alias__raises_if_alias_is_not_found(): ex.status.should.equal(400) -@mock_kms +@mock_kms_deprecated def test__list_aliases(): region = "eu-west-1" kms = boto.kms.connect_to_region(region) @@ -532,7 +532,7 @@ def test__list_aliases(): len(aliases).should.equal(7) -@mock_kms +@mock_kms_deprecated def test__assert_valid_key_id(): from moto.kms.responses import _assert_valid_key_id import uuid @@ -541,7 +541,7 @@ def test__assert_valid_key_id(): _assert_valid_key_id.when.called_with(str(uuid.uuid4())).should_not.throw(JSONResponseError) -@mock_kms +@mock_kms_deprecated def test__assert_default_policy(): from moto.kms.responses import _assert_default_policy diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index 1392d8c6e..dc268bbe5 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ 
-66,7 +66,7 @@ def test_describe_layers(): rv1 = client.describe_layers(StackId=stack_id) rv2 = client.describe_layers(LayerIds=[layer_id]) - rv1.should.equal(rv2) + rv1['Layers'].should.equal(rv2['Layers']) rv1['Layers'][0]['Name'].should.equal("TestLayer") diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 6078b5f6b..7a6cab633 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -5,12 +5,12 @@ import boto.vpc from boto.exception import BotoServerError import sure # noqa -from moto import mock_ec2, mock_rds +from moto import mock_ec2_deprecated, mock_rds_deprecated from tests.helpers import disable_on_py3 @disable_on_py3() -@mock_rds +@mock_rds_deprecated def test_create_database(): conn = boto.rds.connect_to_region("us-west-2") @@ -27,7 +27,7 @@ def test_create_database(): @disable_on_py3() -@mock_rds +@mock_rds_deprecated def test_get_databases(): conn = boto.rds.connect_to_region("us-west-2") @@ -44,14 +44,14 @@ def test_get_databases(): databases[0].id.should.equal("db-master-1") -@mock_rds +@mock_rds_deprecated def test_describe_non_existant_database(): conn = boto.rds.connect_to_region("us-west-2") conn.get_all_dbinstances.when.called_with("not-a-db").should.throw(BotoServerError) @disable_on_py3() -@mock_rds +@mock_rds_deprecated def test_delete_database(): conn = boto.rds.connect_to_region("us-west-2") list(conn.get_all_dbinstances()).should.have.length_of(0) @@ -63,13 +63,13 @@ def test_delete_database(): list(conn.get_all_dbinstances()).should.have.length_of(0) -@mock_rds +@mock_rds_deprecated def test_delete_non_existant_database(): conn = boto.rds.connect_to_region("us-west-2") conn.delete_dbinstance.when.called_with("not-a-db").should.throw(BotoServerError) -@mock_rds +@mock_rds_deprecated def test_create_database_security_group(): conn = boto.rds.connect_to_region("us-west-2") @@ -79,7 +79,7 @@ def test_create_database_security_group(): list(security_group.ip_ranges).should.equal([]) -@mock_rds 
+@mock_rds_deprecated def test_get_security_groups(): conn = boto.rds.connect_to_region("us-west-2") @@ -96,13 +96,13 @@ def test_get_security_groups(): databases[0].name.should.equal("db_sg1") -@mock_rds +@mock_rds_deprecated def test_get_non_existant_security_group(): conn = boto.rds.connect_to_region("us-west-2") conn.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(BotoServerError) -@mock_rds +@mock_rds_deprecated def test_delete_database_security_group(): conn = boto.rds.connect_to_region("us-west-2") conn.create_dbsecurity_group('db_sg', 'DB Security Group') @@ -113,14 +113,14 @@ def test_delete_database_security_group(): list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) -@mock_rds +@mock_rds_deprecated def test_delete_non_existant_security_group(): conn = boto.rds.connect_to_region("us-west-2") conn.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(BotoServerError) @disable_on_py3() -@mock_rds +@mock_rds_deprecated def test_security_group_authorize(): conn = boto.rds.connect_to_region("us-west-2") security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') @@ -133,7 +133,7 @@ def test_security_group_authorize(): @disable_on_py3() -@mock_rds +@mock_rds_deprecated def test_add_security_group_to_database(): conn = boto.rds.connect_to_region("us-west-2") @@ -147,8 +147,8 @@ def test_add_security_group_to_database(): database.security_groups[0].name.should.equal("db_sg") -@mock_ec2 -@mock_rds +@mock_ec2_deprecated +@mock_rds_deprecated def test_add_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") @@ -163,8 +163,8 @@ def test_add_database_subnet_group(): list(subnet_group.subnet_ids).should.equal(subnet_ids) -@mock_ec2 -@mock_rds +@mock_ec2_deprecated +@mock_rds_deprecated def test_describe_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") @@ -180,8 +180,8 @@ def 
test_describe_database_subnet_group(): conn.get_all_db_subnet_groups.when.called_with("not-a-subnet").should.throw(BotoServerError) -@mock_ec2 -@mock_rds +@mock_ec2_deprecated +@mock_rds_deprecated def test_delete_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") @@ -198,8 +198,8 @@ def test_delete_database_subnet_group(): @disable_on_py3() -@mock_ec2 -@mock_rds +@mock_ec2_deprecated +@mock_rds_deprecated def test_create_database_in_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") @@ -216,7 +216,7 @@ def test_create_database_in_subnet_group(): @disable_on_py3() -@mock_rds +@mock_rds_deprecated def test_create_database_replica(): conn = boto.rds.connect_to_region("us-west-2") @@ -239,7 +239,7 @@ def test_create_database_replica(): list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) @disable_on_py3() -@mock_rds +@mock_rds_deprecated def test_create_cross_region_database_replica(): west_1_conn = boto.rds.connect_to_region("us-west-1") west_2_conn = boto.rds.connect_to_region("us-west-2") @@ -266,7 +266,7 @@ def test_create_cross_region_database_replica(): @disable_on_py3() -@mock_rds +@mock_rds_deprecated def test_connecting_to_us_east_1(): # boto does not use us-east-1 in the URL for RDS, # and that broke moto in the past: @@ -286,7 +286,7 @@ def test_connecting_to_us_east_1(): @disable_on_py3() -@mock_rds +@mock_rds_deprecated def test_create_database_with_iops(): conn = boto.rds.connect_to_region("us-west-2") diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 4e1c2b73c..581209655 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -1,7 +1,5 @@ from __future__ import unicode_literals -import boto.rds2 -import boto.vpc from botocore.exceptions import ClientError, ParamValidationError import boto3 import sure # noqa diff --git a/tests/test_redshift/test_redshift.py 
b/tests/test_redshift/test_redshift.py index 700301418..13acf6d7c 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -10,10 +10,10 @@ from boto.redshift.exceptions import ( ) import sure # noqa -from moto import mock_ec2, mock_redshift +from moto import mock_ec2_deprecated, mock_redshift_deprecated -@mock_redshift +@mock_redshift_deprecated def test_create_cluster(): conn = boto.redshift.connect_to_region("us-east-1") cluster_identifier = 'my_cluster' @@ -54,7 +54,7 @@ def test_create_cluster(): cluster['NumberOfNodes'].should.equal(3) -@mock_redshift +@mock_redshift_deprecated def test_create_single_node_cluster(): conn = boto.redshift.connect_to_region("us-east-1") cluster_identifier = 'my_cluster' @@ -78,7 +78,7 @@ def test_create_single_node_cluster(): cluster['NumberOfNodes'].should.equal(1) -@mock_redshift +@mock_redshift_deprecated def test_default_cluster_attibutes(): conn = boto.redshift.connect_to_region("us-east-1") cluster_identifier = 'my_cluster' @@ -105,8 +105,8 @@ def test_default_cluster_attibutes(): cluster['NumberOfNodes'].should.equal(1) -@mock_redshift -@mock_ec2 +@mock_redshift_deprecated +@mock_ec2_deprecated def test_create_cluster_in_subnet_group(): vpc_conn = boto.connect_vpc() vpc = vpc_conn.create_vpc("10.0.0.0/16") @@ -131,7 +131,7 @@ def test_create_cluster_in_subnet_group(): cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') -@mock_redshift +@mock_redshift_deprecated def test_create_cluster_with_security_group(): conn = boto.redshift.connect_to_region("us-east-1") conn.create_cluster_security_group( @@ -158,8 +158,8 @@ def test_create_cluster_with_security_group(): set(group_names).should.equal(set(["security_group1", "security_group2"])) -@mock_redshift -@mock_ec2 +@mock_redshift_deprecated +@mock_ec2_deprecated def test_create_cluster_with_vpc_security_groups(): vpc_conn = boto.connect_vpc() ec2_conn = boto.connect_ec2() @@ -181,7 +181,7 @@ def 
test_create_cluster_with_vpc_security_groups(): list(group_ids).should.equal([security_group.id]) -@mock_redshift +@mock_redshift_deprecated def test_create_cluster_with_parameter_group(): conn = boto.connect_redshift() conn.create_cluster_parameter_group( @@ -203,13 +203,13 @@ def test_create_cluster_with_parameter_group(): cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("my_parameter_group") -@mock_redshift +@mock_redshift_deprecated def test_describe_non_existant_cluster(): conn = boto.redshift.connect_to_region("us-east-1") conn.describe_clusters.when.called_with("not-a-cluster").should.throw(ClusterNotFound) -@mock_redshift +@mock_redshift_deprecated def test_delete_cluster(): conn = boto.connect_redshift() cluster_identifier = 'my_cluster' @@ -233,7 +233,7 @@ def test_delete_cluster(): conn.delete_cluster.when.called_with("not-a-cluster").should.throw(ClusterNotFound) -@mock_redshift +@mock_redshift_deprecated def test_modify_cluster(): conn = boto.connect_redshift() cluster_identifier = 'my_cluster' @@ -281,8 +281,8 @@ def test_modify_cluster(): cluster['NumberOfNodes'].should.equal(2) -@mock_redshift -@mock_ec2 +@mock_redshift_deprecated +@mock_ec2_deprecated def test_create_cluster_subnet_group(): vpc_conn = boto.connect_vpc() vpc = vpc_conn.create_vpc("10.0.0.0/16") @@ -306,8 +306,8 @@ def test_create_cluster_subnet_group(): set(subnet_ids).should.equal(set([subnet1.id, subnet2.id])) -@mock_redshift -@mock_ec2 +@mock_redshift_deprecated +@mock_ec2_deprecated def test_create_invalid_cluster_subnet_group(): redshift_conn = boto.connect_redshift() redshift_conn.create_cluster_subnet_group.when.called_with( @@ -317,14 +317,14 @@ def test_create_invalid_cluster_subnet_group(): ).should.throw(InvalidSubnet) -@mock_redshift +@mock_redshift_deprecated def test_describe_non_existant_subnet_group(): conn = boto.redshift.connect_to_region("us-east-1") 
conn.describe_cluster_subnet_groups.when.called_with("not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) -@mock_redshift -@mock_ec2 +@mock_redshift_deprecated +@mock_ec2_deprecated def test_delete_cluster_subnet_group(): vpc_conn = boto.connect_vpc() vpc = vpc_conn.create_vpc("10.0.0.0/16") @@ -351,7 +351,7 @@ def test_delete_cluster_subnet_group(): redshift_conn.delete_cluster_subnet_group.when.called_with("not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) -@mock_redshift +@mock_redshift_deprecated def test_create_cluster_security_group(): conn = boto.connect_redshift() conn.create_cluster_security_group( @@ -367,13 +367,13 @@ def test_create_cluster_security_group(): list(my_group['IPRanges']).should.equal([]) -@mock_redshift +@mock_redshift_deprecated def test_describe_non_existant_security_group(): conn = boto.redshift.connect_to_region("us-east-1") conn.describe_cluster_security_groups.when.called_with("not-a-security-group").should.throw(ClusterSecurityGroupNotFound) -@mock_redshift +@mock_redshift_deprecated def test_delete_cluster_security_group(): conn = boto.connect_redshift() conn.create_cluster_security_group( @@ -395,7 +395,7 @@ def test_delete_cluster_security_group(): conn.delete_cluster_security_group.when.called_with("not-a-security-group").should.throw(ClusterSecurityGroupNotFound) -@mock_redshift +@mock_redshift_deprecated def test_create_cluster_parameter_group(): conn = boto.connect_redshift() conn.create_cluster_parameter_group( @@ -412,13 +412,13 @@ def test_create_cluster_parameter_group(): my_group['Description'].should.equal("This is my parameter group") -@mock_redshift +@mock_redshift_deprecated def test_describe_non_existant_parameter_group(): conn = boto.redshift.connect_to_region("us-east-1") conn.describe_cluster_parameter_groups.when.called_with("not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) -@mock_redshift +@mock_redshift_deprecated def test_delete_cluster_parameter_group(): conn = 
boto.connect_redshift() conn.create_cluster_parameter_group( diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index b5b2000cc..dd68eec0e 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -9,10 +9,10 @@ import sure # noqa import uuid -from moto import mock_route53 +from moto import mock_route53, mock_route53_deprecated -@mock_route53 +@mock_route53_deprecated def test_hosted_zone(): conn = boto.connect_route53('the_key', 'the_secret') firstzone = conn.create_hosted_zone("testdns.aws.com") @@ -34,7 +34,7 @@ def test_hosted_zone(): conn.get_hosted_zone.when.called_with("abcd").should.throw(boto.route53.exception.DNSServerError, "404 Not Found") -@mock_route53 +@mock_route53_deprecated def test_rrset(): conn = boto.connect_route53('the_key', 'the_secret') @@ -117,7 +117,7 @@ def test_rrset(): rrsets.should.have.length_of(0) -@mock_route53 +@mock_route53_deprecated def test_rrset_with_multiple_values(): conn = boto.connect_route53('the_key', 'the_secret') zone = conn.create_hosted_zone("testdns.aws.com") @@ -134,7 +134,7 @@ def test_rrset_with_multiple_values(): set(rrsets[0].resource_records).should.equal(set(['1.2.3.4', '5.6.7.8'])) -@mock_route53 +@mock_route53_deprecated def test_alias_rrset(): conn = boto.connect_route53('the_key', 'the_secret') zone = conn.create_hosted_zone("testdns.aws.com") @@ -153,7 +153,7 @@ def test_alias_rrset(): rrsets[0].resource_records[0].should.equal('bar.testdns.aws.com') -@mock_route53 +@mock_route53_deprecated def test_create_health_check(): conn = boto.connect_route53('the_key', 'the_secret') @@ -183,7 +183,7 @@ def test_create_health_check(): config['FailureThreshold'].should.equal("2") -@mock_route53 +@mock_route53_deprecated def test_delete_health_check(): conn = boto.connect_route53('the_key', 'the_secret') @@ -204,7 +204,7 @@ def test_delete_health_check(): list(checks).should.have.length_of(0) -@mock_route53 +@mock_route53_deprecated def 
test_use_health_check_in_resource_record_set(): conn = boto.connect_route53('the_key', 'the_secret') @@ -229,7 +229,7 @@ def test_use_health_check_in_resource_record_set(): record_sets[0].health_check.should.equal(check_id) -@mock_route53 +@mock_route53_deprecated def test_hosted_zone_comment_preserved(): conn = boto.connect_route53('the_key', 'the_secret') @@ -246,7 +246,7 @@ def test_hosted_zone_comment_preserved(): zone.config["Comment"].should.equal("test comment") -@mock_route53 +@mock_route53_deprecated def test_deleting_weighted_route(): conn = boto.connect_route53() @@ -266,7 +266,7 @@ def test_deleting_weighted_route(): cname.identifier.should.equal('success-test-bar') -@mock_route53 +@mock_route53_deprecated def test_deleting_latency_route(): conn = boto.connect_route53() @@ -288,7 +288,7 @@ def test_deleting_latency_route(): cname.region.should.equal('us-west-1') -@mock_route53 +@mock_route53_deprecated def test_hosted_zone_private_zone_preserved(): conn = boto.connect_route53('the_key', 'the_secret') diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 4990d7324..874230737 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -20,7 +20,7 @@ from nose.tools import assert_raises import sure # noqa -from moto import mock_s3 +from moto import mock_s3, mock_s3_deprecated REDUCED_PART_SIZE = 256 @@ -56,7 +56,7 @@ class MyModel(object): k.set_contents_from_string(self.value) -@mock_s3 +@mock_s3_deprecated def test_my_model_save(): # Create Bucket so that test can run conn = boto.connect_s3('the_key', 'the_secret') @@ -69,7 +69,7 @@ def test_my_model_save(): conn.get_bucket('mybucket').get_key('steve').get_contents_as_string().should.equal(b'is awesome') -@mock_s3 +@mock_s3_deprecated def test_key_etag(): # Create Bucket so that test can run conn = boto.connect_s3('the_key', 'the_secret') @@ -83,7 +83,7 @@ def test_key_etag(): '"d32bda93738f7e03adb22e66c90fbc04"') -@mock_s3 +@mock_s3_deprecated def 
test_multipart_upload_too_small(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -95,7 +95,7 @@ def test_multipart_upload_too_small(): multipart.complete_upload.should.throw(S3ResponseError) -@mock_s3 +@mock_s3_deprecated @reduced_min_part_size def test_multipart_upload(): conn = boto.connect_s3('the_key', 'the_secret') @@ -112,7 +112,7 @@ def test_multipart_upload(): bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2) -@mock_s3 +@mock_s3_deprecated @reduced_min_part_size def test_multipart_upload_out_of_order(): conn = boto.connect_s3('the_key', 'the_secret') @@ -129,7 +129,7 @@ def test_multipart_upload_out_of_order(): bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2) -@mock_s3 +@mock_s3_deprecated @reduced_min_part_size def test_multipart_upload_with_headers(): conn = boto.connect_s3('the_key', 'the_secret') @@ -144,7 +144,7 @@ def test_multipart_upload_with_headers(): key.metadata.should.equal({"foo": "bar"}) -@mock_s3 +@mock_s3_deprecated @reduced_min_part_size def test_multipart_upload_with_copy_key(): conn = boto.connect_s3('the_key', 'the_secret') @@ -161,7 +161,7 @@ def test_multipart_upload_with_copy_key(): bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + b"key_") -@mock_s3 +@mock_s3_deprecated @reduced_min_part_size def test_multipart_upload_cancel(): conn = boto.connect_s3('the_key', 'the_secret') @@ -175,7 +175,7 @@ def test_multipart_upload_cancel(): # have the ability to list mulipart uploads for a bucket. 
-@mock_s3 +@mock_s3_deprecated @reduced_min_part_size def test_multipart_etag(): # Create Bucket so that test can run @@ -194,7 +194,7 @@ def test_multipart_etag(): '"66d1a1a2ed08fd05c137f316af4ff255-2"') -@mock_s3 +@mock_s3_deprecated @reduced_min_part_size def test_multipart_invalid_order(): # Create Bucket so that test can run @@ -214,7 +214,7 @@ def test_multipart_invalid_order(): multipart.key_name, multipart.id, xml).should.throw(S3ResponseError) -@mock_s3 +@mock_s3_deprecated @reduced_min_part_size def test_multipart_duplicate_upload(): conn = boto.connect_s3('the_key', 'the_secret') @@ -232,7 +232,7 @@ def test_multipart_duplicate_upload(): bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2) -@mock_s3 +@mock_s3_deprecated def test_list_multiparts(): # Create Bucket so that test can run conn = boto.connect_s3('the_key', 'the_secret') @@ -253,7 +253,7 @@ def test_list_multiparts(): uploads.should.be.empty -@mock_s3 +@mock_s3_deprecated def test_key_save_to_missing_bucket(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.get_bucket('mybucket', validate=False) @@ -263,14 +263,14 @@ def test_key_save_to_missing_bucket(): key.set_contents_from_string.when.called_with("foobar").should.throw(S3ResponseError) -@mock_s3 +@mock_s3_deprecated def test_missing_key(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") bucket.get_key("the-key").should.equal(None) -@mock_s3 +@mock_s3_deprecated def test_missing_key_urllib2(): conn = boto.connect_s3('the_key', 'the_secret') conn.create_bucket("foobar") @@ -278,7 +278,7 @@ def test_missing_key_urllib2(): urlopen.when.called_with("http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError) -@mock_s3 +@mock_s3_deprecated def test_empty_key(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -291,7 +291,7 @@ def test_empty_key(): key.get_contents_as_string().should.equal(b'') -@mock_s3 +@mock_s3_deprecated 
def test_empty_key_set_on_existing_key(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -307,7 +307,7 @@ def test_empty_key_set_on_existing_key(): bucket.get_key("the-key").get_contents_as_string().should.equal(b'') -@mock_s3 +@mock_s3_deprecated def test_large_key_save(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -318,7 +318,7 @@ def test_large_key_save(): bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar' * 100000) -@mock_s3 +@mock_s3_deprecated def test_copy_key(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -332,7 +332,7 @@ def test_copy_key(): bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") -@mock_s3 +@mock_s3_deprecated def test_copy_key_with_version(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -348,7 +348,7 @@ def test_copy_key_with_version(): bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") -@mock_s3 +@mock_s3_deprecated def test_set_metadata(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -360,7 +360,7 @@ def test_set_metadata(): bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring') -@mock_s3 +@mock_s3_deprecated def test_copy_key_replace_metadata(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -377,7 +377,7 @@ def test_copy_key_replace_metadata(): @freeze_time("2012-01-01 12:00:00") -@mock_s3 +@mock_s3_deprecated def test_last_modified(): # See https://github.com/boto/boto/issues/466 conn = boto.connect_s3() @@ -392,19 +392,19 @@ def test_last_modified(): bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') -@mock_s3 +@mock_s3_deprecated def test_missing_bucket(): conn = boto.connect_s3('the_key', 'the_secret') 
conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError) -@mock_s3 +@mock_s3_deprecated def test_bucket_with_dash(): conn = boto.connect_s3('the_key', 'the_secret') conn.get_bucket.when.called_with('mybucket-test').should.throw(S3ResponseError) -@mock_s3 +@mock_s3_deprecated def test_create_existing_bucket(): "Trying to create a bucket that already exists should raise an Error" conn = boto.s3.connect_to_region("us-west-2") @@ -413,7 +413,7 @@ def test_create_existing_bucket(): conn.create_bucket('foobar') -@mock_s3 +@mock_s3_deprecated def test_create_existing_bucket_in_us_east_1(): "Trying to create a bucket that already exists in us-east-1 returns the bucket" @@ -430,14 +430,14 @@ def test_create_existing_bucket_in_us_east_1(): bucket.name.should.equal("foobar") -@mock_s3 +@mock_s3_deprecated def test_other_region(): conn = S3Connection('key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com') conn.create_bucket("foobar") list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) -@mock_s3 +@mock_s3_deprecated def test_bucket_deletion(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -459,7 +459,7 @@ def test_bucket_deletion(): conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) -@mock_s3 +@mock_s3_deprecated def test_get_all_buckets(): conn = boto.connect_s3('the_key', 'the_secret') conn.create_bucket("foobar") @@ -470,6 +470,7 @@ def test_get_all_buckets(): @mock_s3 +@mock_s3_deprecated def test_post_to_bucket(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -483,6 +484,7 @@ def test_post_to_bucket(): @mock_s3 +@mock_s3_deprecated def test_post_with_metadata_to_bucket(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -496,7 +498,7 @@ def test_post_with_metadata_to_bucket(): bucket.get_key('the-key').get_metadata('test').should.equal('metadata') -@mock_s3 +@mock_s3_deprecated def 
test_delete_missing_key(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket('foobar') @@ -505,7 +507,7 @@ def test_delete_missing_key(): deleted_key.key.should.equal("foobar") -@mock_s3 +@mock_s3_deprecated def test_delete_keys(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket('foobar') @@ -523,7 +525,7 @@ def test_delete_keys(): keys[0].name.should.equal('file1') -@mock_s3 +@mock_s3_deprecated def test_delete_keys_with_invalid(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket('foobar') @@ -552,7 +554,7 @@ def test_key_method_not_implemented(): requests.post.when.called_with("https://foobar.s3.amazonaws.com/foo").should.throw(NotImplementedError) -@mock_s3 +@mock_s3_deprecated def test_bucket_name_with_dot(): conn = boto.connect_s3() bucket = conn.create_bucket('firstname.lastname') @@ -561,7 +563,7 @@ def test_bucket_name_with_dot(): k.set_contents_from_string('somedata') -@mock_s3 +@mock_s3_deprecated def test_key_with_special_characters(): conn = boto.connect_s3() bucket = conn.create_bucket('test_bucket_name') @@ -574,7 +576,7 @@ def test_key_with_special_characters(): keys[0].name.should.equal("test_list_keys_2/x?y") -@mock_s3 +@mock_s3_deprecated def test_unicode_key_with_slash(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -586,7 +588,7 @@ def test_unicode_key_with_slash(): key.get_contents_as_string().should.equal(b'value') -@mock_s3 +@mock_s3_deprecated def test_bucket_key_listing_order(): conn = boto.connect_s3() bucket = conn.create_bucket('test_bucket') @@ -628,7 +630,7 @@ def test_bucket_key_listing_order(): keys.should.equal([u'toplevel/x/']) -@mock_s3 +@mock_s3_deprecated def test_key_with_reduced_redundancy(): conn = boto.connect_s3() bucket = conn.create_bucket('test_bucket_name') @@ -640,7 +642,7 @@ def test_key_with_reduced_redundancy(): list(bucket)[0].storage_class.should.equal('REDUCED_REDUNDANCY') -@mock_s3 
+@mock_s3_deprecated def test_copy_key_reduced_redundancy(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -658,7 +660,7 @@ def test_copy_key_reduced_redundancy(): @freeze_time("2012-01-01 12:00:00") -@mock_s3 +@mock_s3_deprecated def test_restore_key(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -679,7 +681,7 @@ def test_restore_key(): @freeze_time("2012-01-01 12:00:00") -@mock_s3 +@mock_s3_deprecated def test_restore_key_headers(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -693,7 +695,7 @@ def test_restore_key_headers(): key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") -@mock_s3 +@mock_s3_deprecated def test_get_versioning_status(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket('foobar') @@ -710,7 +712,7 @@ def test_get_versioning_status(): d.should.have.key('Versioning').being.equal('Suspended') -@mock_s3 +@mock_s3_deprecated def test_key_version(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket('foobar') @@ -728,7 +730,7 @@ def test_key_version(): key.version_id.should.equal('1') -@mock_s3 +@mock_s3_deprecated def test_list_versions(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket('foobar') @@ -754,7 +756,7 @@ def test_list_versions(): versions[1].get_contents_as_string().should.equal(b"Version 2") -@mock_s3 +@mock_s3_deprecated def test_acl_setting(): conn = boto.connect_s3() bucket = conn.create_bucket('foobar') @@ -775,7 +777,7 @@ def test_acl_setting(): g.permission == 'READ' for g in grants), grants -@mock_s3 +@mock_s3_deprecated def test_acl_setting_via_headers(): conn = boto.connect_s3() bucket = conn.create_bucket('foobar') @@ -797,7 +799,7 @@ def test_acl_setting_via_headers(): g.permission == 'FULL_CONTROL' for g in grants), grants -@mock_s3 +@mock_s3_deprecated def test_acl_switching(): conn = boto.connect_s3() 
bucket = conn.create_bucket('foobar') @@ -814,7 +816,7 @@ def test_acl_switching(): g.permission == 'READ' for g in grants), grants -@mock_s3 +@mock_s3_deprecated def test_bucket_acl_setting(): conn = boto.connect_s3() bucket = conn.create_bucket('foobar') @@ -826,7 +828,7 @@ def test_bucket_acl_setting(): g.permission == 'READ' for g in grants), grants -@mock_s3 +@mock_s3_deprecated def test_bucket_acl_switching(): conn = boto.connect_s3() bucket = conn.create_bucket('foobar') @@ -839,7 +841,7 @@ def test_bucket_acl_switching(): g.permission == 'READ' for g in grants), grants -@mock_s3 +@mock_s3_deprecated def test_unicode_key(): conn = boto.connect_s3() bucket = conn.create_bucket('mybucket') @@ -852,7 +854,7 @@ def test_unicode_key(): assert fetched_key.get_contents_as_string().decode("utf-8") == 'Hello world!' -@mock_s3 +@mock_s3_deprecated def test_unicode_value(): conn = boto.connect_s3() bucket = conn.create_bucket('mybucket') @@ -864,7 +866,7 @@ def test_unicode_value(): assert key.get_contents_as_string().decode("utf-8") == u'こんにちは.jpg' -@mock_s3 +@mock_s3_deprecated def test_setting_content_encoding(): conn = boto.connect_s3() bucket = conn.create_bucket('mybucket') @@ -877,14 +879,14 @@ def test_setting_content_encoding(): key.content_encoding.should.equal("gzip") -@mock_s3 +@mock_s3_deprecated def test_bucket_location(): conn = boto.s3.connect_to_region("us-west-2") bucket = conn.create_bucket('mybucket') bucket.get_location().should.equal("us-west-2") -@mock_s3 +@mock_s3_deprecated def test_ranged_get(): conn = boto.connect_s3() bucket = conn.create_bucket('mybucket') @@ -926,7 +928,7 @@ def test_ranged_get(): key.size.should.equal(100) -@mock_s3 +@mock_s3_deprecated def test_policy(): conn = boto.connect_s3() bucket_name = 'mybucket' @@ -976,6 +978,31 @@ def test_policy(): bucket.get_policy() +@mock_s3_deprecated +def test_website_configuration_xml(): + conn = boto.connect_s3() + bucket = conn.create_bucket('test-bucket') + 
bucket.set_website_configuration_xml(TEST_XML) + bucket.get_website_configuration_xml().should.equal(TEST_XML) + + +@mock_s3_deprecated +def test_key_with_trailing_slash_in_ordinary_calling_format(): + conn = boto.connect_s3( + 'access_key', + 'secret_key', + calling_format=boto.s3.connection.OrdinaryCallingFormat() + ) + bucket = conn.create_bucket('test_bucket_name') + + key_name = 'key_with_slash/' + + key = Key(bucket, key_name) + key.set_contents_from_string('some value') + + [k.name for k in bucket.get_all_keys()].should.contain(key_name) + + """ boto3 """ @@ -1235,28 +1262,3 @@ TEST_XML = """\ """ - - -@mock_s3 -def test_website_configuration_xml(): - conn = boto.connect_s3() - bucket = conn.create_bucket('test-bucket') - bucket.set_website_configuration_xml(TEST_XML) - bucket.get_website_configuration_xml().should.equal(TEST_XML) - - -@mock_s3 -def test_key_with_trailing_slash_in_ordinary_calling_format(): - conn = boto.connect_s3( - 'access_key', - 'secret_key', - calling_format=boto.s3.connection.OrdinaryCallingFormat() - ) - bucket = conn.create_bucket('test_bucket_name') - - key_name = 'key_with_slash/' - - key = Key(bucket, key_name) - key.set_contents_from_string('some value') - - [k.name for k in bucket.get_all_keys()].should.contain(key_name) diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index 60613de44..f0a70bc6f 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -6,10 +6,10 @@ from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule import sure # noqa -from moto import mock_s3 +from moto import mock_s3_deprecated -@mock_s3 +@mock_s3_deprecated def test_lifecycle_create(): conn = boto.s3.connect_to_region("us-west-1") bucket = conn.create_bucket("foobar") @@ -26,7 +26,7 @@ def test_lifecycle_create(): list(lifecycle.transition).should.equal([]) -@mock_s3 +@mock_s3_deprecated def test_lifecycle_with_glacier_transition(): conn = 
boto.s3.connect_to_region("us-west-1") bucket = conn.create_bucket("foobar") @@ -44,7 +44,7 @@ def test_lifecycle_with_glacier_transition(): transition.date.should.equal(None) -@mock_s3 +@mock_s3_deprecated def test_lifecycle_multi(): conn = boto.s3.connect_to_region("us-west-1") bucket = conn.create_bucket("foobar") @@ -86,7 +86,7 @@ def test_lifecycle_multi(): assert False, "Invalid rule id" -@mock_s3 +@mock_s3_deprecated def test_lifecycle_delete(): conn = boto.s3.connect_to_region("us-west-1") bucket = conn.create_bucket("foobar") diff --git a/tests/test_s3bucket_path/test_s3bucket_path.py b/tests/test_s3bucket_path/test_s3bucket_path.py index eff01bf55..24c5f7fa5 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path.py +++ b/tests/test_s3bucket_path/test_s3bucket_path.py @@ -12,7 +12,7 @@ import requests import sure # noqa -from moto import mock_s3bucket_path +from moto import mock_s3, mock_s3_deprecated def create_connection(key=None, secret=None): @@ -32,7 +32,7 @@ class MyModel(object): k.set_contents_from_string(self.value) -@mock_s3bucket_path +@mock_s3_deprecated def test_my_model_save(): # Create Bucket so that test can run conn = create_connection('the_key', 'the_secret') @@ -45,14 +45,14 @@ def test_my_model_save(): conn.get_bucket('mybucket').get_key('steve').get_contents_as_string().should.equal(b'is awesome') -@mock_s3bucket_path +@mock_s3_deprecated def test_missing_key(): conn = create_connection('the_key', 'the_secret') bucket = conn.create_bucket("foobar") bucket.get_key("the-key").should.equal(None) -@mock_s3bucket_path +@mock_s3_deprecated def test_missing_key_urllib2(): conn = create_connection('the_key', 'the_secret') conn.create_bucket("foobar") @@ -60,7 +60,7 @@ def test_missing_key_urllib2(): urlopen.when.called_with("http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError) -@mock_s3bucket_path +@mock_s3_deprecated def test_empty_key(): conn = create_connection('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ 
-71,7 +71,7 @@ def test_empty_key(): bucket.get_key("the-key").get_contents_as_string().should.equal(b'') -@mock_s3bucket_path +@mock_s3_deprecated def test_empty_key_set_on_existing_key(): conn = create_connection('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -85,7 +85,7 @@ def test_empty_key_set_on_existing_key(): bucket.get_key("the-key").get_contents_as_string().should.equal(b'') -@mock_s3bucket_path +@mock_s3_deprecated def test_large_key_save(): conn = create_connection('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -96,7 +96,7 @@ def test_large_key_save(): bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar' * 100000) -@mock_s3bucket_path +@mock_s3_deprecated def test_copy_key(): conn = create_connection('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -110,7 +110,7 @@ def test_copy_key(): bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") -@mock_s3bucket_path +@mock_s3_deprecated def test_set_metadata(): conn = create_connection('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -123,7 +123,7 @@ def test_set_metadata(): @freeze_time("2012-01-01 12:00:00") -@mock_s3bucket_path +@mock_s3_deprecated def test_last_modified(): # See https://github.com/boto/boto/issues/466 conn = create_connection() @@ -138,19 +138,19 @@ def test_last_modified(): bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') -@mock_s3bucket_path +@mock_s3_deprecated def test_missing_bucket(): conn = create_connection('the_key', 'the_secret') conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError) -@mock_s3bucket_path +@mock_s3_deprecated def test_bucket_with_dash(): conn = create_connection('the_key', 'the_secret') conn.get_bucket.when.called_with('mybucket-test').should.throw(S3ResponseError) -@mock_s3bucket_path +@mock_s3_deprecated def test_bucket_deletion(): conn = create_connection('the_key', 'the_secret') bucket = 
conn.create_bucket("foobar") @@ -172,7 +172,7 @@ def test_bucket_deletion(): conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) -@mock_s3bucket_path +@mock_s3_deprecated def test_get_all_buckets(): conn = create_connection('the_key', 'the_secret') conn.create_bucket("foobar") @@ -182,7 +182,8 @@ def test_get_all_buckets(): buckets.should.have.length_of(2) -@mock_s3bucket_path +@mock_s3 +@mock_s3_deprecated def test_post_to_bucket(): conn = create_connection('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -195,7 +196,8 @@ def test_post_to_bucket(): bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing') -@mock_s3bucket_path +@mock_s3 +@mock_s3_deprecated def test_post_with_metadata_to_bucket(): conn = create_connection('the_key', 'the_secret') bucket = conn.create_bucket("foobar") @@ -209,17 +211,17 @@ def test_post_with_metadata_to_bucket(): bucket.get_key('the-key').get_metadata('test').should.equal('metadata') -@mock_s3bucket_path +@mock_s3 def test_bucket_method_not_implemented(): requests.patch.when.called_with("https://s3.amazonaws.com/foobar").should.throw(NotImplementedError) -@mock_s3bucket_path +@mock_s3 def test_key_method_not_implemented(): requests.post.when.called_with("https://s3.amazonaws.com/foobar/foo").should.throw(NotImplementedError) -@mock_s3bucket_path +@mock_s3_deprecated def test_bucket_name_with_dot(): conn = create_connection() bucket = conn.create_bucket('firstname.lastname') @@ -228,7 +230,7 @@ def test_bucket_name_with_dot(): k.set_contents_from_string('somedata') -@mock_s3bucket_path +@mock_s3_deprecated def test_key_with_special_characters(): conn = create_connection() bucket = conn.create_bucket('test_bucket_name') @@ -241,7 +243,7 @@ def test_key_with_special_characters(): keys[0].name.should.equal("test_list_keys_2/*x+?^@~!y") -@mock_s3bucket_path +@mock_s3_deprecated def test_bucket_key_listing_order(): conn = create_connection() bucket = 
conn.create_bucket('test_bucket') @@ -283,7 +285,7 @@ def test_bucket_key_listing_order(): keys.should.equal(['toplevel/x/']) -@mock_s3bucket_path +@mock_s3_deprecated def test_delete_keys(): conn = create_connection() bucket = conn.create_bucket('foobar') @@ -301,7 +303,7 @@ def test_delete_keys(): keys[0].name.should.equal('file1') -@mock_s3bucket_path +@mock_s3_deprecated def test_delete_keys_with_invalid(): conn = create_connection() bucket = conn.create_bucket('foobar') diff --git a/tests/test_s3bucket_path/test_s3bucket_path_combo.py b/tests/test_s3bucket_path/test_s3bucket_path_combo.py index 48d65d497..e1b1075ee 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path_combo.py +++ b/tests/test_s3bucket_path/test_s3bucket_path_combo.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import boto from boto.s3.connection import OrdinaryCallingFormat -from moto import mock_s3bucket_path, mock_s3 +from moto import mock_s3_deprecated def create_connection(key=None, secret=None): @@ -11,12 +11,12 @@ def create_connection(key=None, secret=None): def test_bucketpath_combo_serial(): - @mock_s3bucket_path + @mock_s3_deprecated def make_bucket_path(): conn = create_connection() conn.create_bucket('mybucketpath') - @mock_s3 + @mock_s3_deprecated def make_bucket(): conn = boto.connect_s3('the_key', 'the_secret') conn.create_bucket('mybucket') diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index e9b64b78b..7771b9a65 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -6,10 +6,10 @@ from boto.exception import BotoServerError import sure # noqa -from moto import mock_ses +from moto import mock_ses_deprecated -@mock_ses +@mock_ses_deprecated def test_verify_email_identity(): conn = boto.connect_ses('the_key', 'the_secret') conn.verify_email_identity("test@example.com") @@ -19,7 +19,7 @@ def test_verify_email_identity(): address.should.equal('test@example.com') -@mock_ses +@mock_ses_deprecated def test_domain_verify(): conn 
= boto.connect_ses('the_key', 'the_secret') @@ -31,7 +31,7 @@ def test_domain_verify(): domains.should.equal(['domain1.com', 'domain2.com']) -@mock_ses +@mock_ses_deprecated def test_delete_identity(): conn = boto.connect_ses('the_key', 'the_secret') conn.verify_email_identity("test@example.com") @@ -41,7 +41,7 @@ def test_delete_identity(): conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult']['Identities'].should.have.length_of(0) -@mock_ses +@mock_ses_deprecated def test_send_email(): conn = boto.connect_ses('the_key', 'the_secret') @@ -56,7 +56,7 @@ def test_send_email(): sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours']) sent_count.should.equal(1) -@mock_ses +@mock_ses_deprecated def test_send_html_email(): conn = boto.connect_ses('the_key', 'the_secret') @@ -71,7 +71,7 @@ def test_send_html_email(): sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours']) sent_count.should.equal(1) -@mock_ses +@mock_ses_deprecated def test_send_raw_email(): conn = boto.connect_ses('the_key', 'the_secret') diff --git a/tests/test_sns/test_application.py b/tests/test_sns/test_application.py index 0566adeb3..31db73f62 100644 --- a/tests/test_sns/test_application.py +++ b/tests/test_sns/test_application.py @@ -2,11 +2,11 @@ from __future__ import unicode_literals import boto from boto.exception import BotoServerError -from moto import mock_sns +from moto import mock_sns_deprecated import sure # noqa -@mock_sns +@mock_sns_deprecated def test_create_platform_application(): conn = boto.connect_sns() platform_application = conn.create_platform_application( @@ -21,7 +21,7 @@ def test_create_platform_application(): application_arn.should.equal('arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') -@mock_sns +@mock_sns_deprecated def test_get_platform_application_attributes(): conn = boto.connect_sns() platform_application = conn.create_platform_application( @@ -40,13 +40,13 @@ def 
test_get_platform_application_attributes(): }) -@mock_sns +@mock_sns_deprecated def test_get_missing_platform_application_attributes(): conn = boto.connect_sns() conn.get_platform_application_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError) -@mock_sns +@mock_sns_deprecated def test_set_platform_application_attributes(): conn = boto.connect_sns() platform_application = conn.create_platform_application( @@ -68,7 +68,7 @@ def test_set_platform_application_attributes(): }) -@mock_sns +@mock_sns_deprecated def test_list_platform_applications(): conn = boto.connect_sns() conn.create_platform_application( @@ -85,7 +85,7 @@ def test_list_platform_applications(): applications.should.have.length_of(2) -@mock_sns +@mock_sns_deprecated def test_delete_platform_application(): conn = boto.connect_sns() conn.create_platform_application( @@ -109,7 +109,7 @@ def test_delete_platform_application(): applications.should.have.length_of(1) -@mock_sns +@mock_sns_deprecated def test_create_platform_endpoint(): conn = boto.connect_sns() platform_application = conn.create_platform_application( @@ -131,7 +131,7 @@ def test_create_platform_endpoint(): endpoint_arn.should.contain("arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") -@mock_sns +@mock_sns_deprecated def test_get_list_endpoints_by_platform_application(): conn = boto.connect_sns() platform_application = conn.create_platform_application( @@ -159,7 +159,7 @@ def test_get_list_endpoints_by_platform_application(): endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn) -@mock_sns +@mock_sns_deprecated def test_get_endpoint_attributes(): conn = boto.connect_sns() platform_application = conn.create_platform_application( @@ -187,13 +187,13 @@ def test_get_endpoint_attributes(): }) -@mock_sns +@mock_sns_deprecated def test_get_missing_endpoint_attributes(): conn = boto.connect_sns() conn.get_endpoint_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError) -@mock_sns 
+@mock_sns_deprecated def test_set_endpoint_attributes(): conn = boto.connect_sns() platform_application = conn.create_platform_application( @@ -224,7 +224,7 @@ def test_set_endpoint_attributes(): }) -@mock_sns +@mock_sns_deprecated def test_delete_endpoint(): conn = boto.connect_sns() platform_application = conn.create_platform_application( @@ -258,7 +258,7 @@ def test_delete_endpoint(): endpoint_list.should.have.length_of(0) -@mock_sns +@mock_sns_deprecated def test_publish_to_platform_endpoint(): conn = boto.connect_sns() platform_application = conn.create_platform_application( diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index 3805d9e5e..8f8bfb0a1 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -3,14 +3,14 @@ from six.moves.urllib.parse import parse_qs import boto from freezegun import freeze_time -import httpretty import sure # noqa -from moto import mock_sns, mock_sqs +from moto.packages.responses import responses +from moto import mock_sns, mock_sns_deprecated, mock_sqs_deprecated -@mock_sqs -@mock_sns +@mock_sqs_deprecated +@mock_sns_deprecated def test_publish_to_sqs(): conn = boto.connect_sns() conn.create_topic("some-topic") @@ -29,8 +29,8 @@ def test_publish_to_sqs(): message.get_body().should.equal('my message') -@mock_sqs -@mock_sns +@mock_sqs_deprecated +@mock_sns_deprecated def test_publish_to_sqs_in_different_region(): conn = boto.sns.connect_to_region("us-west-1") conn.create_topic("some-topic") @@ -51,10 +51,11 @@ def test_publish_to_sqs_in_different_region(): @freeze_time("2013-01-01") @mock_sns +@mock_sns_deprecated def test_publish_to_http(): - httpretty.HTTPretty.register_uri( + responses.add( method="POST", - uri="http://example.com/foobar", + url="http://example.com/foobar", ) conn = boto.connect_sns() @@ -67,7 +68,7 @@ def test_publish_to_http(): response = conn.publish(topic=topic_arn, message="my message", subject="my subject") message_id = 
response['PublishResponse']['PublishResult']['MessageId'] - last_request = httpretty.last_request() + last_request = responses.calls[-1].request last_request.method.should.equal("POST") parse_qs(last_request.body.decode('utf-8')).should.equal({ "Type": ["Notification"], @@ -81,3 +82,5 @@ def test_publish_to_http(): "SigningCertURL": ["https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem"], "UnsubscribeURL": ["https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"], }) + + diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 90d063971..b37522641 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -3,9 +3,9 @@ from six.moves.urllib.parse import parse_qs import boto3 from freezegun import freeze_time -import httpretty import sure # noqa +from moto.packages.responses import responses from moto import mock_sns, mock_sqs @@ -56,9 +56,9 @@ def test_publish_to_sqs_in_different_region(): @freeze_time("2013-01-01") @mock_sns def test_publish_to_http(): - httpretty.HTTPretty.register_uri( + responses.add( method="POST", - uri="http://example.com/foobar", + url="http://example.com/foobar", ) conn = boto3.client('sns', region_name='us-east-1') @@ -73,7 +73,7 @@ def test_publish_to_http(): response = conn.publish(TopicArn=topic_arn, Message="my message", Subject="my subject") message_id = response['MessageId'] - last_request = httpretty.last_request() + last_request = responses.calls[-2].request last_request.method.should.equal("POST") parse_qs(last_request.body.decode('utf-8')).should.equal({ "Type": ["Notification"], diff --git a/tests/test_sns/test_subscriptions.py b/tests/test_sns/test_subscriptions.py index a202edf36..e141c503a 100644 --- a/tests/test_sns/test_subscriptions.py +++ b/tests/test_sns/test_subscriptions.py @@ -3,11 +3,11 
@@ import boto import sure # noqa -from moto import mock_sns +from moto import mock_sns_deprecated from moto.sns.models import DEFAULT_PAGE_SIZE -@mock_sns +@mock_sns_deprecated def test_creating_subscription(): conn = boto.connect_sns() conn.create_topic("some-topic") @@ -32,7 +32,7 @@ def test_creating_subscription(): subscriptions.should.have.length_of(0) -@mock_sns +@mock_sns_deprecated def test_getting_subscriptions_by_topic(): conn = boto.connect_sns() conn.create_topic("topic1") @@ -51,7 +51,7 @@ def test_getting_subscriptions_by_topic(): topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") -@mock_sns +@mock_sns_deprecated def test_subscription_paging(): conn = boto.connect_sns() conn.create_topic("topic1") diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py index a2a8092ee..ab2f06382 100644 --- a/tests/test_sns/test_topics.py +++ b/tests/test_sns/test_topics.py @@ -5,11 +5,11 @@ import six import sure # noqa from boto.exception import BotoServerError -from moto import mock_sns +from moto import mock_sns_deprecated from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY, DEFAULT_PAGE_SIZE -@mock_sns +@mock_sns_deprecated def test_create_and_delete_topic(): conn = boto.connect_sns() conn.create_topic("some-topic") @@ -31,20 +31,20 @@ def test_create_and_delete_topic(): topics.should.have.length_of(0) -@mock_sns +@mock_sns_deprecated def test_get_missing_topic(): conn = boto.connect_sns() conn.get_topic_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError) -@mock_sns +@mock_sns_deprecated def test_create_topic_in_multiple_regions(): for region in ['us-west-1', 'us-west-2']: conn = boto.sns.connect_to_region(region) conn.create_topic("some-topic") list(conn.get_all_topics()["ListTopicsResponse"]["ListTopicsResult"]["Topics"]).should.have.length_of(1) -@mock_sns +@mock_sns_deprecated def test_topic_corresponds_to_region(): for region in ['us-east-1', 'us-west-2']: conn = 
boto.sns.connect_to_region(region) @@ -53,7 +53,7 @@ def test_topic_corresponds_to_region(): topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] topic_arn.should.equal("arn:aws:sns:{0}:123456789012:some-topic".format(region)) -@mock_sns +@mock_sns_deprecated def test_topic_attributes(): conn = boto.connect_sns() conn.create_topic("some-topic") @@ -95,7 +95,7 @@ def test_topic_attributes(): attributes["DisplayName"].should.equal("My display name") attributes["DeliveryPolicy"].should.equal("{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}") -@mock_sns +@mock_sns_deprecated def test_topic_paging(): conn = boto.connect_sns() for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 2)): diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 32b026a46..b3eaaab75 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -10,16 +10,15 @@ import requests import sure # noqa import time -from moto import mock_sqs +from moto import mock_sqs, mock_sqs_deprecated from tests.helpers import requires_boto_gte import tests.backport_assert_raises # noqa from nose.tools import assert_raises -sqs = boto3.resource('sqs', region_name='us-east-1') - @mock_sqs def test_create_queue(): + sqs = boto3.resource('sqs', region_name='us-east-1') new_queue = sqs.create_queue(QueueName='test-queue') new_queue.should_not.be.none new_queue.should.have.property('url').should.contain('test-queue') @@ -34,11 +33,13 @@ def test_create_queue(): @mock_sqs def test_get_inexistent_queue(): + sqs = boto3.resource('sqs', region_name='us-east-1') sqs.get_queue_by_name.when.called_with(QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) @mock_sqs def test_message_send(): + sqs = boto3.resource('sqs', region_name='us-east-1') queue = sqs.create_queue(QueueName="blah") msg = queue.send_message(MessageBody="derp") @@ -52,6 +53,8 @@ def test_message_send(): @mock_sqs def test_set_queue_attributes(): 
+ sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client('sqs', region_name='us-west-1') queue = sqs.create_queue(QueueName="blah") queue.attributes['VisibilityTimeout'].should.equal("30") @@ -90,6 +93,7 @@ def test_get_queue_with_prefix(): @mock_sqs def test_delete_queue(): + sqs = boto3.resource('sqs', region_name='us-east-1') conn = boto3.client("sqs", region_name='us-east-1') conn.create_queue(QueueName="test-queue", Attributes={"VisibilityTimeout": "60"}) queue = sqs.Queue('test-queue') @@ -105,6 +109,7 @@ def test_delete_queue(): @mock_sqs def test_set_queue_attribute(): + sqs = boto3.resource('sqs', region_name='us-east-1') conn = boto3.client("sqs", region_name='us-east-1') conn.create_queue(QueueName="test-queue", Attributes={"VisibilityTimeout": '60'}) @@ -118,6 +123,7 @@ def test_set_queue_attribute(): @mock_sqs def test_send_message(): + sqs = boto3.resource('sqs', region_name='us-east-1') conn = boto3.client("sqs", region_name='us-east-1') conn.create_queue(QueueName="test-queue") queue = sqs.Queue("test-queue") @@ -134,7 +140,7 @@ def test_send_message(): messages[1]['Body'].should.equal(body_two) -@mock_sqs +@mock_sqs_deprecated def test_send_message_with_xml_characters(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -150,7 +156,7 @@ def test_send_message_with_xml_characters(): @requires_boto_gte("2.28") -@mock_sqs +@mock_sqs_deprecated def test_send_message_with_attributes(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -175,7 +181,7 @@ def test_send_message_with_attributes(): dict(messages[0].message_attributes[name]).should.equal(value) -@mock_sqs +@mock_sqs_deprecated def test_send_message_with_delay(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -196,7 +202,7 @@ def test_send_message_with_delay(): 
queue.count().should.equal(0) -@mock_sqs +@mock_sqs_deprecated def test_send_large_message_fails(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -208,7 +214,7 @@ def test_send_large_message_fails(): queue.write.when.called_with(huge_message).should.throw(SQSError) -@mock_sqs +@mock_sqs_deprecated def test_message_becomes_inflight_when_received(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=2) @@ -229,7 +235,7 @@ def test_message_becomes_inflight_when_received(): queue.count().should.equal(1) -@mock_sqs +@mock_sqs_deprecated def test_receive_message_with_explicit_visibility_timeout(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -246,7 +252,7 @@ def test_receive_message_with_explicit_visibility_timeout(): # Message should remain visible queue.count().should.equal(1) -@mock_sqs +@mock_sqs_deprecated def test_change_message_visibility(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=2) @@ -280,7 +286,7 @@ def test_change_message_visibility(): queue.count().should.equal(0) -@mock_sqs +@mock_sqs_deprecated def test_message_attributes(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=2) @@ -304,7 +310,7 @@ def test_message_attributes(): assert message_attributes.get('SenderId') -@mock_sqs +@mock_sqs_deprecated def test_read_message_from_queue(): conn = boto.connect_sqs() queue = conn.create_queue('testqueue') @@ -316,7 +322,7 @@ def test_read_message_from_queue(): message.get_body().should.equal(body) -@mock_sqs +@mock_sqs_deprecated def test_queue_length(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -327,7 +333,7 @@ def test_queue_length(): queue.count().should.equal(2) 
-@mock_sqs +@mock_sqs_deprecated def test_delete_message(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -348,7 +354,7 @@ def test_delete_message(): queue.count().should.equal(0) -@mock_sqs +@mock_sqs_deprecated def test_send_batch_operation(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -370,7 +376,7 @@ def test_send_batch_operation(): @requires_boto_gte("2.28") -@mock_sqs +@mock_sqs_deprecated def test_send_batch_operation_with_message_attributes(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -386,7 +392,7 @@ def test_send_batch_operation_with_message_attributes(): dict(messages[0].message_attributes[name]).should.equal(value) -@mock_sqs +@mock_sqs_deprecated def test_delete_batch_operation(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=60) @@ -408,7 +414,7 @@ def test_sqs_method_not_implemented(): requests.post.when.called_with("https://sqs.amazonaws.com/?Action=[foobar]").should.throw(NotImplementedError) -@mock_sqs +@mock_sqs_deprecated def test_queue_attributes(): conn = boto.connect_sqs('the_key', 'the_secret') @@ -438,7 +444,7 @@ def test_queue_attributes(): attribute_names.should.contain('QueueArn') -@mock_sqs +@mock_sqs_deprecated def test_change_message_visibility_on_invalid_receipt(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=1) @@ -465,7 +471,7 @@ def test_change_message_visibility_on_invalid_receipt(): original_message.change_visibility.when.called_with(100).should.throw(SQSError) -@mock_sqs +@mock_sqs_deprecated def test_change_message_visibility_on_visible_message(): conn = boto.connect_sqs('the_key', 'the_secret') queue = conn.create_queue("test-queue", visibility_timeout=1) @@ -488,7 +494,7 @@ def 
test_change_message_visibility_on_visible_message(): original_message.change_visibility.when.called_with(100).should.throw(SQSError) -@mock_sqs +@mock_sqs_deprecated def test_purge_action(): conn = boto.sqs.connect_to_region("us-east-1") @@ -501,7 +507,7 @@ def test_purge_action(): queue.count().should.equal(0) -@mock_sqs +@mock_sqs_deprecated def test_delete_message_after_visibility_timeout(): VISIBILITY_TIMEOUT = 1 conn = boto.sqs.connect_to_region("us-east-1") diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 9bd02ce12..870f14860 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -6,11 +6,11 @@ import boto3 from freezegun import freeze_time import sure # noqa -from moto import mock_sts +from moto import mock_sts, mock_sts_deprecated @freeze_time("2012-01-01 12:00:00") -@mock_sts +@mock_sts_deprecated def test_get_session_token(): conn = boto.connect_sts() token = conn.get_session_token(duration=123) @@ -22,7 +22,7 @@ def test_get_session_token(): @freeze_time("2012-01-01 12:00:00") -@mock_sts +@mock_sts_deprecated def test_get_federation_token(): conn = boto.connect_sts() token = conn.get_federation_token(duration=123, name="Bob") @@ -36,7 +36,7 @@ def test_get_federation_token(): @freeze_time("2012-01-01 12:00:00") -@mock_sts +@mock_sts_deprecated def test_assume_role(): conn = boto.connect_sts() diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index 31eaeeddd..e6671e9e9 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -1,14 +1,14 @@ from boto.swf.exceptions import SWFResponseError from freezegun import freeze_time -from moto import mock_swf +from moto import mock_swf_deprecated from moto.swf import swf_backend from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION # PollForActivityTask endpoint -@mock_swf +@mock_swf_deprecated def test_poll_for_activity_task_when_one(): 
conn = setup_workflow() decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] @@ -26,14 +26,14 @@ def test_poll_for_activity_task_when_one(): ) -@mock_swf +@mock_swf_deprecated def test_poll_for_activity_task_when_none(): conn = setup_workflow() resp = conn.poll_for_activity_task("test-domain", "activity-task-list") resp.should.equal({"startedEventId": 0}) -@mock_swf +@mock_swf_deprecated def test_poll_for_activity_task_on_non_existent_queue(): conn = setup_workflow() resp = conn.poll_for_activity_task("test-domain", "non-existent-queue") @@ -41,7 +41,7 @@ def test_poll_for_activity_task_on_non_existent_queue(): # CountPendingActivityTasks endpoint -@mock_swf +@mock_swf_deprecated def test_count_pending_activity_tasks(): conn = setup_workflow() decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] @@ -53,7 +53,7 @@ def test_count_pending_activity_tasks(): resp.should.equal({"count": 1, "truncated": False}) -@mock_swf +@mock_swf_deprecated def test_count_pending_decision_tasks_on_non_existent_task_list(): conn = setup_workflow() resp = conn.count_pending_activity_tasks("test-domain", "non-existent") @@ -61,7 +61,7 @@ def test_count_pending_decision_tasks_on_non_existent_task_list(): # RespondActivityTaskCompleted endpoint -@mock_swf +@mock_swf_deprecated def test_respond_activity_task_completed(): conn = setup_workflow() decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] @@ -80,7 +80,7 @@ def test_respond_activity_task_completed(): ) -@mock_swf +@mock_swf_deprecated def test_respond_activity_task_completed_on_closed_workflow_execution(): conn = setup_workflow() decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] @@ -99,7 +99,7 @@ def test_respond_activity_task_completed_on_closed_workflow_execution(): ).should.throw(SWFResponseError, "WorkflowExecution=") -@mock_swf +@mock_swf_deprecated def 
test_respond_activity_task_completed_with_task_already_completed(): conn = setup_workflow() decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] @@ -116,7 +116,7 @@ def test_respond_activity_task_completed_with_task_already_completed(): # RespondActivityTaskFailed endpoint -@mock_swf +@mock_swf_deprecated def test_respond_activity_task_failed(): conn = setup_workflow() decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] @@ -138,7 +138,7 @@ def test_respond_activity_task_failed(): ) -@mock_swf +@mock_swf_deprecated def test_respond_activity_task_completed_with_wrong_token(): # NB: we just test ONE failure case for RespondActivityTaskFailed # because the safeguards are shared with RespondActivityTaskCompleted, so @@ -155,7 +155,7 @@ def test_respond_activity_task_completed_with_wrong_token(): # RecordActivityTaskHeartbeat endpoint -@mock_swf +@mock_swf_deprecated def test_record_activity_task_heartbeat(): conn = setup_workflow() decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] @@ -168,7 +168,7 @@ def test_record_activity_task_heartbeat(): resp.should.equal({"cancelRequested": False}) -@mock_swf +@mock_swf_deprecated def test_record_activity_task_heartbeat_with_wrong_token(): conn = setup_workflow() decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] @@ -182,7 +182,7 @@ def test_record_activity_task_heartbeat_with_wrong_token(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout(): conn = setup_workflow() decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index 872cd7f64..20c44dc5f 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -1,11 +1,11 @@ import boto 
from boto.swf.exceptions import SWFResponseError -from moto import mock_swf +from moto import mock_swf_deprecated # RegisterActivityType endpoint -@mock_swf +@mock_swf_deprecated def test_register_activity_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -17,7 +17,7 @@ def test_register_activity_type(): actype["activityType"]["version"].should.equal("v1.0") -@mock_swf +@mock_swf_deprecated def test_register_already_existing_activity_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -28,7 +28,7 @@ def test_register_already_existing_activity_type(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_register_with_wrong_parameter_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -39,7 +39,7 @@ def test_register_with_wrong_parameter_type(): # ListActivityTypes endpoint -@mock_swf +@mock_swf_deprecated def test_list_activity_types(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -52,7 +52,7 @@ def test_list_activity_types(): names.should.equal(["a-test-activity", "b-test-activity", "c-test-activity"]) -@mock_swf +@mock_swf_deprecated def test_list_activity_types_reverse_order(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -67,7 +67,7 @@ def test_list_activity_types_reverse_order(): # DeprecateActivityType endpoint -@mock_swf +@mock_swf_deprecated def test_deprecate_activity_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -80,7 +80,7 @@ def test_deprecate_activity_type(): actype["activityType"]["version"].should.equal("v1.0") -@mock_swf +@mock_swf_deprecated def test_deprecate_already_deprecated_activity_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -92,7 +92,7 @@ def 
test_deprecate_already_deprecated_activity_type(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_deprecate_non_existent_activity_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -103,7 +103,7 @@ def test_deprecate_non_existent_activity_type(): # DescribeActivityType endpoint -@mock_swf +@mock_swf_deprecated def test_describe_activity_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -118,7 +118,7 @@ def test_describe_activity_type(): infos["status"].should.equal("REGISTERED") -@mock_swf +@mock_swf_deprecated def test_describe_non_existent_activity_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index b16a6441a..b552723cb 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -1,14 +1,14 @@ from boto.swf.exceptions import SWFResponseError from freezegun import freeze_time -from moto import mock_swf +from moto import mock_swf_deprecated from moto.swf import swf_backend from ..utils import setup_workflow # PollForDecisionTask endpoint -@mock_swf +@mock_swf_deprecated def test_poll_for_decision_task_when_one(): conn = setup_workflow() @@ -23,7 +23,7 @@ def test_poll_for_decision_task_when_one(): resp["events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01") -@mock_swf +@mock_swf_deprecated def test_poll_for_decision_task_when_none(): conn = setup_workflow() conn.poll_for_decision_task("test-domain", "queue") @@ -34,14 +34,14 @@ def test_poll_for_decision_task_when_none(): resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) -@mock_swf +@mock_swf_deprecated def test_poll_for_decision_task_on_non_existent_queue(): conn = setup_workflow() resp = 
conn.poll_for_decision_task("test-domain", "non-existent-queue") resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) -@mock_swf +@mock_swf_deprecated def test_poll_for_decision_task_with_reverse_order(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue", reverse_order=True) @@ -50,7 +50,7 @@ def test_poll_for_decision_task_with_reverse_order(): # CountPendingDecisionTasks endpoint -@mock_swf +@mock_swf_deprecated def test_count_pending_decision_tasks(): conn = setup_workflow() conn.poll_for_decision_task("test-domain", "queue") @@ -58,14 +58,14 @@ def test_count_pending_decision_tasks(): resp.should.equal({"count": 1, "truncated": False}) -@mock_swf +@mock_swf_deprecated def test_count_pending_decision_tasks_on_non_existent_task_list(): conn = setup_workflow() resp = conn.count_pending_decision_tasks("test-domain", "non-existent") resp.should.equal({"count": 0, "truncated": False}) -@mock_swf +@mock_swf_deprecated def test_count_pending_decision_tasks_after_decision_completes(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue") @@ -76,7 +76,7 @@ def test_count_pending_decision_tasks_after_decision_completes(): # RespondDecisionTaskCompleted endpoint -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_with_no_decision(): conn = setup_workflow() @@ -108,7 +108,7 @@ def test_respond_decision_task_completed_with_no_decision(): resp["latestExecutionContext"].should.equal("free-form context") -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_with_wrong_token(): conn = setup_workflow() conn.poll_for_decision_task("test-domain", "queue") @@ -117,7 +117,7 @@ def test_respond_decision_task_completed_with_wrong_token(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_on_close_workflow_execution(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue") @@ -133,7 
+133,7 @@ def test_respond_decision_task_completed_on_close_workflow_execution(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_with_task_already_completed(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue") @@ -145,7 +145,7 @@ def test_respond_decision_task_completed_with_task_already_completed(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_with_complete_workflow_execution(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue") @@ -170,7 +170,7 @@ def test_respond_decision_task_completed_with_complete_workflow_execution(): resp["events"][-1]["workflowExecutionCompletedEventAttributes"]["result"].should.equal("foo bar") -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_with_close_decision_not_last(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue") @@ -186,7 +186,7 @@ def test_respond_decision_task_completed_with_close_decision_not_last(): ).should.throw(SWFResponseError, r"Close must be last decision in list") -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_with_invalid_decision_type(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue") @@ -204,7 +204,7 @@ def test_respond_decision_task_completed_with_invalid_decision_type(): ) -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_with_missing_attributes(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue") @@ -226,7 +226,7 @@ def test_respond_decision_task_completed_with_missing_attributes(): ) -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_with_missing_attributes_totally(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue") @@ -245,7 +245,7 @@ def 
test_respond_decision_task_completed_with_missing_attributes_totally(): ) -@mock_swf +@mock_swf_deprecated def test_respond_decision_task_completed_with_fail_workflow_execution(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "queue") @@ -272,7 +272,7 @@ def test_respond_decision_task_completed_with_fail_workflow_execution(): attrs["details"].should.equal("foo") -@mock_swf +@mock_swf_deprecated @freeze_time("2015-01-01 12:00:00") def test_respond_decision_task_completed_with_schedule_activity_task(): conn = setup_workflow() diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index fc89ea752..1f785095c 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -1,11 +1,11 @@ import boto from boto.swf.exceptions import SWFResponseError -from moto import mock_swf +from moto import mock_swf_deprecated # RegisterDomain endpoint -@mock_swf +@mock_swf_deprecated def test_register_domain(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60", description="A test domain") @@ -18,7 +18,7 @@ def test_register_domain(): domain["description"].should.equal("A test domain") -@mock_swf +@mock_swf_deprecated def test_register_already_existing_domain(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60", description="A test domain") @@ -28,7 +28,7 @@ def test_register_already_existing_domain(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_register_with_wrong_parameter_type(): conn = boto.connect_swf("the_key", "the_secret") @@ -38,7 +38,7 @@ def test_register_with_wrong_parameter_type(): # ListDomains endpoint -@mock_swf +@mock_swf_deprecated def test_list_domains_order(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("b-test-domain", "60") @@ -50,7 +50,7 @@ def test_list_domains_order(): names.should.equal(["a-test-domain", 
"b-test-domain", "c-test-domain"]) -@mock_swf +@mock_swf_deprecated def test_list_domains_reverse_order(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("b-test-domain", "60") @@ -63,7 +63,7 @@ def test_list_domains_reverse_order(): # DeprecateDomain endpoint -@mock_swf +@mock_swf_deprecated def test_deprecate_domain(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60", description="A test domain") @@ -75,7 +75,7 @@ def test_deprecate_domain(): domain["name"].should.equal("test-domain") -@mock_swf +@mock_swf_deprecated def test_deprecate_already_deprecated_domain(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60", description="A test domain") @@ -86,7 +86,7 @@ def test_deprecate_already_deprecated_domain(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_deprecate_non_existent_domain(): conn = boto.connect_swf("the_key", "the_secret") @@ -96,7 +96,7 @@ def test_deprecate_non_existent_domain(): # DescribeDomain endpoint -@mock_swf +@mock_swf_deprecated def test_describe_domain(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60", description="A test domain") @@ -108,7 +108,7 @@ def test_describe_domain(): domain["domainInfo"]["status"].should.equal("REGISTERED") -@mock_swf +@mock_swf_deprecated def test_describe_non_existent_domain(): conn = boto.connect_swf("the_key", "the_secret") diff --git a/tests/test_swf/responses/test_timeouts.py b/tests/test_swf/responses/test_timeouts.py index afa130c21..726410e76 100644 --- a/tests/test_swf/responses/test_timeouts.py +++ b/tests/test_swf/responses/test_timeouts.py @@ -1,13 +1,13 @@ from freezegun import freeze_time -from moto import mock_swf +from moto import mock_swf_deprecated from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION # Activity Task Heartbeat timeout # Default value in workflow helpers: 5 mins -@mock_swf 
+@mock_swf_deprecated def test_activity_task_heartbeat_timeout(): with freeze_time("2015-01-01 12:00:00"): conn = setup_workflow() @@ -36,7 +36,7 @@ def test_activity_task_heartbeat_timeout(): # Decision Task Start to Close timeout # Default value in workflow helpers: 5 mins -@mock_swf +@mock_swf_deprecated def test_decision_task_start_to_close_timeout(): pass with freeze_time("2015-01-01 12:00:00"): @@ -70,7 +70,7 @@ def test_decision_task_start_to_close_timeout(): # Workflow Execution Start to Close timeout # Default value in workflow helpers: 2 hours -@mock_swf +@mock_swf_deprecated def test_workflow_execution_start_to_close_timeout(): pass with freeze_time("2015-01-01 12:00:00"): diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index f4a949687..d5dc44a38 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -6,12 +6,12 @@ import sure # noqa # Ensure 'assert_raises' context manager support for Python 2.6 import tests.backport_assert_raises # noqa -from moto import mock_swf +from moto import mock_swf_deprecated from moto.core.utils import unix_time # Utils -@mock_swf +@mock_swf_deprecated def setup_swf_environment(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60", description="A test domain") @@ -26,7 +26,7 @@ def setup_swf_environment(): # StartWorkflowExecution endpoint -@mock_swf +@mock_swf_deprecated def test_start_workflow_execution(): conn = setup_swf_environment() @@ -34,7 +34,7 @@ def test_start_workflow_execution(): wf.should.contain("runId") -@mock_swf +@mock_swf_deprecated def test_start_already_started_workflow_execution(): conn = setup_swf_environment() conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") @@ -44,7 +44,7 @@ def test_start_already_started_workflow_execution(): ).should.throw(SWFResponseError) -@mock_swf 
+@mock_swf_deprecated def test_start_workflow_execution_on_deprecated_type(): conn = setup_swf_environment() conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") @@ -55,7 +55,7 @@ def test_start_workflow_execution_on_deprecated_type(): # DescribeWorkflowExecution endpoint -@mock_swf +@mock_swf_deprecated def test_describe_workflow_execution(): conn = setup_swf_environment() hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") @@ -66,7 +66,7 @@ def test_describe_workflow_execution(): wfe["executionInfo"]["executionStatus"].should.equal("OPEN") -@mock_swf +@mock_swf_deprecated def test_describe_non_existent_workflow_execution(): conn = setup_swf_environment() @@ -76,7 +76,7 @@ def test_describe_non_existent_workflow_execution(): # GetWorkflowExecutionHistory endpoint -@mock_swf +@mock_swf_deprecated def test_get_workflow_execution_history(): conn = setup_swf_environment() hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") @@ -87,7 +87,7 @@ def test_get_workflow_execution_history(): types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) -@mock_swf +@mock_swf_deprecated def test_get_workflow_execution_history_with_reverse_order(): conn = setup_swf_environment() hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") @@ -99,7 +99,7 @@ def test_get_workflow_execution_history_with_reverse_order(): types.should.equal(["DecisionTaskScheduled", "WorkflowExecutionStarted"]) -@mock_swf +@mock_swf_deprecated def test_get_workflow_execution_history_on_non_existent_workflow_execution(): conn = setup_swf_environment() @@ -109,7 +109,7 @@ def test_get_workflow_execution_history_on_non_existent_workflow_execution(): # ListOpenWorkflowExecutions endpoint -@mock_swf +@mock_swf_deprecated def test_list_open_workflow_executions(): conn = setup_swf_environment() # One open workflow execution @@ -143,7 +143,7 @@ def 
test_list_open_workflow_executions(): # ListClosedWorkflowExecutions endpoint -@mock_swf +@mock_swf_deprecated def test_list_closed_workflow_executions(): conn = setup_swf_environment() # Leave one workflow execution open to make sure it isn't displayed @@ -178,7 +178,7 @@ def test_list_closed_workflow_executions(): # TerminateWorkflowExecution endpoint -@mock_swf +@mock_swf_deprecated def test_terminate_workflow_execution(): conn = setup_swf_environment() run_id = conn.start_workflow_execution( @@ -200,7 +200,7 @@ def test_terminate_workflow_execution(): attrs["cause"].should.equal("OPERATOR_INITIATED") -@mock_swf +@mock_swf_deprecated def test_terminate_workflow_execution_with_wrong_workflow_or_run_id(): conn = setup_swf_environment() run_id = conn.start_workflow_execution( diff --git a/tests/test_swf/responses/test_workflow_types.py b/tests/test_swf/responses/test_workflow_types.py index 04521ff6e..1e838c2ee 100644 --- a/tests/test_swf/responses/test_workflow_types.py +++ b/tests/test_swf/responses/test_workflow_types.py @@ -1,11 +1,12 @@ +import sure import boto -from moto import mock_swf +from moto import mock_swf_deprecated from boto.swf.exceptions import SWFResponseError # RegisterWorkflowType endpoint -@mock_swf +@mock_swf_deprecated def test_register_workflow_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -17,7 +18,7 @@ def test_register_workflow_type(): actype["workflowType"]["version"].should.equal("v1.0") -@mock_swf +@mock_swf_deprecated def test_register_already_existing_workflow_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -28,7 +29,7 @@ def test_register_already_existing_workflow_type(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_register_with_wrong_parameter_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -39,7 +40,7 @@ def 
test_register_with_wrong_parameter_type(): # ListWorkflowTypes endpoint -@mock_swf +@mock_swf_deprecated def test_list_workflow_types(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -52,7 +53,7 @@ def test_list_workflow_types(): names.should.equal(["a-test-workflow", "b-test-workflow", "c-test-workflow"]) -@mock_swf +@mock_swf_deprecated def test_list_workflow_types_reverse_order(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -67,7 +68,7 @@ def test_list_workflow_types_reverse_order(): # DeprecateWorkflowType endpoint -@mock_swf +@mock_swf_deprecated def test_deprecate_workflow_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -80,7 +81,7 @@ def test_deprecate_workflow_type(): actype["workflowType"]["version"].should.equal("v1.0") -@mock_swf +@mock_swf_deprecated def test_deprecate_already_deprecated_workflow_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -92,7 +93,7 @@ def test_deprecate_already_deprecated_workflow_type(): ).should.throw(SWFResponseError) -@mock_swf +@mock_swf_deprecated def test_deprecate_non_existent_workflow_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -103,7 +104,7 @@ def test_deprecate_non_existent_workflow_type(): # DescribeWorkflowType endpoint -@mock_swf +@mock_swf_deprecated def test_describe_workflow_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") @@ -120,7 +121,7 @@ def test_describe_workflow_type(): infos["status"].should.equal("REGISTERED") -@mock_swf +@mock_swf_deprecated def test_describe_non_existent_workflow_type(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60") From 678f73389fd8ce7d1383620ff1e3587f80c6662f Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 15 Feb 2017 
22:45:20 -0500 Subject: [PATCH 045/213] Fix package being submodule. --- moto/packages/responses | 1 - moto/packages/responses/.gitignore | 12 + moto/packages/responses/.travis.yml | 27 ++ moto/packages/responses/CHANGES | 32 ++ moto/packages/responses/LICENSE | 201 ++++++++++ moto/packages/responses/MANIFEST.in | 2 + moto/packages/responses/Makefile | 16 + moto/packages/responses/README.rst | 190 ++++++++++ moto/packages/responses/__init__.py | 0 moto/packages/responses/responses.py | 321 ++++++++++++++++ moto/packages/responses/setup.cfg | 5 + moto/packages/responses/setup.py | 98 +++++ moto/packages/responses/test_responses.py | 443 ++++++++++++++++++++++ moto/packages/responses/tox.ini | 11 + 14 files changed, 1358 insertions(+), 1 deletion(-) delete mode 160000 moto/packages/responses create mode 100644 moto/packages/responses/.gitignore create mode 100644 moto/packages/responses/.travis.yml create mode 100644 moto/packages/responses/CHANGES create mode 100644 moto/packages/responses/LICENSE create mode 100644 moto/packages/responses/MANIFEST.in create mode 100644 moto/packages/responses/Makefile create mode 100644 moto/packages/responses/README.rst create mode 100644 moto/packages/responses/__init__.py create mode 100644 moto/packages/responses/responses.py create mode 100644 moto/packages/responses/setup.cfg create mode 100644 moto/packages/responses/setup.py create mode 100644 moto/packages/responses/test_responses.py create mode 100644 moto/packages/responses/tox.ini diff --git a/moto/packages/responses b/moto/packages/responses deleted file mode 160000 index 8d500447e..000000000 --- a/moto/packages/responses +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 8d500447e3d5c2b96ace2eb7ab0f60158e921ed8 diff --git a/moto/packages/responses/.gitignore b/moto/packages/responses/.gitignore new file mode 100644 index 000000000..5d4406b8d --- /dev/null +++ b/moto/packages/responses/.gitignore @@ -0,0 +1,12 @@ +.arcconfig +.coverage +.DS_Store +.idea +*.db +*.egg-info 
+*.pyc +/htmlcov +/dist +/build +/.cache +/.tox diff --git a/moto/packages/responses/.travis.yml b/moto/packages/responses/.travis.yml new file mode 100644 index 000000000..9ab219db0 --- /dev/null +++ b/moto/packages/responses/.travis.yml @@ -0,0 +1,27 @@ +language: python +sudo: false +python: + - "2.6" + - "2.7" + - "3.3" + - "3.4" + - "3.5" +cache: + directories: + - .pip_download_cache +env: + matrix: + - REQUESTS=requests==2.0 + - REQUESTS=-U requests + - REQUESTS="-e git+git://github.com/kennethreitz/requests.git#egg=requests" + global: + - PIP_DOWNLOAD_CACHE=".pip_download_cache" +matrix: + allow_failures: + - env: 'REQUESTS="-e git+git://github.com/kennethreitz/requests.git#egg=requests"' +install: + - "pip install ${REQUESTS}" + - make develop +script: + - if [[ $TRAVIS_PYTHON_VERSION != 2.6 ]]; then make lint; fi + - py.test . --cov responses --cov-report term-missing diff --git a/moto/packages/responses/CHANGES b/moto/packages/responses/CHANGES new file mode 100644 index 000000000..1bfd7ead8 --- /dev/null +++ b/moto/packages/responses/CHANGES @@ -0,0 +1,32 @@ +Unreleased +---------- + +- Allow empty list/dict as json object (GH-100) + +0.5.1 +----- + +- Add LICENSE, README and CHANGES to the PyPI distribution (GH-97). 
+ +0.5.0 +----- + +- Allow passing a JSON body to `response.add` (GH-82) +- Improve ConnectionError emulation (GH-73) +- Correct assertion in assert_all_requests_are_fired (GH-71) + +0.4.0 +----- + +- Requests 2.0+ is required +- Mocking now happens on the adapter instead of the session + +0.3.0 +----- + +- Add the ability to mock errors (GH-22) +- Add responses.mock context manager (GH-36) +- Support custom adapters (GH-33) +- Add support for regexp error matching (GH-25) +- Add support for dynamic bodies via `responses.add_callback` (GH-24) +- Preserve argspec when using `responses.activate` decorator (GH-18) diff --git a/moto/packages/responses/LICENSE b/moto/packages/responses/LICENSE new file mode 100644 index 000000000..52b44b20a --- /dev/null +++ b/moto/packages/responses/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright 2015 David Cramer + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/moto/packages/responses/MANIFEST.in b/moto/packages/responses/MANIFEST.in new file mode 100644 index 000000000..ef901684c --- /dev/null +++ b/moto/packages/responses/MANIFEST.in @@ -0,0 +1,2 @@ +include README.rst CHANGES LICENSE +global-exclude *~ diff --git a/moto/packages/responses/Makefile b/moto/packages/responses/Makefile new file mode 100644 index 000000000..9da42c6d1 --- /dev/null +++ b/moto/packages/responses/Makefile @@ -0,0 +1,16 @@ +develop: + pip install -e . + make install-test-requirements + +install-test-requirements: + pip install "file://`pwd`#egg=responses[tests]" + +test: develop lint + @echo "Running Python tests" + py.test . + @echo "" + +lint: + @echo "Linting Python files" + PYFLAKES_NODOCTEST=1 flake8 . + @echo "" diff --git a/moto/packages/responses/README.rst b/moto/packages/responses/README.rst new file mode 100644 index 000000000..5f946fcde --- /dev/null +++ b/moto/packages/responses/README.rst @@ -0,0 +1,190 @@ +Responses +========= + +.. image:: https://travis-ci.org/getsentry/responses.svg?branch=master + :target: https://travis-ci.org/getsentry/responses + +A utility library for mocking out the `requests` Python library. + +.. note:: Responses requires Requests >= 2.0 + +Response body as string +----------------------- + +.. 
code-block:: python + + import responses + import requests + + @responses.activate + def test_my_api(): + responses.add(responses.GET, 'http://twitter.com/api/1/foobar', + body='{"error": "not found"}', status=404, + content_type='application/json') + + resp = requests.get('http://twitter.com/api/1/foobar') + + assert resp.json() == {"error": "not found"} + + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar' + assert responses.calls[0].response.text == '{"error": "not found"}' + +You can also specify a JSON object instead of a body string. + +.. code-block:: python + + import responses + import requests + + @responses.activate + def test_my_api(): + responses.add(responses.GET, 'http://twitter.com/api/1/foobar', + json={"error": "not found"}, status=404) + + resp = requests.get('http://twitter.com/api/1/foobar') + + assert resp.json() == {"error": "not found"} + + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar' + assert responses.calls[0].response.text == '{"error": "not found"}' + +Request callback +---------------- + +.. 
code-block:: python + + import json + + import responses + import requests + + @responses.activate + def test_calc_api(): + + def request_callback(request): + payload = json.loads(request.body) + resp_body = {'value': sum(payload['numbers'])} + headers = {'request-id': '728d329e-0e86-11e4-a748-0c84dc037c13'} + return (200, headers, json.dumps(resp_body)) + + responses.add_callback( + responses.POST, 'http://calc.com/sum', + callback=request_callback, + content_type='application/json', + ) + + resp = requests.post( + 'http://calc.com/sum', + json.dumps({'numbers': [1, 2, 3]}), + headers={'content-type': 'application/json'}, + ) + + assert resp.json() == {'value': 6} + + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == 'http://calc.com/sum' + assert responses.calls[0].response.text == '{"value": 6}' + assert ( + responses.calls[0].response.headers['request-id'] == + '728d329e-0e86-11e4-a748-0c84dc037c13' + ) + +Instead of passing a string URL into `responses.add` or `responses.add_callback` +you can also supply a compiled regular expression. + +.. code-block:: python + + import re + import responses + import requests + + # Instead of + responses.add(responses.GET, 'http://twitter.com/api/1/foobar', + body='{"error": "not found"}', status=404, + content_type='application/json') + + # You can do the following + url_re = re.compile(r'https?://twitter\.com/api/\d+/foobar') + responses.add(responses.GET, url_re, + body='{"error": "not found"}', status=404, + content_type='application/json') + +A response can also throw an exception as follows. + +.. code-block:: python + + import responses + import requests + from requests.exceptions import HTTPError + + exception = HTTPError('Something went wrong') + responses.add(responses.GET, 'http://twitter.com/api/1/foobar', + body=exception) + # All calls to 'http://twitter.com/api/1/foobar' will throw exception. + + +Responses as a context manager +------------------------------ + +.. 
code-block:: python + + import responses + import requests + + + def test_my_api(): + with responses.RequestsMock() as rsps: + rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', + body='{}', status=200, + content_type='application/json') + resp = requests.get('http://twitter.com/api/1/foobar') + + assert resp.status_code == 200 + + # outside the context manager requests will hit the remote server + resp = requests.get('http://twitter.com/api/1/foobar') + resp.status_code == 404 + + +Assertions on declared responses +-------------------------------- + +When used as a context manager, Responses will, by default, raise an assertion +error if a url was registered but not accessed. This can be disabled by passing +the ``assert_all_requests_are_fired`` value: + +.. code-block:: python + + import responses + import requests + + + def test_my_api(): + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', + body='{}', status=200, + content_type='application/json') + +Multiple Responses +------------------ +You can also use ``assert_all_requests_are_fired`` to add multiple responses for the same url: + +.. 
code-block:: python + + import responses + import requests + + + def test_my_api(): + with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps: + rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', status=500) + rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', + body='{}', status=200, + content_type='application/json') + + resp = requests.get('http://twitter.com/api/1/foobar') + assert resp.status_code == 500 + resp = requests.get('http://twitter.com/api/1/foobar') + assert resp.status_code == 200 diff --git a/moto/packages/responses/__init__.py b/moto/packages/responses/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/packages/responses/responses.py b/moto/packages/responses/responses.py new file mode 100644 index 000000000..735655664 --- /dev/null +++ b/moto/packages/responses/responses.py @@ -0,0 +1,321 @@ +from __future__ import ( + absolute_import, print_function, division, unicode_literals +) + +import inspect +import json as json_module +import re +import six + +from collections import namedtuple, Sequence, Sized +from functools import update_wrapper +from cookies import Cookies +from requests.utils import cookiejar_from_dict +from requests.exceptions import ConnectionError +from requests.sessions import REDIRECT_STATI + +try: + from requests.packages.urllib3.response import HTTPResponse +except ImportError: + from urllib3.response import HTTPResponse + +if six.PY2: + from urlparse import urlparse, parse_qsl +else: + from urllib.parse import urlparse, parse_qsl + +if six.PY2: + try: + from six import cStringIO as BufferIO + except ImportError: + from six import StringIO as BufferIO +else: + from io import BytesIO as BufferIO + + +Call = namedtuple('Call', ['request', 'response']) + +_wrapper_template = """\ +def wrapper%(signature)s: + with responses: + return func%(funcargs)s +""" + + +def _is_string(s): + return isinstance(s, (six.string_types, six.text_type)) + + +def _is_redirect(response): + 
try: + # 2.0.0 <= requests <= 2.2 + return response.is_redirect + except AttributeError: + # requests > 2.2 + return ( + # use request.sessions conditional + response.status_code in REDIRECT_STATI and + 'location' in response.headers + ) + + +def get_wrapped(func, wrapper_template, evaldict): + # Preserve the argspec for the wrapped function so that testing + # tools such as pytest can continue to use their fixture injection. + args, a, kw, defaults = inspect.getargspec(func) + + signature = inspect.formatargspec(args, a, kw, defaults) + is_bound_method = hasattr(func, '__self__') + if is_bound_method: + args = args[1:] # Omit 'self' + callargs = inspect.formatargspec(args, a, kw, None) + + ctx = {'signature': signature, 'funcargs': callargs} + six.exec_(wrapper_template % ctx, evaldict) + + wrapper = evaldict['wrapper'] + + update_wrapper(wrapper, func) + if is_bound_method: + wrapper = wrapper.__get__(func.__self__, type(func.__self__)) + return wrapper + + +class CallList(Sequence, Sized): + def __init__(self): + self._calls = [] + + def __iter__(self): + return iter(self._calls) + + def __len__(self): + return len(self._calls) + + def __getitem__(self, idx): + return self._calls[idx] + + def add(self, request, response): + self._calls.append(Call(request, response)) + + def reset(self): + self._calls = [] + + +def _ensure_url_default_path(url, match_querystring): + if _is_string(url) and url.count('/') == 2: + if match_querystring: + return url.replace('?', '/?', 1) + else: + return url + '/' + return url + + +class RequestsMock(object): + DELETE = 'DELETE' + GET = 'GET' + HEAD = 'HEAD' + OPTIONS = 'OPTIONS' + PATCH = 'PATCH' + POST = 'POST' + PUT = 'PUT' + + def __init__(self, assert_all_requests_are_fired=True): + self._calls = CallList() + self.reset() + self.assert_all_requests_are_fired = assert_all_requests_are_fired + + def reset(self): + self._urls = [] + self._calls.reset() + + def add(self, method, url, body='', match_querystring=False, + status=200, 
adding_headers=None, stream=False, + content_type='text/plain', json=None): + + # if we were passed a `json` argument, + # override the body and content_type + if json is not None: + body = json_module.dumps(json) + content_type = 'application/json' + + # ensure the url has a default path set if the url is a string + url = _ensure_url_default_path(url, match_querystring) + + # body must be bytes + if isinstance(body, six.text_type): + body = body.encode('utf-8') + + self._urls.append({ + 'url': url, + 'method': method, + 'body': body, + 'content_type': content_type, + 'match_querystring': match_querystring, + 'status': status, + 'adding_headers': adding_headers, + 'stream': stream, + }) + + def add_callback(self, method, url, callback, match_querystring=False, + content_type='text/plain'): + # ensure the url has a default path set if the url is a string + # url = _ensure_url_default_path(url, match_querystring) + + self._urls.append({ + 'url': url, + 'method': method, + 'callback': callback, + 'content_type': content_type, + 'match_querystring': match_querystring, + }) + + @property + def calls(self): + return self._calls + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, traceback): + success = type is None + self.stop(allow_assert=success) + self.reset() + return success + + def activate(self, func): + evaldict = {'responses': self, 'func': func} + return get_wrapped(func, _wrapper_template, evaldict) + + def _find_match(self, request): + for match in self._urls: + if request.method != match['method']: + continue + + if not self._has_url_match(match, request.url): + continue + + break + else: + return None + if self.assert_all_requests_are_fired: + # for each found match remove the url from the stack + self._urls.remove(match) + return match + + def _has_url_match(self, match, request_url): + url = match['url'] + + if not match['match_querystring']: + request_url = request_url.split('?', 1)[0] + + if _is_string(url): + if 
match['match_querystring']: + return self._has_strict_url_match(url, request_url) + else: + return url == request_url + elif isinstance(url, re._pattern_type) and url.match(request_url): + return True + else: + return False + + def _has_strict_url_match(self, url, other): + url_parsed = urlparse(url) + other_parsed = urlparse(other) + + if url_parsed[:3] != other_parsed[:3]: + return False + + url_qsl = sorted(parse_qsl(url_parsed.query)) + other_qsl = sorted(parse_qsl(other_parsed.query)) + return url_qsl == other_qsl + + def _on_request(self, adapter, request, **kwargs): + match = self._find_match(request) + # TODO(dcramer): find the correct class for this + if match is None: + error_msg = 'Connection refused: {0} {1}'.format(request.method, + request.url) + response = ConnectionError(error_msg) + response.request = request + + self._calls.add(request, response) + raise response + + if 'body' in match and isinstance(match['body'], Exception): + self._calls.add(request, match['body']) + raise match['body'] + + headers = {} + if match['content_type'] is not None: + headers['Content-Type'] = match['content_type'] + + if 'callback' in match: # use callback + status, r_headers, body = match['callback'](request) + if isinstance(body, six.text_type): + body = body.encode('utf-8') + body = BufferIO(body) + headers.update(r_headers) + + elif 'body' in match: + if match['adding_headers']: + headers.update(match['adding_headers']) + status = match['status'] + body = BufferIO(match['body']) + + response = HTTPResponse( + status=status, + reason=six.moves.http_client.responses[status], + body=body, + headers=headers, + preload_content=False, + ) + + response = adapter.build_response(request, response) + if not match.get('stream'): + response.content # NOQA + + try: + resp_cookies = Cookies.from_request(response.headers['set-cookie']) + response.cookies = cookiejar_from_dict(dict( + (v.name, v.value) + for _, v + in resp_cookies.items() + )) + except (KeyError, TypeError): + 
pass + + self._calls.add(request, response) + + return response + + def start(self): + try: + from unittest import mock + except ImportError: + import mock + + def unbound_on_send(adapter, request, *a, **kwargs): + return self._on_request(adapter, request, *a, **kwargs) + self._patcher1 = mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send', + unbound_on_send) + self._patcher1.start() + self._patcher2 = mock.patch('requests.adapters.HTTPAdapter.send', + unbound_on_send) + self._patcher2.start() + + def stop(self, allow_assert=True): + self._patcher1.stop() + self._patcher2.stop() + if allow_assert and self.assert_all_requests_are_fired and self._urls: + raise AssertionError( + 'Not all requests have been executed {0!r}'.format( + [(url['method'], url['url']) for url in self._urls])) + + +# expose default mock namespace +mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False) +__all__ = [] +for __attr in (a for a in dir(_default_mock) if not a.startswith('_')): + __all__.append(__attr) + globals()[__attr] = getattr(_default_mock, __attr) diff --git a/moto/packages/responses/setup.cfg b/moto/packages/responses/setup.cfg new file mode 100644 index 000000000..9b6594f2e --- /dev/null +++ b/moto/packages/responses/setup.cfg @@ -0,0 +1,5 @@ +[pytest] +addopts=--tb=short + +[bdist_wheel] +universal=1 diff --git a/moto/packages/responses/setup.py b/moto/packages/responses/setup.py new file mode 100644 index 000000000..bab522865 --- /dev/null +++ b/moto/packages/responses/setup.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +""" +responses +========= + +A utility library for mocking out the `requests` Python library. 
+ +:copyright: (c) 2015 David Cramer +:license: Apache 2.0 +""" + +import sys +import logging + +from setuptools import setup +from setuptools.command.test import test as TestCommand +import pkg_resources + + +setup_requires = [] + +if 'test' in sys.argv: + setup_requires.append('pytest') + +install_requires = [ + 'requests>=2.0', + 'cookies', + 'six', +] + +tests_require = [ + 'pytest', + 'coverage >= 3.7.1, < 5.0.0', + 'pytest-cov', + 'flake8', +] + + +extras_require = { + ':python_version in "2.6, 2.7, 3.2"': ['mock'], + 'tests': tests_require, +} + +try: + if 'bdist_wheel' not in sys.argv: + for key, value in extras_require.items(): + if key.startswith(':') and pkg_resources.evaluate_marker(key[1:]): + install_requires.extend(value) +except Exception: + logging.getLogger(__name__).exception( + 'Something went wrong calculating platform specific dependencies, so ' + "you're getting them all!" + ) + for key, value in extras_require.items(): + if key.startswith(':'): + install_requires.extend(value) + + +class PyTest(TestCommand): + def finalize_options(self): + TestCommand.finalize_options(self) + self.test_args = ['test_responses.py'] + self.test_suite = True + + def run_tests(self): + # import here, cause outside the eggs aren't loaded + import pytest + errno = pytest.main(self.test_args) + sys.exit(errno) + + +setup( + name='responses', + version='0.6.0', + author='David Cramer', + description=( + 'A utility library for mocking out the `requests` Python library.' 
+ ), + url='https://github.com/getsentry/responses', + license='Apache 2.0', + long_description=open('README.rst').read(), + py_modules=['responses', 'test_responses'], + zip_safe=False, + install_requires=install_requires, + extras_require=extras_require, + tests_require=tests_require, + setup_requires=setup_requires, + cmdclass={'test': PyTest}, + include_package_data=True, + classifiers=[ + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 3', + 'Topic :: Software Development' + ], +) diff --git a/moto/packages/responses/test_responses.py b/moto/packages/responses/test_responses.py new file mode 100644 index 000000000..ba0126ad5 --- /dev/null +++ b/moto/packages/responses/test_responses.py @@ -0,0 +1,443 @@ +from __future__ import ( + absolute_import, print_function, division, unicode_literals +) + +import re +import requests +import responses +import pytest + +from inspect import getargspec +from requests.exceptions import ConnectionError, HTTPError + + +def assert_reset(): + assert len(responses._default_mock._urls) == 0 + assert len(responses.calls) == 0 + + +def assert_response(resp, body=None, content_type='text/plain'): + assert resp.status_code == 200 + assert resp.reason == 'OK' + if content_type is not None: + assert resp.headers['Content-Type'] == content_type + else: + assert 'Content-Type' not in resp.headers + assert resp.text == body + + +def test_response(): + @responses.activate + def run(): + responses.add(responses.GET, 'http://example.com', body=b'test') + resp = requests.get('http://example.com') + assert_response(resp, 'test') + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == 'http://example.com/' + assert responses.calls[0].response.content == b'test' + + resp = requests.get('http://example.com?foo=bar') + assert_response(resp, 'test') + assert len(responses.calls) 
== 2 + assert responses.calls[1].request.url == 'http://example.com/?foo=bar' + assert responses.calls[1].response.content == b'test' + + run() + assert_reset() + + +def test_connection_error(): + @responses.activate + def run(): + responses.add(responses.GET, 'http://example.com') + + with pytest.raises(ConnectionError): + requests.get('http://example.com/foo') + + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == 'http://example.com/foo' + assert type(responses.calls[0].response) is ConnectionError + assert responses.calls[0].response.request + + run() + assert_reset() + + +def test_match_querystring(): + @responses.activate + def run(): + url = 'http://example.com?test=1&foo=bar' + responses.add( + responses.GET, url, + match_querystring=True, body=b'test') + resp = requests.get('http://example.com?test=1&foo=bar') + assert_response(resp, 'test') + resp = requests.get('http://example.com?foo=bar&test=1') + assert_response(resp, 'test') + + run() + assert_reset() + + +def test_match_querystring_error(): + @responses.activate + def run(): + responses.add( + responses.GET, 'http://example.com/?test=1', + match_querystring=True) + + with pytest.raises(ConnectionError): + requests.get('http://example.com/foo/?test=2') + + run() + assert_reset() + + +def test_match_querystring_regex(): + @responses.activate + def run(): + """Note that `match_querystring` value shouldn't matter when passing a + regular expression""" + + responses.add( + responses.GET, re.compile(r'http://example\.com/foo/\?test=1'), + body='test1', match_querystring=True) + + resp = requests.get('http://example.com/foo/?test=1') + assert_response(resp, 'test1') + + responses.add( + responses.GET, re.compile(r'http://example\.com/foo/\?test=2'), + body='test2', match_querystring=False) + + resp = requests.get('http://example.com/foo/?test=2') + assert_response(resp, 'test2') + + run() + assert_reset() + + +def test_match_querystring_error_regex(): + @responses.activate + def 
run(): + """Note that `match_querystring` value shouldn't matter when passing a + regular expression""" + + responses.add( + responses.GET, re.compile(r'http://example\.com/foo/\?test=1'), + match_querystring=True) + + with pytest.raises(ConnectionError): + requests.get('http://example.com/foo/?test=3') + + responses.add( + responses.GET, re.compile(r'http://example\.com/foo/\?test=2'), + match_querystring=False) + + with pytest.raises(ConnectionError): + requests.get('http://example.com/foo/?test=4') + + run() + assert_reset() + + +def test_accept_string_body(): + @responses.activate + def run(): + url = 'http://example.com/' + responses.add( + responses.GET, url, body='test') + resp = requests.get(url) + assert_response(resp, 'test') + + run() + assert_reset() + + +def test_accept_json_body(): + @responses.activate + def run(): + content_type = 'application/json' + + url = 'http://example.com/' + responses.add( + responses.GET, url, json={"message": "success"}) + resp = requests.get(url) + assert_response(resp, '{"message": "success"}', content_type) + + url = 'http://example.com/1/' + responses.add(responses.GET, url, json=[]) + resp = requests.get(url) + assert_response(resp, '[]', content_type) + + run() + assert_reset() + + +def test_no_content_type(): + @responses.activate + def run(): + url = 'http://example.com/' + responses.add( + responses.GET, url, body='test', content_type=None) + resp = requests.get(url) + assert_response(resp, 'test', content_type=None) + + run() + assert_reset() + + +def test_throw_connection_error_explicit(): + @responses.activate + def run(): + url = 'http://example.com' + exception = HTTPError('HTTP Error') + responses.add( + responses.GET, url, exception) + + with pytest.raises(HTTPError) as HE: + requests.get(url) + + assert str(HE.value) == 'HTTP Error' + + run() + assert_reset() + + +def test_callback(): + body = b'test callback' + status = 400 + reason = 'Bad Request' + headers = {'foo': 'bar'} + url = 'http://example.com/' 
+ + def request_callback(request): + return (status, headers, body) + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert resp.reason == reason + assert 'foo' in resp.headers + assert resp.headers['foo'] == 'bar' + + run() + assert_reset() + + +def test_callback_no_content_type(): + body = b'test callback' + status = 400 + reason = 'Bad Request' + headers = {'foo': 'bar'} + url = 'http://example.com/' + + def request_callback(request): + return (status, headers, body) + + @responses.activate + def run(): + responses.add_callback( + responses.GET, url, request_callback, content_type=None) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert resp.reason == reason + assert 'foo' in resp.headers + assert 'Content-Type' not in resp.headers + + run() + assert_reset() + + +def test_regular_expression_url(): + @responses.activate + def run(): + url = re.compile(r'https?://(.*\.)?example.com') + responses.add(responses.GET, url, body=b'test') + + resp = requests.get('http://example.com') + assert_response(resp, 'test') + + resp = requests.get('https://example.com') + assert_response(resp, 'test') + + resp = requests.get('https://uk.example.com') + assert_response(resp, 'test') + + with pytest.raises(ConnectionError): + requests.get('https://uk.exaaample.com') + + run() + assert_reset() + + +def test_custom_adapter(): + @responses.activate + def run(): + url = "http://example.com" + responses.add(responses.GET, url, body=b'test') + + calls = [0] + + class DummyAdapter(requests.adapters.HTTPAdapter): + def send(self, *a, **k): + calls[0] += 1 + return super(DummyAdapter, self).send(*a, **k) + + # Test that the adapter is actually used + session = requests.Session() + session.mount("http://", DummyAdapter()) + + resp = session.get(url, allow_redirects=False) + assert 
calls[0] == 1 + + # Test that the response is still correctly emulated + session = requests.Session() + session.mount("http://", DummyAdapter()) + + resp = session.get(url) + assert_response(resp, 'test') + + run() + + +def test_responses_as_context_manager(): + def run(): + with responses.mock: + responses.add(responses.GET, 'http://example.com', body=b'test') + resp = requests.get('http://example.com') + assert_response(resp, 'test') + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == 'http://example.com/' + assert responses.calls[0].response.content == b'test' + + resp = requests.get('http://example.com?foo=bar') + assert_response(resp, 'test') + assert len(responses.calls) == 2 + assert (responses.calls[1].request.url == + 'http://example.com/?foo=bar') + assert responses.calls[1].response.content == b'test' + + run() + assert_reset() + + +def test_activate_doesnt_change_signature(): + def test_function(a, b=None): + return (a, b) + + decorated_test_function = responses.activate(test_function) + assert getargspec(test_function) == getargspec(decorated_test_function) + assert decorated_test_function(1, 2) == test_function(1, 2) + assert decorated_test_function(3) == test_function(3) + + +def test_activate_doesnt_change_signature_for_method(): + class TestCase(object): + + def test_function(self, a, b=None): + return (self, a, b) + + test_case = TestCase() + argspec = getargspec(test_case.test_function) + decorated_test_function = responses.activate(test_case.test_function) + assert argspec == getargspec(decorated_test_function) + assert decorated_test_function(1, 2) == test_case.test_function(1, 2) + assert decorated_test_function(3) == test_case.test_function(3) + + +def test_response_cookies(): + body = b'test callback' + status = 200 + headers = {'set-cookie': 'session_id=12345; a=b; c=d'} + url = 'http://example.com/' + + def request_callback(request): + return (status, headers, body) + + @responses.activate + def run(): + 
responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert 'session_id' in resp.cookies + assert resp.cookies['session_id'] == '12345' + assert resp.cookies['a'] == 'b' + assert resp.cookies['c'] == 'd' + run() + assert_reset() + + +def test_assert_all_requests_are_fired(): + def run(): + with pytest.raises(AssertionError) as excinfo: + with responses.RequestsMock( + assert_all_requests_are_fired=True) as m: + m.add(responses.GET, 'http://example.com', body=b'test') + assert 'http://example.com' in str(excinfo.value) + assert responses.GET in str(excinfo) + + # check that assert_all_requests_are_fired default to True + with pytest.raises(AssertionError): + with responses.RequestsMock() as m: + m.add(responses.GET, 'http://example.com', body=b'test') + + # check that assert_all_requests_are_fired doesn't swallow exceptions + with pytest.raises(ValueError): + with responses.RequestsMock() as m: + m.add(responses.GET, 'http://example.com', body=b'test') + raise ValueError() + + run() + assert_reset() + + +def test_allow_redirects_samehost(): + redirecting_url = 'http://example.com' + final_url_path = '/1' + final_url = '{0}{1}'.format(redirecting_url, final_url_path) + url_re = re.compile(r'^http://example.com(/)?(\d+)?$') + + def request_callback(request): + # endpoint of chained redirect + if request.url.endswith(final_url_path): + return 200, (), b'test' + # otherwise redirect to an integer path + else: + if request.url.endswith('/0'): + n = 1 + else: + n = 0 + redirect_headers = {'location': '/{0!s}'.format(n)} + return 301, redirect_headers, None + + def run(): + # setup redirect + with responses.mock: + responses.add_callback(responses.GET, url_re, request_callback) + resp_no_redirects = requests.get(redirecting_url, + allow_redirects=False) + assert resp_no_redirects.status_code == 301 + assert len(responses.calls) == 1 # 1x300 + assert 
responses.calls[0][1].status_code == 301 + assert_reset() + + with responses.mock: + responses.add_callback(responses.GET, url_re, request_callback) + resp_yes_redirects = requests.get(redirecting_url, + allow_redirects=True) + assert len(responses.calls) == 3 # 2x300 + 1x200 + assert len(resp_yes_redirects.history) == 2 + assert resp_yes_redirects.status_code == 200 + assert final_url == resp_yes_redirects.url + status_codes = [call[1].status_code for call in responses.calls] + assert status_codes == [301, 301, 200] + assert_reset() + + run() + assert_reset() diff --git a/moto/packages/responses/tox.ini b/moto/packages/responses/tox.ini new file mode 100644 index 000000000..0a31c03ab --- /dev/null +++ b/moto/packages/responses/tox.ini @@ -0,0 +1,11 @@ + +[tox] +envlist = {py26,py27,py32,py33,py34,py35} + +[testenv] +deps = + pytest + pytest-cov + pytest-flakes +commands = + py.test . --cov responses --cov-report term-missing --flakes From 468a1b970c8fcf6e00a6809e345e9304d9af935c Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 15 Feb 2017 22:47:33 -0500 Subject: [PATCH 046/213] Add responses dependencies. --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 52635d00b..ee9c07aed 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,8 @@ from setuptools import setup, find_packages install_requires = [ "Jinja2>=2.8", "boto>=2.36.0", - "requests", + "cookies", + "requests>=2.0", "xmltodict", "six", "werkzeug", From cad185c74da5ec14cbd3f3212ad9e6f7b8303c6d Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 16 Feb 2017 22:51:04 -0500 Subject: [PATCH 047/213] Cleanup headers and encoding. 
--- moto/apigateway/models.py | 2 +- moto/apigateway/responses.py | 40 +++++----- moto/awslambda/models.py | 2 +- moto/awslambda/responses.py | 23 +++--- moto/core/models.py | 10 ++- moto/core/responses.py | 10 +-- moto/core/utils.py | 28 ++++++- moto/datapipeline/responses.py | 2 +- moto/dynamodb/responses.py | 2 +- moto/dynamodb2/responses.py | 2 +- moto/ecs/responses.py | 2 +- moto/events/responses.py | 2 +- moto/kinesis/responses.py | 2 +- moto/kms/responses.py | 2 +- moto/opsworks/responses.py | 2 +- moto/s3/responses.py | 102 +++++++++++++----------- moto/swf/responses.py | 2 +- tests/test_sns/test_publishing.py | 2 +- tests/test_sns/test_publishing_boto3.py | 2 +- 19 files changed, 138 insertions(+), 101 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index be0bfa434..bab0bc1d0 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -331,7 +331,7 @@ class RestAPI(object): def update_integration_mocks(self, stage_name): stage_url = STAGE_URL.format(api_id=self.id, region_name=self.region_name, stage_name=stage_name) - responses.add_callback(responses.GET, stage_url, callback=self.resource_callback) + responses.add_callback(responses.GET, stage_url.lower(), callback=self.resource_callback) def create_stage(self, name, deployment_id,variables=None,description='',cacheClusterEnabled=None,cacheClusterSize=None): if variables is None: diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index e8c353f4e..a7bb28c6e 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -10,11 +10,11 @@ from .exceptions import StageNotFoundException class APIGatewayResponse(BaseResponse): def _get_param(self, key): - return json.loads(self.body.decode("ascii")).get(key) + return json.loads(self.body).get(key) def _get_param_with_default_value(self, key, default): - jsonbody = json.loads(self.body.decode("ascii")) + jsonbody = json.loads(self.body) if key in jsonbody: return 
jsonbody.get(key) @@ -30,14 +30,14 @@ class APIGatewayResponse(BaseResponse): if self.method == 'GET': apis = self.backend.list_apis() - return 200, headers, json.dumps({"item": [ + return 200, {}, json.dumps({"item": [ api.to_dict() for api in apis ]}) elif self.method == 'POST': name = self._get_param('name') description = self._get_param('description') rest_api = self.backend.create_rest_api(name, description) - return 200, headers, json.dumps(rest_api.to_dict()) + return 200, {}, json.dumps(rest_api.to_dict()) def restapis_individual(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -45,10 +45,10 @@ class APIGatewayResponse(BaseResponse): if self.method == 'GET': rest_api = self.backend.get_rest_api(function_id) - return 200, headers, json.dumps(rest_api.to_dict()) + return 200, {}, json.dumps(rest_api.to_dict()) elif self.method == 'DELETE': rest_api = self.backend.delete_rest_api(function_id) - return 200, headers, json.dumps(rest_api.to_dict()) + return 200, {}, json.dumps(rest_api.to_dict()) def resources(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -56,7 +56,7 @@ class APIGatewayResponse(BaseResponse): if self.method == 'GET': resources = self.backend.list_resources(function_id) - return 200, headers, json.dumps({"item": [ + return 200, {}, json.dumps({"item": [ resource.to_dict() for resource in resources ]}) @@ -72,7 +72,7 @@ class APIGatewayResponse(BaseResponse): resource = self.backend.create_resource(function_id, resource_id, path_part) elif self.method == 'DELETE': resource = self.backend.delete_resource(function_id, resource_id) - return 200, headers, json.dumps(resource.to_dict()) + return 200, {}, json.dumps(resource.to_dict()) def resource_methods(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -83,11 +83,11 @@ class APIGatewayResponse(BaseResponse): if self.method == 'GET': method = self.backend.get_method(function_id, resource_id, 
method_type) - return 200, headers, json.dumps(method) + return 200, {}, json.dumps(method) elif self.method == 'PUT': authorization_type = self._get_param("authorizationType") method = self.backend.create_method(function_id, resource_id, method_type, authorization_type) - return 200, headers, json.dumps(method) + return 200, {}, json.dumps(method) def resource_method_responses(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -103,7 +103,7 @@ class APIGatewayResponse(BaseResponse): method_response = self.backend.create_method_response(function_id, resource_id, method_type, response_code) elif self.method == 'DELETE': method_response = self.backend.delete_method_response(function_id, resource_id, method_type, response_code) - return 200, headers, json.dumps(method_response) + return 200, {}, json.dumps(method_response) def restapis_stages(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -123,9 +123,9 @@ class APIGatewayResponse(BaseResponse): cacheClusterEnabled=cacheClusterEnabled, cacheClusterSize=cacheClusterSize) elif self.method == 'GET': stages = self.backend.get_stages(function_id) - return 200, headers, json.dumps({"item": stages}) + return 200, {}, json.dumps({"item": stages}) - return 200, headers, json.dumps(stage_response) + return 200, {}, json.dumps(stage_response) def stages(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -137,11 +137,11 @@ class APIGatewayResponse(BaseResponse): try: stage_response = self.backend.get_stage(function_id, stage_name) except StageNotFoundException as error: - return error.code, headers,'{{"message":"{0}","code":"{1}"}}'.format(error.message,error.error_type) + return error.code, {},'{{"message":"{0}","code":"{1}"}}'.format(error.message,error.error_type) elif self.method == 'PATCH': patch_operations = self._get_param('patchOperations') stage_response = self.backend.update_stage(function_id, stage_name, patch_operations) 
- return 200, headers, json.dumps(stage_response) + return 200, {}, json.dumps(stage_response) def integrations(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -159,7 +159,7 @@ class APIGatewayResponse(BaseResponse): integration_response = self.backend.create_integration(function_id, resource_id, method_type, integration_type, uri, request_templates=request_templates) elif self.method == 'DELETE': integration_response = self.backend.delete_integration(function_id, resource_id, method_type) - return 200, headers, json.dumps(integration_response) + return 200, {}, json.dumps(integration_response) def integration_responses(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -182,7 +182,7 @@ class APIGatewayResponse(BaseResponse): integration_response = self.backend.delete_integration_response( function_id, resource_id, method_type, status_code ) - return 200, headers, json.dumps(integration_response) + return 200, {}, json.dumps(integration_response) def deployments(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -190,13 +190,13 @@ class APIGatewayResponse(BaseResponse): if self.method == 'GET': deployments = self.backend.get_deployments(function_id) - return 200, headers, json.dumps({"item": deployments}) + return 200, {}, json.dumps({"item": deployments}) elif self.method == 'POST': name = self._get_param("stageName") description = self._get_param_with_default_value("description","") stage_variables = self._get_param_with_default_value('variables',{}) deployment = self.backend.create_deployment(function_id, name, description,stage_variables) - return 200, headers, json.dumps(deployment) + return 200, {}, json.dumps(deployment) def individual_deployment(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -208,4 +208,4 @@ class APIGatewayResponse(BaseResponse): deployment = self.backend.get_deployment(function_id, deployment_id) elif 
self.method == 'DELETE': deployment = self.backend.delete_deployment(function_id, deployment_id) - return 200, headers, json.dumps(deployment) + return 200, {}, json.dumps(deployment) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 069717ca4..e8595cc22 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -196,7 +196,7 @@ class LambdaBackend(BaseBackend): def __init__(self): self._functions = {} - + def has_function(self, function_name): return function_name in self._functions diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 708a8796e..0cd7c57ea 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -36,6 +36,7 @@ class LambdaResponse(BaseResponse): raise ValueError("Cannot handle request") def _invoke(self, request, full_url, headers): + response_headers = {} lambda_backend = self.get_lambda_backend(full_url) path = request.path if hasattr(request, 'path') else request.path_url @@ -43,15 +44,15 @@ class LambdaResponse(BaseResponse): if lambda_backend.has_function(function_name): fn = lambda_backend.get_function(function_name) - payload = fn.invoke(request, headers) - headers['Content-Length'] = str(len(payload)) - return 202, headers, payload + payload = fn.invoke(request, response_headers) + response_headers['Content-Length'] = str(len(payload)) + return 202, response_headers, payload else: - return 404, headers, "{}" + return 404, response_headers, "{}" def _list_functions(self, request, full_url, headers): lambda_backend = self.get_lambda_backend(full_url) - return 200, headers, json.dumps({ + return 200, {}, json.dumps({ "Functions": [fn.get_configuration() for fn in lambda_backend.list_functions()], # "NextMarker": str(uuid.uuid4()), }) @@ -62,10 +63,10 @@ class LambdaResponse(BaseResponse): try: fn = lambda_backend.create_function(spec) except ValueError as e: - return 400, headers, json.dumps({"Error": {"Code": e.args[0], "Message": e.args[1]}}) + return 400, {}, 
json.dumps({"Error": {"Code": e.args[0], "Message": e.args[1]}}) else: config = fn.get_configuration() - return 201, headers, json.dumps(config) + return 201, {}, json.dumps(config) def _delete_function(self, request, full_url, headers): lambda_backend = self.get_lambda_backend(full_url) @@ -75,9 +76,9 @@ class LambdaResponse(BaseResponse): if lambda_backend.has_function(function_name): lambda_backend.delete_function(function_name) - return 204, headers, "" + return 204, {}, "" else: - return 404, headers, "{}" + return 404, {}, "{}" def _get_function(self, request, full_url, headers): lambda_backend = self.get_lambda_backend(full_url) @@ -88,9 +89,9 @@ class LambdaResponse(BaseResponse): if lambda_backend.has_function(function_name): fn = lambda_backend.get_function(function_name) code = fn.get_code() - return 200, headers, json.dumps(code) + return 200, {}, json.dumps(code) else: - return 404, headers, "{}" + return 404, {}, "{}" def get_lambda_backend(self, full_url): from moto.awslambda.models import lambda_backends diff --git a/moto/core/models.py b/moto/core/models.py index fa6b74834..9570a86d4 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -8,7 +8,11 @@ import re from moto.packages.responses import responses from moto.packages.httpretty import HTTPretty from .responses import metadata_response -from .utils import convert_regex_to_flask_path, convert_flask_to_responses_response +from .utils import ( + convert_httpretty_response, + convert_regex_to_flask_path, + convert_flask_to_responses_response, +) class BaseMockAWS(object): nested_count = 0 @@ -93,14 +97,14 @@ class HttprettyMockAWS(BaseMockAWS): HTTPretty.register_uri( method=method, uri=re.compile(key), - body=value, + body=convert_httpretty_response(value), ) # Mock out localhost instance metadata HTTPretty.register_uri( method=method, uri=re.compile('http://169.254.169.254/latest/meta-data/.*'), - body=metadata_response + body=convert_httpretty_response(metadata_response), ) def 
disable_patching(self): diff --git a/moto/core/responses.py b/moto/core/responses.py index 337227d3c..05c882ba1 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -123,14 +123,14 @@ class BaseResponse(_TemplateEnvironmentMixin): for key, value in request.form.items(): querystring[key] = [value, ] + if isinstance(self.body, six.binary_type): + self.body = self.body.decode('utf-8') + if not querystring: querystring.update(parse_qs(urlparse(full_url).query, keep_blank_values=True)) if not querystring: if 'json' in request.headers.get('content-type', []) and self.aws_service_spec: - if isinstance(self.body, six.binary_type): - decoded = json.loads(self.body.decode('utf-8')) - else: - decoded = json.loads(self.body) + decoded = json.loads(self.body) target = request.headers.get('x-amz-target') or request.headers.get('X-Amz-Target') service, method = target.split('.') @@ -154,7 +154,7 @@ class BaseResponse(_TemplateEnvironmentMixin): self.headers = request.headers if 'host' not in self.headers: self.headers['host'] = urlparse(full_url).netloc - self.response_headers = headers + self.response_headers = {"server": "amazon.com"} def get_region_from_url(self, full_url): match = re.search(self.region_regex, full_url) diff --git a/moto/core/utils.py b/moto/core/utils.py index 0f4b20b6d..451d1a761 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -79,6 +79,29 @@ def convert_regex_to_flask_path(url_path): return url_path +class convert_httpretty_response(object): + + def __init__(self, callback): + self.callback = callback + + @property + def __name__(self): + # For instance methods, use class and method names. 
Otherwise + # use module and method name + if inspect.ismethod(self.callback): + outer = self.callback.__self__.__class__.__name__ + else: + outer = self.callback.__module__ + return "{0}.{1}".format(outer, self.callback.__name__) + + def __call__(self, request, url, headers, **kwargs): + result = self.callback(request, url, headers) + status, headers, response = result + if 'server' not in headers: + headers["server"] = "amazon.com" + return status, headers, response + + class convert_flask_to_httpretty_response(object): def __init__(self, callback): @@ -119,8 +142,11 @@ class convert_flask_to_responses_response(object): return "{0}.{1}".format(outer, self.callback.__name__) def __call__(self, request, *args, **kwargs): + for key, val in request.headers.items(): + if isinstance(val, six.binary_type): + request.headers[key] = val.decode("utf-8") + result = self.callback(request, request.url, request.headers) - # result is a status, headers, response tuple status, headers, response = result return status, headers, response diff --git a/moto/datapipeline/responses.py b/moto/datapipeline/responses.py index 70d19d189..2607f685d 100644 --- a/moto/datapipeline/responses.py +++ b/moto/datapipeline/responses.py @@ -12,7 +12,7 @@ class DataPipelineResponse(BaseResponse): def parameters(self): # TODO this should really be moved to core/responses.py if self.body: - return json.loads(self.body.decode("utf-8")) + return json.loads(self.body) else: return self.querystring diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py index 226d5d11a..59cff0395 100644 --- a/moto/dynamodb/responses.py +++ b/moto/dynamodb/responses.py @@ -51,7 +51,7 @@ class DynamoHandler(BaseResponse): return status, self.response_headers, dynamo_json_dump({'__type': type_}) def call_action(self): - body = self.body.decode('utf-8') + body = self.body if 'GetSessionToken' in body: return 200, self.response_headers, sts_handler() diff --git a/moto/dynamodb2/responses.py 
b/moto/dynamodb2/responses.py index 081afc2c4..0957bfa89 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -52,7 +52,7 @@ class DynamoHandler(BaseResponse): return status, self.response_headers, dynamo_json_dump({'__type': type_}) def call_action(self): - body = self.body.decode('utf-8') + body = self.body if 'GetSessionToken' in body: return 200, self.response_headers, sts_handler() diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index ce90de379..a8c0dddac 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -14,7 +14,7 @@ class EC2ContainerServiceResponse(BaseResponse): @property def request_params(self): try: - return json.loads(self.body.decode()) + return json.loads(self.body) except ValueError: return {} diff --git a/moto/events/responses.py b/moto/events/responses.py index 7d63388b7..75e703706 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -19,7 +19,7 @@ class EventsHandler(BaseResponse): } def load_body(self): - decoded_body = self.body.decode('utf-8') + decoded_body = self.body return json.loads(decoded_body or '{}') def error(self, type_, message='', status=400): diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index d0a90a61e..9bc9fe94c 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -11,7 +11,7 @@ class KinesisResponse(BaseResponse): @property def parameters(self): - return json.loads(self.body.decode("utf-8")) + return json.loads(self.body) @property def kinesis_backend(self): diff --git a/moto/kms/responses.py b/moto/kms/responses.py index bc928f6f3..7f0659a64 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -22,7 +22,7 @@ class KmsResponse(BaseResponse): @property def parameters(self): - return json.loads(self.body.decode("utf-8")) + return json.loads(self.body) @property def kms_backend(self): diff --git a/moto/opsworks/responses.py b/moto/opsworks/responses.py index 47fed3016..4e0979154 100644 --- 
a/moto/opsworks/responses.py +++ b/moto/opsworks/responses.py @@ -10,7 +10,7 @@ class OpsWorksResponse(BaseResponse): @property def parameters(self): - return json.loads(self.body.decode("utf-8")) + return json.loads(self.body) @property def opsworks_backend(self): diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 3fbd058f2..07be98e7b 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -104,10 +104,10 @@ class ResponseObject(_TemplateEnvironmentMixin): try: response = self._bucket_response(request, full_url, headers) except S3ClientError as s3error: - response = s3error.code, headers, s3error.description + response = s3error.code, {}, s3error.description if isinstance(response, six.string_types): - return 200, headers, response.encode("utf-8") + return 200, {}, response.encode("utf-8") else: status_code, headers, response_content = response return status_code, headers, response_content.encode("utf-8") @@ -133,8 +133,9 @@ class ResponseObject(_TemplateEnvironmentMixin): # Flask server body = request.data if body is None: - body = '' - body = body.decode('utf-8') + body = b'' + if isinstance(body, six.binary_type): + body = body.decode('utf-8') if method == 'HEAD': return self._bucket_response_head(bucket_name, headers) @@ -151,7 +152,7 @@ class ResponseObject(_TemplateEnvironmentMixin): def _bucket_response_head(self, bucket_name, headers): self.backend.get_bucket(bucket_name) - return 200, headers, "" + return 200, {}, "" def _bucket_response_get(self, bucket_name, querystring, headers): if 'uploads' in querystring: @@ -173,7 +174,7 @@ class ResponseObject(_TemplateEnvironmentMixin): elif 'lifecycle' in querystring: bucket = self.backend.get_bucket(bucket_name) if not bucket.rules: - return 404, headers, "NoSuchLifecycleConfiguration" + return 404, {}, "NoSuchLifecycleConfiguration" template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION) return template.render(rules=bucket.rules) elif 'versioning' in querystring: @@ -184,8 
+185,8 @@ class ResponseObject(_TemplateEnvironmentMixin): policy = self.backend.get_bucket_policy(bucket_name) if not policy: template = self.response_template(S3_NO_POLICY) - return 404, headers, template.render(bucket_name=bucket_name) - return 200, headers, policy + return 404, {}, template.render(bucket_name=bucket_name) + return 200, {}, policy elif 'website' in querystring: website_configuration = self.backend.get_bucket_website_configuration(bucket_name) return website_configuration @@ -211,7 +212,7 @@ class ResponseObject(_TemplateEnvironmentMixin): version_id_marker=version_id_marker ) template = self.response_template(S3_BUCKET_GET_VERSIONS) - return 200, headers, template.render( + return 200, {}, template.render( key_list=versions, bucket=bucket, prefix='', @@ -220,14 +221,14 @@ class ResponseObject(_TemplateEnvironmentMixin): is_truncated='false', ) elif querystring.get('list-type', [None])[0] == '2': - return 200, headers, self._handle_list_objects_v2(bucket_name, querystring) + return 200, {}, self._handle_list_objects_v2(bucket_name, querystring) bucket = self.backend.get_bucket(bucket_name) prefix = querystring.get('prefix', [None])[0] delimiter = querystring.get('delimiter', [None])[0] result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter) template = self.response_template(S3_BUCKET_GET_RESPONSE) - return 200, headers, template.render( + return 200, {}, template.render( bucket=bucket, prefix=prefix, delimiter=delimiter, @@ -286,7 +287,7 @@ class ResponseObject(_TemplateEnvironmentMixin): template = self.response_template(S3_BUCKET_VERSIONING) return template.render(bucket_versioning_status=ver.group(1)) else: - return 404, headers, "" + return 404, {}, "" elif 'lifecycle' in querystring: rules = xmltodict.parse(body)['LifecycleConfiguration']['Rule'] if not isinstance(rules, list): @@ -315,27 +316,27 @@ class ResponseObject(_TemplateEnvironmentMixin): else: raise template = 
self.response_template(S3_BUCKET_CREATE_RESPONSE) - return 200, headers, template.render(bucket=new_bucket) + return 200, {}, template.render(bucket=new_bucket) def _bucket_response_delete(self, body, bucket_name, querystring, headers): if 'policy' in querystring: self.backend.delete_bucket_policy(bucket_name, body) - return 204, headers, "" + return 204, {}, "" elif 'lifecycle' in querystring: bucket = self.backend.get_bucket(bucket_name) bucket.delete_lifecycle() - return 204, headers, "" + return 204, {}, "" removed_bucket = self.backend.delete_bucket(bucket_name) if removed_bucket: # Bucket exists template = self.response_template(S3_DELETE_BUCKET_SUCCESS) - return 204, headers, template.render(bucket=removed_bucket) + return 204, {}, template.render(bucket=removed_bucket) else: # Tried to delete a bucket that still has keys template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR) - return 409, headers, template.render(bucket=removed_bucket) + return 409, {}, template.render(bucket=removed_bucket) def _bucket_response_post(self, request, body, bucket_name, headers): path = request.path if hasattr(request, 'path') else request.path_url @@ -349,7 +350,7 @@ class ResponseObject(_TemplateEnvironmentMixin): else: # HTTPretty, build new form object form = {} - for kv in body.decode('utf-8').split('&'): + for kv in body.split('&'): k, v = kv.split('=') form[k] = v @@ -365,7 +366,7 @@ class ResponseObject(_TemplateEnvironmentMixin): metadata = metadata_from_headers(form) new_key.set_metadata(metadata) - return 200, headers, "" + return 200, {}, "" def _bucket_response_delete_keys(self, request, body, bucket_name, headers): template = self.response_template(S3_DELETE_KEYS_RESPONSE) @@ -382,9 +383,10 @@ class ResponseObject(_TemplateEnvironmentMixin): else: error_names.append(key_name) - return 200, headers, template.render(deleted=deleted_names, delete_errors=error_names) + return 200, {}, template.render(deleted=deleted_names, delete_errors=error_names) def 
_handle_range_header(self, request, headers, response_content): + response_headers = {} length = len(response_content) last = length - 1 _, rspec = request.headers.get('range').split('=') @@ -399,28 +401,29 @@ class ResponseObject(_TemplateEnvironmentMixin): begin = length - min(end, length) end = last else: - return 400, headers, "" + return 400, response_headers, "" if begin < 0 or end > last or begin > min(end, last): - return 416, headers, "" - headers['content-range'] = "bytes {0}-{1}/{2}".format( + return 416, response_headers, "" + response_headers['content-range'] = "bytes {0}-{1}/{2}".format( begin, end, length) - return 206, headers, response_content[begin:end + 1] + return 206, response_headers, response_content[begin:end + 1] def key_response(self, request, full_url, headers): + response_headers = {} try: response = self._key_response(request, full_url, headers) except S3ClientError as s3error: - response = s3error.code, headers, s3error.description + response = s3error.code, {}, s3error.description if isinstance(response, six.string_types): status_code = 200 response_content = response else: - status_code, headers, response_content = response + status_code, response_headers, response_content = response if status_code == 200 and 'range' in request.headers: - return self._handle_range_header(request, headers, response_content) - return status_code, headers, response_content + return self._handle_range_header(request, response_headers, response_content) + return status_code, response_headers, response_content def _key_response(self, request, full_url, headers): parsed_url = urlparse(full_url) @@ -455,11 +458,12 @@ class ResponseObject(_TemplateEnvironmentMixin): raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method)) def _key_response_get(self, bucket_name, query, key_name, headers): + response_headers = {} if query.get('uploadId'): upload_id = query['uploadId'][0] parts = 
self.backend.list_multipart(bucket_name, upload_id) template = self.response_template(S3_MULTIPART_LIST_RESPONSE) - return 200, headers, template.render( + return 200, response_headers, template.render( bucket_name=bucket_name, key_name=key_name, upload_id=upload_id, @@ -471,13 +475,14 @@ class ResponseObject(_TemplateEnvironmentMixin): bucket_name, key_name, version_id=version_id) if 'acl' in query: template = self.response_template(S3_OBJECT_ACL_RESPONSE) - return 200, headers, template.render(obj=key) + return 200, response_headers, template.render(obj=key) - headers.update(key.metadata) - headers.update(key.response_dict) - return 200, headers, key.value + response_headers.update(key.metadata) + response_headers.update(key.response_dict) + return 200, response_headers, key.value def _key_response_put(self, request, body, bucket_name, query, key_name, headers): + response_headers = {} if query.get('uploadId') and query.get('partNumber'): upload_id = query['uploadId'][0] part_number = int(query['partNumber'][0]) @@ -501,8 +506,8 @@ class ResponseObject(_TemplateEnvironmentMixin): key = self.backend.set_part( bucket_name, upload_id, part_number, body) response = "" - headers.update(key.response_dict) - return 200, headers, response + response_headers.update(key.response_dict) + return 200, response_headers, response storage_class = request.headers.get('x-amz-storage-class', 'STANDARD') acl = self._acl_from_headers(request.headers) @@ -511,7 +516,7 @@ class ResponseObject(_TemplateEnvironmentMixin): key = self.backend.get_key(bucket_name, key_name) # TODO: Support the XML-based ACL format key.set_acl(acl) - return 200, headers, "" + return 200, response_headers, "" if 'x-amz-copy-source' in request.headers: # Copy key @@ -526,8 +531,8 @@ class ResponseObject(_TemplateEnvironmentMixin): metadata = metadata_from_headers(request.headers) new_key.set_metadata(metadata, replace=True) template = self.response_template(S3_OBJECT_COPY_RESPONSE) - 
headers.update(new_key.response_dict) - return 200, headers, template.render(key=new_key) + response_headers.update(new_key.response_dict) + return 200, response_headers, template.render(key=new_key) streaming_request = hasattr(request, 'streaming') and request.streaming closing_connection = headers.get('connection') == 'close' if closing_connection and streaming_request: @@ -546,18 +551,19 @@ class ResponseObject(_TemplateEnvironmentMixin): new_key.set_acl(acl) template = self.response_template(S3_OBJECT_RESPONSE) - headers.update(new_key.response_dict) - return 200, headers, template.render(key=new_key) + response_headers.update(new_key.response_dict) + return 200, response_headers, template.render(key=new_key) def _key_response_head(self, bucket_name, query, key_name, headers): + response_headers = {} version_id = query.get('versionId', [None])[0] key = self.backend.get_key(bucket_name, key_name, version_id=version_id) if key: - headers.update(key.metadata) - headers.update(key.response_dict) - return 200, headers, "" + response_headers.update(key.metadata) + response_headers.update(key.response_dict) + return 200, response_headers, "" else: - return 404, headers, "" + return 404, response_headers, "" def _acl_from_headers(self, headers): canned_acl = headers.get('x-amz-acl', '') @@ -595,10 +601,10 @@ class ResponseObject(_TemplateEnvironmentMixin): if query.get('uploadId'): upload_id = query['uploadId'][0] self.backend.cancel_multipart(bucket_name, upload_id) - return 204, headers, "" + return 204, {}, "" self.backend.delete_key(bucket_name, key_name) template = self.response_template(S3_DELETE_OBJECT_SUCCESS) - return 204, headers, template.render() + return 204, {}, template.render() def _complete_multipart_body(self, body): ps = minidom.parseString(body).getElementsByTagName('Part') @@ -620,7 +626,7 @@ class ResponseObject(_TemplateEnvironmentMixin): key_name=key_name, upload_id=multipart.id, ) - return 200, headers, response + return 200, {}, response if 
query.get('uploadId'): body = self._complete_multipart_body(body) @@ -640,7 +646,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if key.expiry_date is not None: r = 200 key.restore(int(days)) - return r, headers, "" + return r, {}, "" else: raise NotImplementedError("Method POST had only been implemented for multipart uploads and restore operations, so far") diff --git a/moto/swf/responses.py b/moto/swf/responses.py index 47d00901c..92d4957fd 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -16,7 +16,7 @@ class SWFResponse(BaseResponse): # SWF parameters are passed through a JSON body, so let's ease retrieval @property def _params(self): - return json.loads(self.body.decode("utf-8")) + return json.loads(self.body) def _check_int(self, parameter): if not isinstance(parameter, int): diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index 8f8bfb0a1..dae7e2b83 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -70,7 +70,7 @@ def test_publish_to_http(): last_request = responses.calls[-1].request last_request.method.should.equal("POST") - parse_qs(last_request.body.decode('utf-8')).should.equal({ + parse_qs(last_request.body).should.equal({ "Type": ["Notification"], "MessageId": [message_id], "TopicArn": ["arn:aws:sns:{0}:123456789012:some-topic".format(conn.region.name)], diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index b37522641..e31b969f1 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -75,7 +75,7 @@ def test_publish_to_http(): last_request = responses.calls[-2].request last_request.method.should.equal("POST") - parse_qs(last_request.body.decode('utf-8')).should.equal({ + parse_qs(last_request.body).should.equal({ "Type": ["Notification"], "MessageId": [message_id], "TopicArn": ["arn:aws:sns:{0}:123456789012:some-topic".format(conn._client_config.region_name)], From 
d28f083a0baeb8500eb50e15a84e75e724c65071 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 18 Feb 2017 09:19:08 -0500 Subject: [PATCH 048/213] Cleanup apigateway callback. --- moto/apigateway/models.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index bab0bc1d0..6ce831186 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -316,9 +316,8 @@ class RestAPI(object): return resource # TODO deal with no matching resource - def resource_callback(self, request, full_url=None, headers=None): - if not headers: - headers = request.headers + def resource_callback(self, request): + headers = request.headers path = request.path if hasattr(request, 'path') else request.path_url path_after_stage_name = '/'.join(path.split("/")[2:]) From 480c1bba1468852d41f2f656fd27ad82e8f30047 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 18 Feb 2017 09:24:09 -0500 Subject: [PATCH 049/213] Add rest of deprecated decorators. 
--- moto/__init__.py | 8 ++++---- moto/opsworks/__init__.py | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/moto/__init__.py b/moto/__init__.py index 4accf1d0c..5a16a0a8e 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -5,20 +5,20 @@ import logging __title__ = 'moto' __version__ = '0.4.31' -from .apigateway import mock_apigateway # flake8: noqa +from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8: noqa -from .awslambda import mock_lambda # flake8: noqa +from .awslambda import mock_lambda, mock_lambda_deprecated # flake8: noqa from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated # flake8: noqa from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa from .ec2 import mock_ec2, mock_ec2_deprecated # flake8: noqa -from .ecs import mock_ecs # flake8: noqa +from .ecs import mock_ecs, mock_ecs_deprecated # flake8: noqa from .elb import mock_elb, mock_elb_deprecated # flake8: noqa from .emr import mock_emr, mock_emr_deprecated # flake8: noqa from .glacier import mock_glacier, mock_glacier_deprecated # flake8: noqa -from .opsworks import mock_opsworks # flake8: noqa +from .opsworks import mock_opsworks, mock_opsworks_deprecated # flake8: noqa from .iam import mock_iam, mock_iam_deprecated # flake8: noqa from .kinesis import mock_kinesis, mock_kinesis_deprecated # flake8: noqa from .kms import mock_kms, mock_kms_deprecated # flake8: noqa diff --git a/moto/opsworks/__init__.py b/moto/opsworks/__init__.py index 75f49eba5..d2da1a6a8 100644 --- a/moto/opsworks/__init__.py +++ b/moto/opsworks/__init__.py @@ -4,3 +4,4 @@ from ..core.models import 
MockAWS, base_decorator, HttprettyMockAWS, deprecated_ opsworks_backend = opsworks_backends['us-east-1'] mock_opsworks = base_decorator(opsworks_backends) +mock_opsworks_deprecated = deprecated_base_decorator(opsworks_backends) From 6785d359d30aa4a9e03116232e011132fb9d8bc8 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 18 Feb 2017 09:25:42 -0500 Subject: [PATCH 050/213] Cleanup apigateway callback. --- moto/apigateway/models.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 6ce831186..b6fa2df02 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -317,16 +317,13 @@ class RestAPI(object): # TODO deal with no matching resource def resource_callback(self, request): - headers = request.headers - - path = request.path if hasattr(request, 'path') else request.path_url - path_after_stage_name = '/'.join(path.split("/")[2:]) + path_after_stage_name = '/'.join(request.path_url.split("/")[2:]) if not path_after_stage_name: path_after_stage_name = '/' resource = self.get_resource_for_path(path_after_stage_name) status_code, response = resource.get_response(request) - return status_code, headers, response + return status_code, {}, response def update_integration_mocks(self, stage_name): stage_url = STAGE_URL.format(api_id=self.id, region_name=self.region_name, stage_name=stage_name) From d0fe1a09560114837d0a2fee76d36380f66378c1 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 18 Feb 2017 09:31:47 -0500 Subject: [PATCH 051/213] Remove pdb. 
--- moto/kinesis/responses.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index 9bc9fe94c..9aed719d5 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -19,10 +19,7 @@ class KinesisResponse(BaseResponse): @property def is_firehose(self): - try: - host = self.headers.get('host') or self.headers['Host'] - except KeyError: - import pdb;pdb.set_trace() + host = self.headers.get('host') or self.headers['Host'] return host.startswith('firehose') def create_stream(self): From 51df02e7cf922d17e3fcb34993ada166444f11cd Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 20 Feb 2017 14:31:19 -0500 Subject: [PATCH 052/213] Cleanup Server host parsing. --- moto/core/models.py | 23 +++++++++++++---------- moto/server.py | 12 +++++++++--- tests/test_core/test_server.py | 6 +++--- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index 9570a86d4..9675d514a 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -3,6 +3,7 @@ from __future__ import absolute_import import functools import inspect +import os import re from moto.packages.responses import responses @@ -48,7 +49,9 @@ class BaseMockAWS(object): if self.__class__.nested_count < 0: raise RuntimeError('Called stop() before start().') - self.disable_patching() + + if self.__class__.nested_count == 0: + self.disable_patching() def decorate_callable(self, func, reset): def wrapper(*args, **kwargs): @@ -108,9 +111,8 @@ class HttprettyMockAWS(BaseMockAWS): ) def disable_patching(self): - if self.__class__.nested_count == 0: - HTTPretty.disable() - HTTPretty.reset() + HTTPretty.disable() + HTTPretty.reset() RESPONSES_METHODS = [responses.GET, responses.DELETE, responses.HEAD, @@ -142,14 +144,15 @@ class ResponsesMockAWS(BaseMockAWS): pattern['stream'] = True def disable_patching(self): - if self.__class__.nested_count == 0: - try: - responses.stop() - except 
AttributeError: - pass - responses.reset() + try: + responses.stop() + except AttributeError: + pass + responses.reset() + MockAWS = ResponsesMockAWS + class Model(type): def __new__(self, clsname, bases, namespace): cls = super(Model, self).__new__(self, clsname, bases, namespace) diff --git a/moto/server.py b/moto/server.py index 1780083d8..321f5a9ea 100644 --- a/moto/server.py +++ b/moto/server.py @@ -42,8 +42,14 @@ class DomainDispatcherApplication(object): raise RuntimeError('Invalid host: "%s"' % host) - def get_application(self, host): - host = host.split(':')[0] + def get_application(self, environ): + host = environ['HTTP_HOST'].split(':')[0] + if host == "localhost": + # Fall back to parsing auth header to find service + # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] + _, _, region, service, _ = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[1].split("/") + host = "{service}.{region}.amazonaws.com".format(service=service, region=region) + with self.lock: backend = self.get_backend_for_host(host) app = self.app_instances.get(backend, None) @@ -53,7 +59,7 @@ class DomainDispatcherApplication(object): return app def __call__(self, environ, start_response): - backend_app = self.get_application(environ['HTTP_HOST']) + backend_app = self.get_application(environ) return backend_app(environ, start_response) diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index 3ee08465b..a0fb328cf 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -32,19 +32,19 @@ def test_port_argument(run_simple): def test_domain_dispatched(): dispatcher = DomainDispatcherApplication(create_backend_app) - backend_app = dispatcher.get_application("email.us-east1.amazonaws.com") + backend_app = dispatcher.get_application({"HTTP_HOST": "email.us-east1.amazonaws.com"}) keys = list(backend_app.view_functions.keys()) keys[0].should.equal('EmailResponse.dispatch') def test_domain_without_matches(): dispatcher = 
DomainDispatcherApplication(create_backend_app) - dispatcher.get_application.when.called_with("not-matching-anything.com").should.throw(RuntimeError) + dispatcher.get_application.when.called_with({"HTTP_HOST": "not-matching-anything.com"}).should.throw(RuntimeError) def test_domain_dispatched_with_service(): # If we pass a particular service, always return that. dispatcher = DomainDispatcherApplication(create_backend_app, service="s3") - backend_app = dispatcher.get_application("s3.us-east1.amazonaws.com") + backend_app = dispatcher.get_application({"HTTP_HOST": "s3.us-east1.amazonaws.com"}) keys = set(backend_app.view_functions.keys()) keys.should.contain('ResponseObject.key_response') From fe46b4c5b92285aab7367a40714693281b6d3380 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 20 Feb 2017 15:50:49 -0500 Subject: [PATCH 053/213] Remove extra line in test. --- moto/core/urls.py | 12 ++++++++++++ tests/test_sqs/test_sqs.py | 1 - 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 moto/core/urls.py diff --git a/moto/core/urls.py b/moto/core/urls.py new file mode 100644 index 000000000..ece486058 --- /dev/null +++ b/moto/core/urls.py @@ -0,0 +1,12 @@ +from __future__ import unicode_literals +from .responses import MotoAPIResponse + +url_bases = [ + "https?://motoapi.amazonaws.com" +] + +response_instance = MotoAPIResponse() + +url_paths = { + '{0}/moto-api/reset': response_instance.reset_response, +} diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index b3eaaab75..fd496c214 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -54,7 +54,6 @@ def test_message_send(): @mock_sqs def test_set_queue_attributes(): sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client('sqs', region_name='us-west-1') queue = sqs.create_queue(QueueName="blah") queue.attributes['VisibilityTimeout'].should.equal("30") From cb28eeefbbe75a289fe3ca165bfc73ca2baddd9e Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: 
Mon, 20 Feb 2017 18:25:10 -0500 Subject: [PATCH 054/213] Add moto reset API. --- moto/backends.py | 4 ++++ moto/core/__init__.py | 2 +- moto/core/models.py | 50 ++++++++++++++++++++++++++++++++++++++++++ moto/core/responses.py | 8 +++++++ moto/server.py | 8 ++++++- moto/sns/urls.py | 2 +- 6 files changed, 71 insertions(+), 3 deletions(-) diff --git a/moto/backends.py b/moto/backends.py index 0cbcf4810..4cebe560a 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -5,10 +5,12 @@ from moto.autoscaling import autoscaling_backend from moto.awslambda import lambda_backend from moto.cloudformation import cloudformation_backend from moto.cloudwatch import cloudwatch_backend +from moto.core import moto_api_backend from moto.datapipeline import datapipeline_backend from moto.dynamodb import dynamodb_backend from moto.dynamodb2 import dynamodb_backend2 from moto.ec2 import ec2_backend +from moto.ecs import ecs_backend from moto.elb import elb_backend from moto.emr import emr_backend from moto.events import events_backend @@ -35,11 +37,13 @@ BACKENDS = { 'dynamodb': dynamodb_backend, 'dynamodb2': dynamodb_backend2, 'ec2': ec2_backend, + 'ecs': ecs_backend, 'elb': elb_backend, 'events': events_backend, 'emr': emr_backend, 'glacier': glacier_backend, 'iam': iam_backend, + 'moto_api': moto_api_backend, 'opsworks': opsworks_backend, 'kinesis': kinesis_backend, 'kms': kms_backend, diff --git a/moto/core/__init__.py b/moto/core/__init__.py index 1b909183e..664637b76 100644 --- a/moto/core/__init__.py +++ b/moto/core/__init__.py @@ -1,2 +1,2 @@ from __future__ import unicode_literals -from .models import BaseBackend # flake8: noqa +from .models import BaseBackend, moto_api_backend # flake8: noqa diff --git a/moto/core/models.py b/moto/core/models.py index 9675d514a..8fac8a990 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -153,6 +153,38 @@ class ResponsesMockAWS(BaseMockAWS): MockAWS = ResponsesMockAWS +class ServerModeMockAWS(BaseMockAWS): + + def reset(self): + 
import requests + requests.post("http://localhost:8086/moto-api/reset") + + def enable_patching(self): + if self.__class__.nested_count == 1: + # Just started + self.reset() + + from boto3 import client as real_boto3_client, resource as real_boto3_resource + import mock + + def fake_boto3_client(*args, **kwargs): + if 'endpoint_url' not in kwargs: + kwargs['endpoint_url'] = "http://localhost:8086" + return real_boto3_client(*args, **kwargs) + def fake_boto3_resource(*args, **kwargs): + if 'endpoint_url' not in kwargs: + kwargs['endpoint_url'] = "http://localhost:8086" + return real_boto3_resource(*args, **kwargs) + self._client_patcher = mock.patch('boto3.client', fake_boto3_client) + self._resource_patcher = mock.patch('boto3.resource', fake_boto3_resource) + self._client_patcher.start() + self._resource_patcher.start() + + def disable_patching(self): + if self._client_patcher: + self._client_patcher.stop() + self._resource_patcher.stop() + class Model(type): def __new__(self, clsname, bases, namespace): cls = super(Model, self).__new__(self, clsname, bases, namespace) @@ -257,6 +289,9 @@ class base_decorator(object): self.backends = backends def __call__(self, func=None): + if self.mock_backend == MockAWS and os.environ.get('TEST_SERVER_MODE', '0').lower() == 'true': + self.mock_backend = ServerModeMockAWS + if func: return self.mock_backend(self.backends)(func) else: @@ -265,3 +300,18 @@ class base_decorator(object): class deprecated_base_decorator(base_decorator): mock_backend = HttprettyMockAWS + + +class MotoAPIBackend(BaseBackend): + def __init__(self): + super(MotoAPIBackend, self).__init__() + + def reset(self): + from moto.backends import BACKENDS + for name, backend in BACKENDS.items(): + if name == "moto_api": + continue + backend.reset() + self.__init__() + +moto_api_backend = MotoAPIBackend() diff --git a/moto/core/responses.py b/moto/core/responses.py index 05c882ba1..9b22b58cf 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ 
-366,6 +366,14 @@ def metadata_response(request, full_url, headers): return 200, headers, result +class MotoAPIResponse(BaseResponse): + + def reset_response(self, request, full_url, headers): + from .models import moto_api_backend + moto_api_backend.reset() + return 200, {}, json.dumps({"status": "ok"}) + + class _RecursiveDictRef(object): """Store a recursive reference to dict.""" def __init__(self): diff --git a/moto/server.py b/moto/server.py index 321f5a9ea..0b5ff7cae 100644 --- a/moto/server.py +++ b/moto/server.py @@ -35,6 +35,9 @@ class DomainDispatcherApplication(object): if self.service: return self.service + if host in BACKENDS: + return host + for backend_name, backend in BACKENDS.items(): for url_base in backend.url_bases: if re.match(url_base, 'http://%s' % host): @@ -43,7 +46,10 @@ class DomainDispatcherApplication(object): raise RuntimeError('Invalid host: "%s"' % host) def get_application(self, environ): - host = environ['HTTP_HOST'].split(':')[0] + if environ.get('PATH_INFO', '').startswith("/moto-api"): + host = "moto_api" + else: + host = environ['HTTP_HOST'].split(':')[0] if host == "localhost": # Fall back to parsing auth header to find service # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] diff --git a/moto/sns/urls.py b/moto/sns/urls.py index 769c0c89c..518531c55 100644 --- a/moto/sns/urls.py +++ b/moto/sns/urls.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals from .responses import SNSResponse url_bases = [ - "https?://sns.(.+).amazonaws.com" + "https?://sns.(.+).amazonaws.com", ] url_paths = { From 81836b698107debc9dffe056530820c9694c8c77 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 23 Feb 2017 19:43:48 -0500 Subject: [PATCH 055/213] Get standalone server mode working for all tests. 
--- CHANGELOG.md | 3 + Makefile | 3 + README.md | 35 +-- moto/apigateway/models.py | 4 +- moto/awslambda/models.py | 10 +- moto/awslambda/responses.py | 28 +- moto/awslambda/urls.py | 9 +- moto/backends.py | 124 ++++---- moto/cloudformation/responses.py | 7 +- moto/core/__init__.py | 2 + moto/core/exceptions.py | 6 + moto/core/models.py | 81 ++--- moto/core/responses.py | 59 +--- moto/core/utils.py | 10 +- moto/dynamodb/__init__.py | 2 + moto/dynamodb2/__init__.py | 2 + moto/ec2/responses/instances.py | 1 - moto/ec2/responses/ip_addresses.py | 1 - moto/ec2/responses/spot_instances.py | 6 +- moto/emr/exceptions.py | 7 + moto/emr/models.py | 1 + moto/emr/responses.py | 11 +- moto/events/__init__.py | 1 + moto/events/urls.py | 2 +- moto/iam/__init__.py | 2 + moto/iam/urls.py | 2 +- moto/instance_metadata/__init__.py | 4 + moto/instance_metadata/models.py | 7 + moto/instance_metadata/responses.py | 47 +++ moto/instance_metadata/urls.py | 12 + moto/kinesis/responses.py | 2 +- moto/route53/__init__.py | 2 + moto/route53/responses.py | 294 +++++++++--------- moto/route53/urls.py | 26 +- moto/s3/__init__.py | 2 + moto/s3/models.py | 10 +- moto/server.py | 15 +- moto/ses/__init__.py | 2 + moto/ses/urls.py | 3 +- moto/settings.py | 3 + moto/sts/__init__.py | 2 + moto/sts/urls.py | 2 +- other_langs/sqsSample.java | 52 ++++ other_langs/test.js | 26 ++ other_langs/test.rb | 6 + setup.cfg | 2 +- tests/test_apigateway/test_apigateway.py | 7 +- tests/test_awslambda/test_lambda.py | 23 +- .../test_cloudformation_stack_crud_boto3.py | 22 +- .../test_cloudformation_stack_integration.py | 44 +-- tests/test_core/test_instance_metadata.py | 21 +- tests/test_core/test_moto_api.py | 21 ++ tests/test_dynamodb/test_dynamodb.py | 7 - tests/test_dynamodb2/test_dynamodb.py | 8 - tests/test_ec2/test_amis.py | 22 +- tests/test_ec2/test_ec2_core.py | 10 - tests/test_ec2/test_elastic_block_store.py | 46 +-- tests/test_ec2/test_elastic_ip_addresses.py | 31 +- .../test_elastic_network_interfaces.py 
| 27 +- tests/test_ec2/test_instances.py | 54 ++-- tests/test_ec2/test_internet_gateways.py | 18 +- tests/test_ec2/test_key_pairs.py | 14 +- tests/test_ec2/test_security_groups.py | 38 +-- tests/test_ec2/test_spot_instances.py | 164 ++++++---- tests/test_ec2/test_tags.py | 14 +- tests/test_emr/test_emr.py | 12 +- tests/test_emr/test_emr_boto3.py | 16 +- tests/test_iam/test_iam.py | 4 +- tests/test_kinesis/test_firehose.py | 19 +- tests/test_route53/test_route53.py | 18 +- tests/test_s3/test_s3.py | 44 ++- .../test_s3bucket_path/test_s3bucket_path.py | 10 - tests/test_sns/test_publishing.py | 17 - tests/test_sns/test_publishing_boto3.py | 15 - tests/test_sqs/test_sqs.py | 5 - tests/test_sts/test_sts.py | 2 +- tests/test_swf/utils.py | 2 - tox.ini | 1 + 78 files changed, 934 insertions(+), 760 deletions(-) create mode 100644 moto/emr/exceptions.py create mode 100644 moto/instance_metadata/__init__.py create mode 100644 moto/instance_metadata/models.py create mode 100644 moto/instance_metadata/responses.py create mode 100644 moto/instance_metadata/urls.py create mode 100644 moto/settings.py create mode 100644 other_langs/sqsSample.java create mode 100644 other_langs/test.js create mode 100644 other_langs/test.rb create mode 100644 tests/test_core/test_moto_api.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 790f6de95..912659875 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,9 @@ Latest * The normal @mock_ decorators will no longer work with boto. It is suggested that you upgrade to boto3 or use the standalone-server mode. If you would still like to use boto, you must use the @mock__deprecated decorators which will be removed in a future release. * The @mock_s3bucket_path decorator is now deprecated. Use the @mock_s3 decorator instead. 
+ Added + * Reset API: a reset API has been added to flush all of the current data ex: `requests.post("http://motoapi.amazonaws.com/moto-api/reset")` + 0.4.31 ------ diff --git a/Makefile b/Makefile index a7f08b146..58b74b2fb 100644 --- a/Makefile +++ b/Makefile @@ -9,5 +9,8 @@ test: rm -rf cover @nosetests -sv --with-coverage --cover-html ./tests/ +test_server: + @TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/ + publish: python setup.py sdist bdist_wheel upload diff --git a/README.md b/README.md index ae161dc5c..5485c63cd 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,16 @@ -# Moto - Mock Boto +# Moto - Mock AWS Services [![Build Status](https://travis-ci.org/spulec/moto.png?branch=master)](https://travis-ci.org/spulec/moto) [![Coverage Status](https://coveralls.io/repos/spulec/moto/badge.png?branch=master)](https://coveralls.io/r/spulec/moto) # In a nutshell -Moto is a library that allows your python tests to easily mock out the boto library. +Moto is a library that allows your tests to easily mock out AWS Services. -Imagine you have the following code that you want to test: +Imagine you have the following python code that you want to test: ```python -import boto -from boto.s3.key import Key +import boto3 class MyModel(object): def __init__(self, name, value): @@ -19,11 +18,9 @@ class MyModel(object): self.value = value def save(self): - conn = boto.connect_s3() - bucket = conn.get_bucket('mybucket') - k = Key(bucket) - k.key = self.name - k.set_contents_from_string(self.value) + s3 = boto3.client('s3', region_name='us-east-1') + s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value) + ``` Take a minute to think how you would have tested that in the past. @@ -31,25 +28,28 @@ Take a minute to think how you would have tested that in the past. 
Now see how you could test it with Moto: ```python -import boto +import boto3 from moto import mock_s3 from mymodule import MyModel + @mock_s3 def test_my_model_save(): - conn = boto.connect_s3() + conn = boto3.resource('s3', region_name='us-east-1') # We need to create the bucket since this is all in Moto's 'virtual' AWS account - conn.create_bucket('mybucket') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8") + + assert body == b'is awesome' ``` With the decorator wrapping the test, all the calls to s3 are automatically mocked out. The mock keeps the state of the buckets and keys. -It gets even better! Moto isn't just S3. Here's the status of the other AWS services implemented. +It gets even better! Moto isn't just for Python code and it isn't just for S3. Look at the [standalone server mode](https://github.com/spulec/moto#stand-alone-server-mode) for more information about running Moto with other languages. Here's the status of the other AWS services implemented: ```gherkin |------------------------------------------------------------------------------| @@ -193,11 +193,6 @@ def test_my_model_save(): mock.stop() ``` -## Use with other libraries (boto3) or languages - -In general, Moto doesn't rely on anything specific to Boto. It only mocks AWS endpoints, so there should be no issue with boto3 or using other languages. Feel free to open an issue if something isn't working though. If you are using another language, you will need to either use the stand-alone server mode (more below) or monkey patch the HTTP calls yourself. - - ## Stand-alone Server Mode Moto also has a stand-alone server mode. 
This allows you to utilize diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index b6fa2df02..4b09f44bc 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -326,8 +326,8 @@ class RestAPI(object): return status_code, {}, response def update_integration_mocks(self, stage_name): - stage_url = STAGE_URL.format(api_id=self.id, region_name=self.region_name, stage_name=stage_name) - responses.add_callback(responses.GET, stage_url.lower(), callback=self.resource_callback) + stage_url = STAGE_URL.format(api_id=self.id.upper(), region_name=self.region_name, stage_name=stage_name) + responses.add_callback(responses.GET, stage_url, callback=self.resource_callback) def create_stage(self, name, deployment_id,variables=None,description='',cacheClusterEnabled=None,cacheClusterSize=None): if variables is None: diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index e8595cc22..1fc139eb7 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -154,15 +154,15 @@ class LambdaFunction(object): sys.stderr = sys.__stderr__ return self.convert(result) - def invoke(self, request, headers): + def invoke(self, body, request_headers, response_headers): payload = dict() # Get the invocation type: - r = self._invoke_lambda(code=self.code, event=request.body) - if request.headers.get("x-amz-invocation-type") == "RequestResponse": + r = self._invoke_lambda(code=self.code, event=body) + if request_headers.get("x-amz-invocation-type") == "RequestResponse": encoded = base64.b64encode(r.encode('utf-8')) - headers["x-amz-log-result"] = encoded.decode('utf-8') - payload['result'] = headers["x-amz-log-result"] + response_headers["x-amz-log-result"] = encoded.decode('utf-8') + payload['result'] = response_headers["x-amz-log-result"] result = r.encode('utf-8') else: result = json.dumps(payload) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 0cd7c57ea..3fc756efa 100644 --- a/moto/awslambda/responses.py +++ 
b/moto/awslambda/responses.py @@ -10,32 +10,32 @@ from .models import lambda_backends class LambdaResponse(BaseResponse): - @classmethod - def root(cls, request, full_url, headers): + def root(self, request, full_url, headers): + self.setup_class(request, full_url, headers) if request.method == 'GET': - return cls()._list_functions(request, full_url, headers) + return self._list_functions(request, full_url, headers) elif request.method == 'POST': - return cls()._create_function(request, full_url, headers) + return self._create_function(request, full_url, headers) else: raise ValueError("Cannot handle request") - @classmethod - def function(cls, request, full_url, headers): + def function(self, request, full_url, headers): + self.setup_class(request, full_url, headers) if request.method == 'GET': - return cls()._get_function(request, full_url, headers) + return self._get_function(request, full_url, headers) elif request.method == 'DELETE': - return cls()._delete_function(request, full_url, headers) + return self._delete_function(request, full_url, headers) else: raise ValueError("Cannot handle request") - @classmethod - def invoke(cls, request, full_url, headers): + def invoke(self, request, full_url, headers): + self.setup_class(request, full_url, headers) if request.method == 'POST': - return cls()._invoke(request, full_url, headers) + return self._invoke(request, full_url) else: raise ValueError("Cannot handle request") - def _invoke(self, request, full_url, headers): + def _invoke(self, request, full_url): response_headers = {} lambda_backend = self.get_lambda_backend(full_url) @@ -44,7 +44,7 @@ class LambdaResponse(BaseResponse): if lambda_backend.has_function(function_name): fn = lambda_backend.get_function(function_name) - payload = fn.invoke(request, response_headers) + payload = fn.invoke(self.body, self.headers, response_headers) response_headers['Content-Length'] = str(len(payload)) return 202, response_headers, payload else: @@ -59,7 +59,7 @@ class 
LambdaResponse(BaseResponse): def _create_function(self, request, full_url, headers): lambda_backend = self.get_lambda_backend(full_url) - spec = json.loads(request.body.decode('utf-8')) + spec = json.loads(self.body.decode('utf-8')) try: fn = lambda_backend.create_function(spec) except ValueError as e: diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index 79a99c9f8..c63135766 100644 --- a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -5,9 +5,10 @@ url_bases = [ "https?://lambda.(.+).amazonaws.com", ] +response = LambdaResponse() + url_paths = { - # double curly braces because the `format()` method is called on the strings - '{0}/\d{{4}}-\d{{2}}-\d{{2}}/functions/?$': LambdaResponse.root, - '{0}/\d{{4}}-\d{{2}}-\d{{2}}/functions/(?P[\w_-]+)/?$': LambdaResponse.function, - '{0}/\d{{4}}-\d{{2}}-\d{{2}}/functions/(?P[\w_-]+)/invocations?$': LambdaResponse.invoke, + '{0}/(?P[^/]+)/functions/?$': response.root, + '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/?$': response.function, + '{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invocations/?$': response.invoke, } diff --git a/moto/backends.py b/moto/backends.py index 4cebe560a..5b1695e3b 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -1,67 +1,71 @@ from __future__ import unicode_literals -from moto.apigateway import apigateway_backend -from moto.autoscaling import autoscaling_backend -from moto.awslambda import lambda_backend -from moto.cloudformation import cloudformation_backend -from moto.cloudwatch import cloudwatch_backend -from moto.core import moto_api_backend -from moto.datapipeline import datapipeline_backend -from moto.dynamodb import dynamodb_backend -from moto.dynamodb2 import dynamodb_backend2 -from moto.ec2 import ec2_backend -from moto.ecs import ecs_backend -from moto.elb import elb_backend -from moto.emr import emr_backend -from moto.events import events_backend -from moto.glacier import glacier_backend -from moto.iam import iam_backend -from moto.kinesis import kinesis_backend -from 
moto.kms import kms_backend -from moto.opsworks import opsworks_backend -from moto.rds import rds_backend -from moto.redshift import redshift_backend -from moto.route53 import route53_backend -from moto.s3 import s3_backend -from moto.ses import ses_backend -from moto.sns import sns_backend -from moto.sqs import sqs_backend -from moto.sts import sts_backend +from moto.apigateway import apigateway_backends +from moto.autoscaling import autoscaling_backends +from moto.awslambda import lambda_backends +from moto.cloudformation import cloudformation_backends +from moto.cloudwatch import cloudwatch_backends +from moto.core import moto_api_backends +from moto.datapipeline import datapipeline_backends +from moto.dynamodb import dynamodb_backends +from moto.dynamodb2 import dynamodb_backends2 +from moto.ec2 import ec2_backends +from moto.ecs import ecs_backends +from moto.elb import elb_backends +from moto.emr import emr_backends +from moto.events import events_backends +from moto.glacier import glacier_backends +from moto.iam import iam_backends +from moto.instance_metadata import instance_metadata_backends +from moto.kinesis import kinesis_backends +from moto.kms import kms_backends +from moto.opsworks import opsworks_backends +from moto.rds2 import rds2_backends +from moto.redshift import redshift_backends +from moto.route53 import route53_backends +from moto.s3 import s3_backends +from moto.ses import ses_backends +from moto.sns import sns_backends +from moto.sqs import sqs_backends +from moto.sts import sts_backends BACKENDS = { - 'apigateway': apigateway_backend, - 'autoscaling': autoscaling_backend, - 'cloudformation': cloudformation_backend, - 'cloudwatch': cloudwatch_backend, - 'datapipeline': datapipeline_backend, - 'dynamodb': dynamodb_backend, - 'dynamodb2': dynamodb_backend2, - 'ec2': ec2_backend, - 'ecs': ecs_backend, - 'elb': elb_backend, - 'events': events_backend, - 'emr': emr_backend, - 'glacier': glacier_backend, - 'iam': iam_backend, - 'moto_api': 
moto_api_backend, - 'opsworks': opsworks_backend, - 'kinesis': kinesis_backend, - 'kms': kms_backend, - 'redshift': redshift_backend, - 'rds': rds_backend, - 's3': s3_backend, - 's3bucket_path': s3_backend, - 'ses': ses_backend, - 'sns': sns_backend, - 'sqs': sqs_backend, - 'sts': sts_backend, - 'route53': route53_backend, - 'lambda': lambda_backend, + 'apigateway': apigateway_backends, + 'autoscaling': autoscaling_backends, + 'cloudformation': cloudformation_backends, + 'cloudwatch': cloudwatch_backends, + 'datapipeline': datapipeline_backends, + 'dynamodb': dynamodb_backends, + 'dynamodb2': dynamodb_backends2, + 'ec2': ec2_backends, + 'ecs': ecs_backends, + 'elb': elb_backends, + 'events': events_backends, + 'emr': emr_backends, + 'glacier': glacier_backends, + 'iam': iam_backends, + 'moto_api': moto_api_backends, + 'instance_metadata': instance_metadata_backends, + 'opsworks': opsworks_backends, + 'kinesis': kinesis_backends, + 'kms': kms_backends, + 'redshift': redshift_backends, + 'rds': rds2_backends, + 's3': s3_backends, + 's3bucket_path': s3_backends, + 'ses': ses_backends, + 'sns': sns_backends, + 'sqs': sqs_backends, + 'sts': sts_backends, + 'route53': route53_backends, + 'lambda': lambda_backends, } -def get_model(name): - for backend in BACKENDS.values(): - models = getattr(backend.__class__, '__models__', {}) - if name in models: - return list(getattr(backend, models[name])()) +def get_model(name, region): + for backends in BACKENDS.values(): + for region, backend in backends.items(): + if region == region: + models = getattr(backend.__class__, '__models__', {}) + if name in models: + return list(getattr(backend, models[name])()) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index d16b3560c..3b8f53895 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -17,8 +17,11 @@ class CloudFormationResponse(BaseResponse): def _get_stack_from_s3_url(self, template_url): template_url_parts = 
urlparse(template_url) - bucket_name = template_url_parts.netloc.split(".")[0] - key_name = template_url_parts.path.lstrip("/") + if "localhost" in template_url: + bucket_name, key_name = template_url_parts.path.lstrip("/").split("/") + else: + bucket_name = template_url_parts.netloc.split(".")[0] + key_name = template_url_parts.path.lstrip("/") key = s3_backend.get_key(bucket_name, key_name) return key.value.decode("utf-8") diff --git a/moto/core/__init__.py b/moto/core/__init__.py index 664637b76..4f783d46c 100644 --- a/moto/core/__init__.py +++ b/moto/core/__init__.py @@ -1,2 +1,4 @@ from __future__ import unicode_literals from .models import BaseBackend, moto_api_backend # flake8: noqa + +moto_api_backends = {"global": moto_api_backend} diff --git a/moto/core/exceptions.py b/moto/core/exceptions.py index c66b8f257..d3a87e299 100644 --- a/moto/core/exceptions.py +++ b/moto/core/exceptions.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + from werkzeug.exceptions import HTTPException from jinja2 import DictLoader, Environment from six import text_type @@ -47,6 +49,10 @@ class RESTError(HTTPException): error_type=error_type, message=message, **kwargs) +class DryRunClientError(RESTError): + code = 400 + + class JsonRESTError(RESTError): def __init__(self, error_type, message, template='error_json', **kwargs): super(JsonRESTError, self).__init__(error_type, message, template, **kwargs) diff --git a/moto/core/models.py b/moto/core/models.py index 8fac8a990..04ff709e0 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -6,9 +6,9 @@ import inspect import os import re +from moto import settings from moto.packages.responses import responses from moto.packages.httpretty import HTTPretty -from .responses import metadata_response from .utils import ( convert_httpretty_response, convert_regex_to_flask_path, @@ -21,6 +21,15 @@ class BaseMockAWS(object): def __init__(self, backends): self.backends = backends + self.backends_for_urls = {} + from 
moto.backends import BACKENDS + default_backends = { + "instance_metadata": BACKENDS['instance_metadata']['global'], + "moto_api": BACKENDS['moto_api']['global'], + } + self.backends_for_urls.update(self.backends) + self.backends_for_urls.update(default_backends) + if self.__class__.nested_count == 0: self.reset() @@ -95,20 +104,13 @@ class HttprettyMockAWS(BaseMockAWS): HTTPretty.enable() for method in HTTPretty.METHODS: - backend = list(self.backends.values())[0] - for key, value in backend.urls.items(): - HTTPretty.register_uri( - method=method, - uri=re.compile(key), - body=convert_httpretty_response(value), - ) - - # Mock out localhost instance metadata - HTTPretty.register_uri( - method=method, - uri=re.compile('http://169.254.169.254/latest/meta-data/.*'), - body=convert_httpretty_response(metadata_response), - ) + for backend in self.backends_for_urls.values(): + for key, value in backend.urls.items(): + HTTPretty.register_uri( + method=method, + uri=re.compile(key), + body=convert_httpretty_response(value), + ) def disable_patching(self): HTTPretty.disable() @@ -126,20 +128,14 @@ class ResponsesMockAWS(BaseMockAWS): def enable_patching(self): responses.start() for method in RESPONSES_METHODS: - backend = list(self.backends.values())[0] - for key, value in backend.urls.items(): - responses.add_callback( - method=method, - url=re.compile(key), - callback=convert_flask_to_responses_response(value), - ) + for backend in self.backends_for_urls.values(): + for key, value in backend.urls.items(): + responses.add_callback( + method=method, + url=re.compile(key), + callback=convert_flask_to_responses_response(value), + ) - # Mock out localhost instance metadata - responses.add_callback( - method=method, - url=re.compile('http://169.254.169.254/latest/meta-data/.*'), - callback=convert_flask_to_responses_response(metadata_response), - ) for pattern in responses.mock._urls: pattern['stream'] = True @@ -270,10 +266,15 @@ class BaseBackend(object): return paths def 
decorator(self, func=None): - if func: - return MockAWS({'global': self})(func) + if settings.TEST_SERVER_MODE: + mocked_backend = ServerModeMockAWS({'global': self}) else: - return MockAWS({'global': self}) + mocked_backend = MockAWS({'global': self}) + + if func: + return mocked_backend(func) + else: + return mocked_backend def deprecated_decorator(self, func=None): if func: @@ -289,13 +290,15 @@ class base_decorator(object): self.backends = backends def __call__(self, func=None): - if self.mock_backend == MockAWS and os.environ.get('TEST_SERVER_MODE', '0').lower() == 'true': - self.mock_backend = ServerModeMockAWS + if self.mock_backend != HttprettyMockAWS and settings.TEST_SERVER_MODE: + mocked_backend = ServerModeMockAWS(self.backends) + else: + mocked_backend = self.mock_backend(self.backends) if func: - return self.mock_backend(self.backends)(func) + return mocked_backend(func) else: - return self.mock_backend(self.backends) + return mocked_backend class deprecated_base_decorator(base_decorator): @@ -303,15 +306,13 @@ class deprecated_base_decorator(base_decorator): class MotoAPIBackend(BaseBackend): - def __init__(self): - super(MotoAPIBackend, self).__init__() - def reset(self): from moto.backends import BACKENDS - for name, backend in BACKENDS.items(): + for name, backends in BACKENDS.items(): if name == "moto_api": continue - backend.reset() + for region_name, backend in backends.items(): + backend.reset() self.__init__() moto_api_backend = MotoAPIBackend() diff --git a/moto/core/responses.py b/moto/core/responses.py index 9b22b58cf..e558eb1dd 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -5,7 +5,7 @@ import logging import re import pytz -from boto.exception import JSONResponseError +from moto.core.exceptions import DryRunClientError from jinja2 import Environment, DictLoader, TemplateNotFound @@ -149,17 +149,19 @@ class BaseResponse(_TemplateEnvironmentMixin): self.path = urlparse(full_url).path self.querystring = querystring 
self.method = request.method - self.region = self.get_region_from_url(full_url) + self.region = self.get_region_from_url(request, full_url) self.headers = request.headers if 'host' not in self.headers: self.headers['host'] = urlparse(full_url).netloc self.response_headers = {"server": "amazon.com"} - def get_region_from_url(self, full_url): + def get_region_from_url(self, request, full_url): match = re.search(self.region_regex, full_url) if match: region = match.group(1) + elif 'Authorization' in request.headers: + region = request.headers['Authorization'].split(",")[0].split("/")[2] else: region = self.default_region return region @@ -195,6 +197,7 @@ class BaseResponse(_TemplateEnvironmentMixin): if "status" in headers: headers['status'] = str(headers['status']) return status, headers, body + raise NotImplementedError("The {0} action has not been implemented".format(action)) def _get_param(self, param_name, if_none=None): @@ -323,55 +326,19 @@ class BaseResponse(_TemplateEnvironmentMixin): def is_not_dryrun(self, action): if 'true' in self.querystring.get('DryRun', ['false']): - raise JSONResponseError(400, 'DryRunOperation', body={'message': 'An error occurred (DryRunOperation) when calling the %s operation: Request would have succeeded, but DryRun flag is set' % action}) + message = 'An error occurred (DryRunOperation) when calling the %s operation: Request would have succeeded, but DryRun flag is set' % action + raise DryRunClientError(error_type="DryRunOperation", message=message) return True -def metadata_response(request, full_url, headers): - """ - Mock response for localhost metadata - - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html - """ - - parsed_url = urlparse(full_url) - tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1) - credentials = dict( - AccessKeyId="test-key", - SecretAccessKey="test-secret-key", - Token="test-session-token", - Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ") - ) - - path 
= parsed_url.path - - meta_data_prefix = "/latest/meta-data/" - # Strip prefix if it is there - if path.startswith(meta_data_prefix): - path = path[len(meta_data_prefix):] - - if path == '': - result = 'iam' - elif path == 'iam': - result = json.dumps({ - 'security-credentials': { - 'default-role': credentials - } - }) - elif path == 'iam/security-credentials/': - result = 'default-role' - elif path == 'iam/security-credentials/default-role': - result = json.dumps(credentials) - else: - raise NotImplementedError("The {0} metadata path has not been implemented".format(path)) - return 200, headers, result - class MotoAPIResponse(BaseResponse): def reset_response(self, request, full_url, headers): - from .models import moto_api_backend - moto_api_backend.reset() - return 200, {}, json.dumps({"status": "ok"}) + if request.method == "POST": + from .models import moto_api_backend + moto_api_backend.reset() + return 200, {}, json.dumps({"status": "ok"}) + return 400, {}, json.dumps({"Error": "Need to POST to reset Moto"}) class _RecursiveDictRef(object): diff --git a/moto/core/utils.py b/moto/core/utils.py index 451d1a761..11aafbb89 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -118,12 +118,16 @@ class convert_flask_to_httpretty_response(object): return "{0}.{1}".format(outer, self.callback.__name__) def __call__(self, args=None, **kwargs): - from flask import request + from flask import request, Response result = self.callback(request, request.url, {}) # result is a status, headers, response tuple - status, headers, response = result - return response, status, headers + status, headers, content = result + + response = Response(response=content, status=status, headers=headers) + if request.method == "HEAD" and 'content-length' in headers: + response.headers['Content-Length'] = headers['content-length'] + return response class convert_flask_to_responses_response(object): diff --git a/moto/dynamodb/__init__.py b/moto/dynamodb/__init__.py index 
008050317..4c2bc04d9 100644 --- a/moto/dynamodb/__init__.py +++ b/moto/dynamodb/__init__.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals from .models import dynamodb_backend + +dynamodb_backends = {"global": dynamodb_backend} mock_dynamodb = dynamodb_backend.decorator mock_dynamodb_deprecated = dynamodb_backend.deprecated_decorator diff --git a/moto/dynamodb2/__init__.py b/moto/dynamodb2/__init__.py index f0892d13f..7a1f07352 100644 --- a/moto/dynamodb2/__init__.py +++ b/moto/dynamodb2/__init__.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals from .models import dynamodb_backend2 + +dynamodb_backends2 = {"global": dynamodb_backend2} mock_dynamodb2 = dynamodb_backend2.decorator mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator \ No newline at end of file diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 10cdcd07b..3c5a087d9 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals from boto.ec2.instancetype import InstanceType -from boto.exception import JSONResponseError from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, \ diff --git a/moto/ec2/responses/ip_addresses.py b/moto/ec2/responses/ip_addresses.py index fd58741e2..995719202 100644 --- a/moto/ec2/responses/ip_addresses.py +++ b/moto/ec2/responses/ip_addresses.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals -from boto.exception import JSONResponseError from moto.core.responses import BaseResponse diff --git a/moto/ec2/responses/spot_instances.py b/moto/ec2/responses/spot_instances.py index 321ecd99a..96e5a1ba4 100644 --- a/moto/ec2/responses/spot_instances.py +++ b/moto/ec2/responses/spot_instances.py @@ -35,8 +35,8 @@ class SpotInstances(BaseResponse): def request_spot_instances(self): price = 
self._get_param('SpotPrice') image_id = self._get_param('LaunchSpecification.ImageId') - count = self._get_int_param('InstanceCount') - type = self._get_param('Type') + count = self._get_int_param('InstanceCount', 1) + type = self._get_param('Type', 'one-time') valid_from = self._get_param('ValidFrom') valid_until = self._get_param('ValidUntil') launch_group = self._get_param('LaunchGroup') @@ -44,7 +44,7 @@ class SpotInstances(BaseResponse): key_name = self._get_param('LaunchSpecification.KeyName') security_groups = self._get_multi_param('LaunchSpecification.SecurityGroup') user_data = self._get_param('LaunchSpecification.UserData') - instance_type = self._get_param('LaunchSpecification.InstanceType') + instance_type = self._get_param('LaunchSpecification.InstanceType', 'm1.small') placement = self._get_param('LaunchSpecification.Placement.AvailabilityZone') kernel_id = self._get_param('LaunchSpecification.KernelId') ramdisk_id = self._get_param('LaunchSpecification.RamdiskId') diff --git a/moto/emr/exceptions.py b/moto/emr/exceptions.py new file mode 100644 index 000000000..1a3398d4f --- /dev/null +++ b/moto/emr/exceptions.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals + +from moto.core.exceptions import RESTError + + +class EmrError(RESTError): + code = 400 diff --git a/moto/emr/models.py b/moto/emr/models.py index f92428331..155e4a898 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -299,6 +299,7 @@ class ElasticMapReduceBackend(BaseBackend): created_before = dtparse(created_before) clusters = [c for c in clusters if c.creation_datetime < created_before] + # Amazon EMR can return a maximum of 512 job flow descriptions return sorted(clusters, key=lambda x: x.id)[:512] def describe_step(self, cluster_id, step_id): diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 427ab48c1..3869c33ff 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -5,15 +5,14 @@ from datetime import datetime from functools import wraps 
import pytz -from botocore.exceptions import ClientError from moto.compat import urlparse from moto.core.responses import AWSServiceSpec from moto.core.responses import BaseResponse from moto.core.responses import xml_to_json_response +from .exceptions import EmrError from .models import emr_backends -from .utils import steps_from_query_string -from .utils import tags_from_query_string +from .utils import steps_from_query_string, tags_from_query_string def generate_boto3_response(operation): @@ -46,7 +45,7 @@ class ElasticMapReduceResponse(BaseResponse): aws_service_spec = AWSServiceSpec('data/emr/2009-03-31/service-2.json') - def get_region_from_url(self, full_url): + def get_region_from_url(self, request, full_url): parsed = urlparse(full_url) for regex in self.region_regex: match = regex.search(parsed.netloc) @@ -240,9 +239,7 @@ class ElasticMapReduceResponse(BaseResponse): 'Only one AMI version and release label may be specified. ' 'Provided AMI: {0}, release label: {1}.').format( ami_version, release_label) - raise ClientError( - {'Error': {'Code': 'ValidationException', - 'Message': message}}, 'RunJobFlow') + raise EmrError(error_type="ValidationException", message=message, template='single_error') else: if ami_version: kwargs['requested_ami_version'] = ami_version diff --git a/moto/events/__init__.py b/moto/events/__init__.py index 8b15e852a..5c93c59c8 100644 --- a/moto/events/__init__.py +++ b/moto/events/__init__.py @@ -2,4 +2,5 @@ from __future__ import unicode_literals from .models import events_backend +events_backends = {"global": events_backend} mock_events = events_backend.decorator diff --git a/moto/events/urls.py b/moto/events/urls.py index bff05da3f..a6e533b08 100644 --- a/moto/events/urls.py +++ b/moto/events/urls.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals from .responses import EventsHandler url_bases = [ - "https://events.(.+).amazonaws.com" + "https?://events.(.+).amazonaws.com" ] url_paths = { diff --git a/moto/iam/__init__.py 
b/moto/iam/__init__.py index 02519cbc9..c5110b35d 100644 --- a/moto/iam/__init__.py +++ b/moto/iam/__init__.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals from .models import iam_backend + +iam_backends = {"global": iam_backend} mock_iam = iam_backend.decorator mock_iam_deprecated = iam_backend.deprecated_decorator \ No newline at end of file diff --git a/moto/iam/urls.py b/moto/iam/urls.py index a591e3ebe..46db41e46 100644 --- a/moto/iam/urls.py +++ b/moto/iam/urls.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals from .responses import IamResponse url_bases = [ - "https?://iam.amazonaws.com", + "https?://iam(.*).amazonaws.com", ] url_paths = { diff --git a/moto/instance_metadata/__init__.py b/moto/instance_metadata/__init__.py new file mode 100644 index 000000000..9197bcf7c --- /dev/null +++ b/moto/instance_metadata/__init__.py @@ -0,0 +1,4 @@ +from __future__ import unicode_literals +from .models import instance_metadata_backend + +instance_metadata_backends = {"global": instance_metadata_backend} \ No newline at end of file diff --git a/moto/instance_metadata/models.py b/moto/instance_metadata/models.py new file mode 100644 index 000000000..b86f86376 --- /dev/null +++ b/moto/instance_metadata/models.py @@ -0,0 +1,7 @@ +from moto.core.models import BaseBackend + + +class InstanceMetadataBackend(BaseBackend): + pass + +instance_metadata_backend = InstanceMetadataBackend() diff --git a/moto/instance_metadata/responses.py b/moto/instance_metadata/responses.py new file mode 100644 index 000000000..b2de66e7b --- /dev/null +++ b/moto/instance_metadata/responses.py @@ -0,0 +1,47 @@ +from __future__ import unicode_literals +import datetime +import json +from urlparse import urlparse + +from moto.core.responses import BaseResponse + + +class InstanceMetadataResponse(BaseResponse): + def metadata_response(self, request, full_url, headers): + """ + Mock response for localhost metadata + + 
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html + """ + + parsed_url = urlparse(full_url) + tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1) + credentials = dict( + AccessKeyId="test-key", + SecretAccessKey="test-secret-key", + Token="test-session-token", + Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ") + ) + + path = parsed_url.path + + meta_data_prefix = "/latest/meta-data/" + # Strip prefix if it is there + if path.startswith(meta_data_prefix): + path = path[len(meta_data_prefix):] + + if path == '': + result = 'iam' + elif path == 'iam': + result = json.dumps({ + 'security-credentials': { + 'default-role': credentials + } + }) + elif path == 'iam/security-credentials/': + result = 'default-role' + elif path == 'iam/security-credentials/default-role': + result = json.dumps(credentials) + else: + raise NotImplementedError("The {0} metadata path has not been implemented".format(path)) + return 200, headers, result diff --git a/moto/instance_metadata/urls.py b/moto/instance_metadata/urls.py new file mode 100644 index 000000000..7776b364a --- /dev/null +++ b/moto/instance_metadata/urls.py @@ -0,0 +1,12 @@ +from __future__ import unicode_literals +from .responses import InstanceMetadataResponse + +url_bases = [ + "http://169.254.169.254" +] + +instance_metadata = InstanceMetadataResponse() + +url_paths = { + '{0}/(?P.+)': instance_metadata.metadata_response, +} diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index 9aed719d5..29f6c07ff 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -20,7 +20,7 @@ class KinesisResponse(BaseResponse): @property def is_firehose(self): host = self.headers.get('host') or self.headers['Host'] - return host.startswith('firehose') + return host.startswith('firehose') or 'firehose' in self.headers.get('Authorization', '') def create_stream(self): stream_name = self.parameters.get('StreamName') diff --git a/moto/route53/__init__.py 
b/moto/route53/__init__.py index df629880f..e2bbe4c1a 100644 --- a/moto/route53/__init__.py +++ b/moto/route53/__init__.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals from .models import route53_backend + +route53_backends = {"global": route53_backend} mock_route53 = route53_backend.decorator mock_route53_deprecated = route53_backend.deprecated_decorator diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 429317dae..d796660e1 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -1,174 +1,186 @@ from __future__ import unicode_literals from jinja2 import Template from six.moves.urllib.parse import parse_qs, urlparse + +from moto.core.responses import BaseResponse from .models import route53_backend import xmltodict -def list_or_create_hostzone_response(request, full_url, headers): +class Route53 (BaseResponse): + def list_or_create_hostzone_response(self, request, full_url, headers): + self.setup_class(request, full_url, headers) - if request.method == "POST": - elements = xmltodict.parse(request.body) - if "HostedZoneConfig" in elements["CreateHostedZoneRequest"]: - comment = elements["CreateHostedZoneRequest"]["HostedZoneConfig"]["Comment"] - try: - # in boto3, this field is set directly in the xml - private_zone = elements["CreateHostedZoneRequest"]["HostedZoneConfig"]["PrivateZone"] - except KeyError: - # if a VPC subsection is only included in xmls params when private_zone=True, - # see boto: boto/route53/connection.py - private_zone = 'VPC' in elements["CreateHostedZoneRequest"] - else: - comment = None - private_zone = False + if request.method == "POST": + elements = xmltodict.parse(self.body) + if "HostedZoneConfig" in elements["CreateHostedZoneRequest"]: + comment = elements["CreateHostedZoneRequest"]["HostedZoneConfig"]["Comment"] + try: + # in boto3, this field is set directly in the xml + private_zone = elements["CreateHostedZoneRequest"]["HostedZoneConfig"]["PrivateZone"] + except KeyError: + # if a 
VPC subsection is only included in xmls params when private_zone=True, + # see boto: boto/route53/connection.py + private_zone = 'VPC' in elements["CreateHostedZoneRequest"] + else: + comment = None + private_zone = False + + name = elements["CreateHostedZoneRequest"]["Name"] + + if name[-1] != ".": + name += "." + + new_zone = route53_backend.create_hosted_zone( + name, + comment=comment, + private_zone=private_zone, + ) + template = Template(CREATE_HOSTED_ZONE_RESPONSE) + return 201, headers, template.render(zone=new_zone) + + elif request.method == "GET": + all_zones = route53_backend.get_all_hosted_zones() + template = Template(LIST_HOSTED_ZONES_RESPONSE) + return 200, headers, template.render(zones=all_zones) - name = elements["CreateHostedZoneRequest"]["Name"] + def get_or_delete_hostzone_response(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + parsed_url = urlparse(full_url) + zoneid = parsed_url.path.rstrip('/').rsplit('/', 1)[1] + the_zone = route53_backend.get_hosted_zone(zoneid) + if not the_zone: + return 404, headers, "Zone %s not Found" % zoneid - if name[-1] != ".": - name += "." 
+ if request.method == "GET": + template = Template(GET_HOSTED_ZONE_RESPONSE) - new_zone = route53_backend.create_hosted_zone( - name, - comment=comment, - private_zone=private_zone, - ) - template = Template(CREATE_HOSTED_ZONE_RESPONSE) - return 201, headers, template.render(zone=new_zone) - - elif request.method == "GET": - all_zones = route53_backend.get_all_hosted_zones() - template = Template(LIST_HOSTED_ZONES_RESPONSE) - return 200, headers, template.render(zones=all_zones) + return 200, headers, template.render(zone=the_zone) + elif request.method == "DELETE": + route53_backend.delete_hosted_zone(zoneid) + return 200, headers, DELETE_HOSTED_ZONE_RESPONSE -def get_or_delete_hostzone_response(request, full_url, headers): - parsed_url = urlparse(full_url) - zoneid = parsed_url.path.rstrip('/').rsplit('/', 1)[1] - the_zone = route53_backend.get_hosted_zone(zoneid) - if not the_zone: - return 404, headers, "Zone %s not Found" % zoneid + def rrset_response(self, request, full_url, headers): + self.setup_class(request, full_url, headers) - if request.method == "GET": - template = Template(GET_HOSTED_ZONE_RESPONSE) + parsed_url = urlparse(full_url) + method = request.method - return 200, headers, template.render(zone=the_zone) - elif request.method == "DELETE": - route53_backend.delete_hosted_zone(zoneid) - return 200, headers, DELETE_HOSTED_ZONE_RESPONSE + zoneid = parsed_url.path.rstrip('/').rsplit('/', 2)[1] + the_zone = route53_backend.get_hosted_zone(zoneid) + if not the_zone: + return 404, headers, "Zone %s Not Found" % zoneid + + if method == "POST": + elements = xmltodict.parse(self.body) + + change_list = elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change'] + if not isinstance(change_list, list): + change_list = [elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change']] + + for value in change_list: + action = value['Action'] + record_set = value['ResourceRecordSet'] + if action in ('CREATE', 'UPSERT'): + if 
'ResourceRecords' in record_set: + resource_records = list(record_set['ResourceRecords'].values())[0] + if not isinstance(resource_records, list): + # Depending on how many records there are, this may or may not be a list + resource_records = [resource_records] + record_values = [x['Value'] for x in resource_records] + elif 'AliasTarget' in record_set: + record_values = [record_set['AliasTarget']['DNSName']] + record_set['ResourceRecords'] = record_values + if action == 'CREATE': + the_zone.add_rrset(record_set) + else: + the_zone.upsert_rrset(record_set) + elif action == "DELETE": + if 'SetIdentifier' in record_set: + the_zone.delete_rrset_by_id(record_set["SetIdentifier"]) + else: + the_zone.delete_rrset_by_name(record_set["Name"]) + + return 200, headers, CHANGE_RRSET_RESPONSE + + elif method == "GET": + querystring = parse_qs(parsed_url.query) + template = Template(LIST_RRSET_REPONSE) + type_filter = querystring.get("type", [None])[0] + name_filter = querystring.get("name", [None])[0] + record_sets = the_zone.get_record_sets(type_filter, name_filter) + return 200, headers, template.render(record_sets=record_sets) -def rrset_response(request, full_url, headers): - parsed_url = urlparse(full_url) - method = request.method + def health_check_response(self, request, full_url, headers): + self.setup_class(request, full_url, headers) - zoneid = parsed_url.path.rstrip('/').rsplit('/', 2)[1] - the_zone = route53_backend.get_hosted_zone(zoneid) - if not the_zone: - return 404, headers, "Zone %s Not Found" % zoneid + parsed_url = urlparse(full_url) + method = request.method - if method == "POST": - elements = xmltodict.parse(request.body) + if method == "POST": + properties = xmltodict.parse(self.body)['CreateHealthCheckRequest']['HealthCheckConfig'] + health_check_args = { + "ip_address": properties.get('IPAddress'), + "port": properties.get('Port'), + "type": properties['Type'], + "resource_path": properties.get('ResourcePath'), + "fqdn": 
properties.get('FullyQualifiedDomainName'), + "search_string": properties.get('SearchString'), + "request_interval": properties.get('RequestInterval'), + "failure_threshold": properties.get('FailureThreshold'), + } + health_check = route53_backend.create_health_check(health_check_args) + template = Template(CREATE_HEALTH_CHECK_RESPONSE) + return 201, headers, template.render(health_check=health_check) + elif method == "DELETE": + health_check_id = parsed_url.path.split("/")[-1] + route53_backend.delete_health_check(health_check_id) + return 200, headers, DELETE_HEALTH_CHECK_REPONSE + elif method == "GET": + template = Template(LIST_HEALTH_CHECKS_REPONSE) + health_checks = route53_backend.get_health_checks() + return 200, headers, template.render(health_checks=health_checks) - change_list = elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change'] - if not isinstance(change_list, list): - change_list = [elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change']] + def not_implemented_response(self, request, full_url, headers): + self.setup_class(request, full_url, headers) - for value in change_list: - action = value['Action'] - record_set = value['ResourceRecordSet'] - if action in ('CREATE', 'UPSERT'): - if 'ResourceRecords' in record_set: - resource_records = list(record_set['ResourceRecords'].values())[0] - if not isinstance(resource_records, list): - # Depending on how many records there are, this may or may not be a list - resource_records = [resource_records] - record_values = [x['Value'] for x in resource_records] - elif 'AliasTarget' in record_set: - record_values = [record_set['AliasTarget']['DNSName']] - record_set['ResourceRecords'] = record_values - if action == 'CREATE': - the_zone.add_rrset(record_set) - else: - the_zone.upsert_rrset(record_set) - elif action == "DELETE": - if 'SetIdentifier' in record_set: - the_zone.delete_rrset_by_id(record_set["SetIdentifier"]) - else: - 
the_zone.delete_rrset_by_name(record_set["Name"]) - - return 200, headers, CHANGE_RRSET_RESPONSE - - elif method == "GET": - querystring = parse_qs(parsed_url.query) - template = Template(LIST_RRSET_REPONSE) - type_filter = querystring.get("type", [None])[0] - name_filter = querystring.get("name", [None])[0] - record_sets = the_zone.get_record_sets(type_filter, name_filter) - return 200, headers, template.render(record_sets=record_sets) + action = '' + if 'tags' in full_url: + action = 'tags' + elif 'trafficpolicyinstances' in full_url: + action = 'policies' + raise NotImplementedError("The action for {0} has not been implemented for route 53".format(action)) -def health_check_response(request, full_url, headers): - parsed_url = urlparse(full_url) - method = request.method + def list_or_change_tags_for_resource_request(self, request, full_url, headers): + self.setup_class(request, full_url, headers) - if method == "POST": - properties = xmltodict.parse(request.body)['CreateHealthCheckRequest']['HealthCheckConfig'] - health_check_args = { - "ip_address": properties.get('IPAddress'), - "port": properties.get('Port'), - "type": properties['Type'], - "resource_path": properties.get('ResourcePath'), - "fqdn": properties.get('FullyQualifiedDomainName'), - "search_string": properties.get('SearchString'), - "request_interval": properties.get('RequestInterval'), - "failure_threshold": properties.get('FailureThreshold'), - } - health_check = route53_backend.create_health_check(health_check_args) - template = Template(CREATE_HEALTH_CHECK_RESPONSE) - return 201, headers, template.render(health_check=health_check) - elif method == "DELETE": - health_check_id = parsed_url.path.split("/")[-1] - route53_backend.delete_health_check(health_check_id) - return 200, headers, DELETE_HEALTH_CHECK_REPONSE - elif method == "GET": - template = Template(LIST_HEALTH_CHECKS_REPONSE) - health_checks = route53_backend.get_health_checks() - return 200, headers, 
template.render(health_checks=health_checks) + parsed_url = urlparse(full_url) + id_ = parsed_url.path.split("/")[-1] + type_ = parsed_url.path.split("/")[-2] -def not_implemented_response(request, full_url, headers): - action = '' - if 'tags' in full_url: - action = 'tags' - elif 'trafficpolicyinstances' in full_url: - action = 'policies' - raise NotImplementedError("The action for {0} has not been implemented for route 53".format(action)) + if request.method == "GET": + tags = route53_backend.list_tags_for_resource(id_) + template = Template(LIST_TAGS_FOR_RESOURCE_RESPONSE) + return 200, headers, template.render( + resource_type=type_, resource_id=id_, tags=tags) + if request.method == "POST": + tags = xmltodict.parse( + self.body)['ChangeTagsForResourceRequest'] -def list_or_change_tags_for_resource_request(request, full_url, headers): - parsed_url = urlparse(full_url) - id_ = parsed_url.path.split("/")[-1] - type_ = parsed_url.path.split("/")[-2] + if 'AddTags' in tags: + tags = tags['AddTags'] + elif 'RemoveTagKeys' in tags: + tags = tags['RemoveTagKeys'] - if request.method == "GET": - tags = route53_backend.list_tags_for_resource(id_) - template = Template(LIST_TAGS_FOR_RESOURCE_RESPONSE) - return 200, headers, template.render( - resource_type=type_, resource_id=id_, tags=tags) + route53_backend.change_tags_for_resource(id_, tags) + template = Template(CHANGE_TAGS_FOR_RESOURCE_RESPONSE) - if request.method == "POST": - tags = xmltodict.parse( - request.body)['ChangeTagsForResourceRequest'] - - if 'AddTags' in tags: - tags = tags['AddTags'] - elif 'RemoveTagKeys' in tags: - tags = tags['RemoveTagKeys'] - - route53_backend.change_tags_for_resource(id_, tags) - template = Template(CHANGE_TAGS_FOR_RESOURCE_RESPONSE) - - return 200, headers, template.render() + return 200, headers, template.render() LIST_TAGS_FOR_RESOURCE_RESPONSE = """ diff --git a/moto/route53/urls.py b/moto/route53/urls.py index 361c96317..795f7d807 100644 --- a/moto/route53/urls.py +++ 
b/moto/route53/urls.py @@ -1,15 +1,25 @@ from __future__ import unicode_literals -from . import responses +from .responses import Route53 url_bases = [ - "https://route53.amazonaws.com/201.-..-../", + "https?://route53(.*).amazonaws.com", ] + +def tag_response1(*args, **kwargs): + return Route53().list_or_change_tags_for_resource_request(*args, **kwargs) + + +def tag_response2(*args, **kwargs): + return Route53().list_or_change_tags_for_resource_request(*args, **kwargs) + + url_paths = { - '{0}hostedzone$': responses.list_or_create_hostzone_response, - '{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response, - '{0}hostedzone/[^/]+/rrset/?$': responses.rrset_response, - '{0}healthcheck': responses.health_check_response, - '{0}tags/(healthcheck|hostedzone)/*': responses.list_or_change_tags_for_resource_request, - '{0}trafficpolicyinstances/*': responses.not_implemented_response + '{0}/(?P[\d_-]+)/hostedzone$': Route53().list_or_create_hostzone_response, + '{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)$': Route53().get_or_delete_hostzone_response, + '{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)/rrset/?$': Route53().rrset_response, + '{0}/(?P[\d_-]+)/healthcheck': Route53().health_check_response, + '{0}/(?P[\d_-]+)/tags/healthcheck/(?P[^/]+)$': tag_response1, + '{0}/(?P[\d_-]+)/tags/hostedzone/(?P[^/]+)$': tag_response2, + '{0}/(?P[\d_-]+)/trafficpolicyinstances/*': Route53().not_implemented_response } diff --git a/moto/s3/__init__.py b/moto/s3/__init__.py index 7d0df53bd..2c54a8d5a 100644 --- a/moto/s3/__init__.py +++ b/moto/s3/__init__.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals from .models import s3_backend + +s3_backends = {"global": s3_backend} mock_s3 = s3_backend.decorator mock_s3_deprecated = s3_backend.deprecated_decorator \ No newline at end of file diff --git a/moto/s3/models.py b/moto/s3/models.py index 40370b5dd..d5e156498 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -89,21 +89,21 @@ class FakeKey(object): @property def 
response_dict(self): - r = { + res = { 'etag': self.etag, 'last-modified': self.last_modified_RFC1123, 'content-length': str(len(self.value)), } if self._storage_class != 'STANDARD': - r['x-amz-storage-class'] = self._storage_class + res['x-amz-storage-class'] = self._storage_class if self._expiry is not None: rhdr = 'ongoing-request="false", expiry-date="{0}"' - r['x-amz-restore'] = rhdr.format(self.expiry_date) + res['x-amz-restore'] = rhdr.format(self.expiry_date) if self._is_versioned: - r['x-amz-version-id'] = str(self._version_id) + res['x-amz-version-id'] = str(self._version_id) - return r + return res @property def size(self): diff --git a/moto/server.py b/moto/server.py index 0b5ff7cae..0bb4eb779 100644 --- a/moto/server.py +++ b/moto/server.py @@ -39,21 +39,28 @@ class DomainDispatcherApplication(object): return host for backend_name, backend in BACKENDS.items(): - for url_base in backend.url_bases: + for url_base in backend.values()[0].url_bases: if re.match(url_base, 'http://%s' % host): return backend_name raise RuntimeError('Invalid host: "%s"' % host) def get_application(self, environ): - if environ.get('PATH_INFO', '').startswith("/moto-api"): + path_info = environ.get('PATH_INFO', '') + if path_info.startswith("/moto-api"): host = "moto_api" + elif path_info.startswith("/latest/meta-data/"): + host = "instance_metadata" else: host = environ['HTTP_HOST'].split(':')[0] if host == "localhost": # Fall back to parsing auth header to find service # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] - _, _, region, service, _ = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[1].split("/") + try: + _, _, region, service, _ = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[1].split("/") + except ValueError: + region = 'us-east-1' + service = 's3' host = "{service}.{region}.amazonaws.com".format(service=service, region=region) with self.lock: @@ -108,7 +115,7 @@ def create_backend_app(service): backend_app.view_functions = {} 
backend_app.url_map = Map() backend_app.url_map.converters['regex'] = RegexConverter - backend = BACKENDS[service] + backend = BACKENDS[service].values()[0] for url_path, handler in backend.flask_paths.items(): if handler.__name__ == 'dispatch': endpoint = '{0}.dispatch'.format(handler.__self__.__name__) diff --git a/moto/ses/__init__.py b/moto/ses/__init__.py index e1ec4b41a..e105b9929 100644 --- a/moto/ses/__init__.py +++ b/moto/ses/__init__.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals from .models import ses_backend + +ses_backends = {"global": ses_backend} mock_ses = ses_backend.decorator mock_ses_deprecated = ses_backend.deprecated_decorator \ No newline at end of file diff --git a/moto/ses/urls.py b/moto/ses/urls.py index 18d5874c4..adfb4c6e4 100644 --- a/moto/ses/urls.py +++ b/moto/ses/urls.py @@ -2,7 +2,8 @@ from __future__ import unicode_literals from .responses import EmailResponse url_bases = [ - "https?://email.(.+).amazonaws.com" + "https?://email.(.+).amazonaws.com", + "https?://ses.(.+).amazonaws.com", ] url_paths = { diff --git a/moto/settings.py b/moto/settings.py new file mode 100644 index 000000000..a5240f130 --- /dev/null +++ b/moto/settings.py @@ -0,0 +1,3 @@ +import os + +TEST_SERVER_MODE = os.environ.get('TEST_SERVER_MODE', '0').lower() == 'true' diff --git a/moto/sts/__init__.py b/moto/sts/__init__.py index 57456c1b3..7b46bdfbd 100644 --- a/moto/sts/__init__.py +++ b/moto/sts/__init__.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals from .models import sts_backend + +sts_backends = {"global": sts_backend} mock_sts = sts_backend.decorator mock_sts_deprecated = sts_backend.deprecated_decorator diff --git a/moto/sts/urls.py b/moto/sts/urls.py index c6e310960..2078e0b2c 100644 --- a/moto/sts/urls.py +++ b/moto/sts/urls.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals from .responses import TokenResponse url_bases = [ - "https?://sts.amazonaws.com" + "https?://sts(.*).amazonaws.com" ] url_paths = { diff --git 
a/other_langs/sqsSample.java b/other_langs/sqsSample.java new file mode 100644 index 000000000..23368272c --- /dev/null +++ b/other_langs/sqsSample.java @@ -0,0 +1,52 @@ +/* + * Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.samples; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.io.Writer; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.AmazonServiceException; +import com.amazonaws.regions.Region; +import com.amazonaws.regions.Regions; +import com.amazonaws.services.sqs.AmazonSQS; +import com.amazonaws.services.sqs.AmazonSQSClient; + +public class S3Sample { + + public static void main(String[] args) throws IOException { + AmazonSQS sqs = new AmazonSQSClient(); + Region usWest2 = Region.getRegion(Regions.US_WEST_2); + sqs.setRegion(usWest2); + sqs.setEndpoint("http://localhost:8086"); + + String queueName = "my-first-queue"; + sqs.createQueue(queueName); + + System.out.println("Listing queues"); + for (String queue_url: sqs.listQueues().getQueueUrls()) { + System.out.println(" - " + queue_url); + } + System.out.println(); + + } + +} diff --git a/other_langs/test.js b/other_langs/test.js new file mode 100644 index 000000000..65d65ae70 --- /dev/null +++ b/other_langs/test.js @@ -0,0 +1,26 @@ +var AWS = 
require('aws-sdk'); + +var s3 = new AWS.S3({endpoint: "http://localhost:8086"}); +var myBucket = 'my.unique.bucket.name'; + +var myKey = 'myBucketKey'; + +s3.createBucket({Bucket: myBucket}, function(err, data) { + if (err) { + console.log(err); + } else { + params = {Bucket: myBucket, Key: myKey, Body: 'Hello!'}; + s3.putObject(params, function(err, data) { + if (err) { + console.log(err) + } else { + console.log("Successfully uploaded data to myBucket/myKey"); + } + }); + } +}); + +s3.listBuckets(function(err, data) { + if (err) console.log(err, err.stack); // an error occurred + else console.log(data); // successful response +}); diff --git a/other_langs/test.rb b/other_langs/test.rb new file mode 100644 index 000000000..dc5b7914b --- /dev/null +++ b/other_langs/test.rb @@ -0,0 +1,6 @@ +require 'aws-sdk' + +sqs = Aws::SQS::Resource.new(region: 'us-west-2', endpoint: 'http://localhost:8086') +my_queue = sqs.create_queue(queue_name: 'my-bucket') + +puts sqs.client.list_queues() diff --git a/setup.cfg b/setup.cfg index 3480374bc..3c6e79cf3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,2 @@ [bdist_wheel] -universal=1 \ No newline at end of file +universal=1 diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 6bd6eb5e5..e52bfe0d7 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -10,7 +10,7 @@ import sure # noqa from botocore.exceptions import ClientError from moto.packages.responses import responses -from moto import mock_apigateway +from moto import mock_apigateway, settings @freeze_time("2015-01-01") @@ -29,11 +29,11 @@ def test_create_and_get_rest_api(): ) response.pop('ResponseMetadata') + response.pop('createdDate') response.should.equal({ 'id': api_id, 'name': 'my_api', 'description': 'this is my api', - 'createdDate': datetime(2015, 1, 1, tzinfo=tzutc()) }) @@ -930,4 +930,5 @@ def test_http_proxying_integration(): deploy_url = 
"https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}".format(api_id=api_id, region_name=region_name, stage_name=stage_name) - requests.get(deploy_url).content.should.equal(b"a fake response") + if not settings.TEST_SERVER_MODE: + requests.get(deploy_url).content.should.equal(b"a fake response") diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index ce8892dc9..74e93c373 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -10,7 +10,7 @@ import zipfile import sure # noqa from freezegun import freeze_time -from moto import mock_lambda, mock_s3, mock_ec2 +from moto import mock_lambda, mock_s3, mock_ec2, settings def _process_lamda(pfunc): @@ -36,16 +36,15 @@ def lambda_handler(event, context): volume_id = event.get('volume_id') print('get volume details for %s' % volume_id) import boto3 - ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url="http://{base_url}") vol = ec2.Volume(volume_id) print('Volume - %s state=%s, size=%s' % (volume_id, vol.state, vol.size)) return event -""" +""".format(base_url="localhost:8086" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com") return _process_lamda(pfunc) @mock_lambda -@mock_s3 def test_list_functions(): conn = boto3.client('lambda', 'us-west-2') result = conn.list_functions() @@ -53,7 +52,6 @@ def test_list_functions(): @mock_lambda -@freeze_time('2015-01-01 00:00:00') def test_invoke_requestresponse_function(): conn = boto3.client('lambda', 'us-west-2') conn.create_function( @@ -80,7 +78,6 @@ def test_invoke_requestresponse_function(): @mock_lambda -@freeze_time('2015-01-01 00:00:00') def test_invoke_event_function(): conn = boto3.client('lambda', 'us-west-2') conn.create_function( @@ -111,7 +108,6 @@ def test_invoke_event_function(): @mock_ec2 @mock_lambda -@freeze_time('2015-01-01 00:00:00') def test_invoke_function_get_ec2_volume(): conn = 
boto3.resource("ec2", "us-west-2") vol = conn.create_volume(Size=99, AvailabilityZone='us-west-2') @@ -141,7 +137,6 @@ def test_invoke_function_get_ec2_volume(): @mock_lambda -@freeze_time('2015-01-01 00:00:00') def test_create_based_on_s3_with_missing_bucket(): conn = boto3.client('lambda', 'us-west-2') @@ -196,6 +191,7 @@ def test_create_function_from_aws_bucket(): ) result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + result.pop('LastModified') result.should.equal({ 'FunctionName': 'testFunction', 'FunctionArn': 'arn:aws:lambda:123456789012:function:testFunction', @@ -207,7 +203,6 @@ def test_create_function_from_aws_bucket(): 'Description': 'test lambda function', 'Timeout': 3, 'MemorySize': 128, - 'LastModified': '2015-01-01 00:00:00', 'Version': '$LATEST', 'VpcConfig': { "SecurityGroupIds": ["sg-123abc"], @@ -238,6 +233,7 @@ def test_create_function_from_zipfile(): ) result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + result.pop('LastModified') result.should.equal({ 'FunctionName': 'testFunction', @@ -249,7 +245,6 @@ def test_create_function_from_zipfile(): 'Description': 'test lambda function', 'Timeout': 3, 'MemorySize': 128, - 'LastModified': '2015-01-01 00:00:00', 'CodeSha256': hashlib.sha256(zip_content).hexdigest(), 'Version': '$LATEST', 'VpcConfig': { @@ -290,6 +285,7 @@ def test_get_function(): result = conn.get_function(FunctionName='testFunction') result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + result['Configuration'].pop('LastModified') result.should.equal({ "Code": { @@ -303,7 
+299,6 @@ def test_get_function(): "FunctionArn": "arn:aws:lambda:123456789012:function:testFunction", "FunctionName": "testFunction", "Handler": "lambda_function.handler", - "LastModified": "2015-01-01 00:00:00", "MemorySize": 128, "Role": "test-iam-role", "Runtime": "python2.7", @@ -395,7 +390,6 @@ def test_list_create_list_get_delete_list(): "FunctionArn": "arn:aws:lambda:123456789012:function:testFunction", "FunctionName": "testFunction", "Handler": "lambda_function.handler", - "LastModified": "2015-01-01 00:00:00", "MemorySize": 128, "Role": "test-iam-role", "Runtime": "python2.7", @@ -408,11 +402,14 @@ def test_list_create_list_get_delete_list(): }, 'ResponseMetadata': {'HTTPStatusCode': 200}, } - conn.list_functions()['Functions'].should.equal([expected_function_result['Configuration']]) + func = conn.list_functions()['Functions'][0] + func.pop('LastModified') + func.should.equal(expected_function_result['Configuration']) func = conn.get_function(FunctionName='testFunction') func['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it func['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + func['Configuration'].pop('LastModified') func.should.equal(expected_function_result) conn.delete_function(FunctionName='testFunction') diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 95ac6ede4..2ee74f886 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -5,7 +5,7 @@ import boto import boto.s3 import boto.s3.key from botocore.exceptions import ClientError -from moto import mock_cloudformation, mock_s3_deprecated +from moto import mock_cloudformation, mock_s3 import json import sure # noqa @@ -118,14 +118,20 @@ def test_create_stack_with_role_arn(): @mock_cloudformation 
-@mock_s3_deprecated +@mock_s3 def test_create_stack_from_s3_url(): - s3_conn = boto.s3.connect_to_region('us-west-1') - bucket = s3_conn.create_bucket("foobar") - key = boto.s3.key.Key(bucket) - key.key = "template-key" - key.set_contents_from_string(dummy_template_json) - key_url = key.generate_url(expires_in=0, query_auth=False) + s3 = boto3.client('s3') + s3_conn = boto3.resource('s3') + bucket = s3_conn.create_bucket(Bucket="foobar") + + key = s3_conn.Object('foobar', 'template-key').put(Body=dummy_template_json) + key_url = s3.generate_presigned_url( + ClientMethod='get_object', + Params={ + 'Bucket': 'foobar', + 'Key': 'template-key' + } + ) cf_conn = boto3.client('cloudformation', region_name='us-west-1') cf_conn.create_stack( diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 1b9330a9f..609a0b46d 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -701,27 +701,29 @@ def test_vpc_single_instance_in_subnet(): eip_resource = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] eip_resource.physical_resource_id.should.equal(eip.allocation_id) -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() +@mock_cloudformation() +@mock_ec2() @mock_rds2() def test_rds_db_parameter_groups(): - ec2_conn = boto.ec2.connect_to_region("us-west-1") - ec2_conn.create_security_group('application', 'Our Application Group') + ec2_conn = boto3.client("ec2", region_name="us-west-1") + ec2_conn.create_security_group(GroupName='application', Description='Our Application Group') template_json = json.dumps(rds_mysql_with_db_parameter_group.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - parameters=[ - ("DBInstanceIdentifier", "master_db"), - ("DBName", 
"my_db"), - ("DBUser", "my_user"), - ("DBPassword", "my_password"), - ("DBAllocatedStorage", "20"), - ("DBInstanceClass", "db.m1.medium"), - ("EC2SecurityGroup", "application"), - ("MultiAZ", "true"), + cf_conn = boto3.client('cloudformation', 'us-west-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + Parameters=[{'ParameterKey': key, 'ParameterValue': value} for + key, value in [ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", "db.m1.medium"), + ("EC2SecurityGroup", "application"), + ("MultiAZ", "true"), + ] ], ) @@ -1802,7 +1804,7 @@ def lambda_handler(event, context): return _process_lamda(pfunc) -@mock_cloudformation_deprecated +@mock_cloudformation @mock_lambda def test_lambda_function(): # switch this to python as backend lambda only supports python execution. @@ -1826,10 +1828,10 @@ def test_lambda_function(): } template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-east-1") + cf_conn = boto3.client('cloudformation', 'us-east-1') cf_conn.create_stack( - "test_stack", - template_body=template_json, + StackName="test_stack", + TemplateBody=template_json, ) conn = boto3.client('lambda', 'us-east-1') diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py index aa86b41b3..80dd501e7 100644 --- a/tests/test_core/test_instance_metadata.py +++ b/tests/test_core/test_instance_metadata.py @@ -3,18 +3,23 @@ import sure # noqa from nose.tools import assert_raises import requests -from moto import mock_ec2 +from moto import mock_ec2, settings + +if settings.TEST_SERVER_MODE: + BASE_URL = 'http://localhost:8086' +else: + BASE_URL = 'http://169.254.169.254' @mock_ec2 def test_latest_meta_data(): - res = requests.get("http://169.254.169.254/latest/meta-data/") + res = requests.get("{0}/latest/meta-data/".format(BASE_URL)) 
res.content.should.equal(b"iam") @mock_ec2 def test_meta_data_iam(): - res = requests.get("http://169.254.169.254/latest/meta-data/iam") + res = requests.get("{0}/latest/meta-data/iam".format(BASE_URL)) json_response = res.json() default_role = json_response['security-credentials']['default-role'] default_role.should.contain('AccessKeyId') @@ -25,21 +30,15 @@ def test_meta_data_iam(): @mock_ec2 def test_meta_data_security_credentials(): - res = requests.get("http://169.254.169.254/latest/meta-data/iam/security-credentials/") + res = requests.get("{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL)) res.content.should.equal(b"default-role") @mock_ec2 def test_meta_data_default_role(): - res = requests.get("http://169.254.169.254/latest/meta-data/iam/security-credentials/default-role") + res = requests.get("{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL)) json_response = res.json() json_response.should.contain('AccessKeyId') json_response.should.contain('SecretAccessKey') json_response.should.contain('Token') json_response.should.contain('Expiration') - - -@mock_ec2 -def test_meta_data_unknown_path(): - with assert_raises(NotImplementedError): - requests.get("http://169.254.169.254/latest/meta-data/badpath") diff --git a/tests/test_core/test_moto_api.py b/tests/test_core/test_moto_api.py new file mode 100644 index 000000000..3b441a3f1 --- /dev/null +++ b/tests/test_core/test_moto_api.py @@ -0,0 +1,21 @@ +from __future__ import unicode_literals +import sure # noqa +from nose.tools import assert_raises +import requests + +import boto3 +from moto import mock_sqs, settings + +base_url = "http://localhost:8086" if settings.TEST_SERVER_MODE else "http://motoapi.amazonaws.com" + + +@mock_sqs +def test_reset_api(): + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="queue1") + conn.list_queues()['QueueUrls'].should.have.length_of(1) + + res = 
requests.post("{base_url}/moto-api/reset".format(base_url=base_url)) + res.content.should.equal(b'{"status": "ok"}') + + conn.list_queues().shouldnt.contain('QueueUrls') # No more queues diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index 7ea56faa9..f2df39a22 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -42,13 +42,6 @@ def test_describe_missing_table(): conn.describe_table('messages') -@mock_dynamodb -def test_sts_handler(): - res = requests.post("https://sts.amazonaws.com/", data={"GetSessionToken": ""}) - res.ok.should.be.ok - res.text.should.contain("SecretAccessKey") - - @mock_dynamodb_deprecated def test_dynamodb_with_connect_to_region(): # this will work if connected with boto.connect_dynamodb() diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index d66d36d9f..9e92e7985 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -63,11 +63,3 @@ def test_describe_missing_table(): aws_secret_access_key="sk") with assert_raises(JSONResponseError): conn.describe_table('messages') - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_sts_handler(): - res = requests.post("https://sts.amazonaws.com/", data={"GetSessionToken": ""}) - res.ok.should.be.ok - res.text.should.contain("SecretAccessKey") diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 9c3fbd40d..4c154ae84 100755 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -5,7 +5,7 @@ from nose.tools import assert_raises import boto import boto.ec2 -from boto.exception import EC2ResponseError, JSONResponseError +from boto.exception import EC2ResponseError, EC2ResponseError import sure # noqa @@ -19,9 +19,9 @@ def test_ami_create_and_delete(): reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] - with assert_raises(JSONResponseError) as ex: + with 
assert_raises(EC2ResponseError) as ex: image_id = conn.create_image(instance.id, "test-ami", "this is a test ami", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set') @@ -52,9 +52,9 @@ def test_ami_create_and_delete(): snapshot.volume_id.should.equal(volume.id) # Deregister - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: success = conn.deregister_image(image_id, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set') @@ -80,9 +80,9 @@ def test_ami_copy(): source_image = conn.get_all_images(image_ids=[source_image_id])[0] # Boto returns a 'CopyImage' object with an image_id attribute here. Use the image_id to fetch the full info. 
- with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: copy_image_ref = conn.copy_image(source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set') @@ -127,9 +127,9 @@ def test_ami_tagging(): conn.create_image(instance.id, "test-ami", "this is a test ami") image = conn.get_all_images()[0] - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: image.add_tag("a key", "some value", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') @@ -289,9 +289,9 @@ def test_ami_attribute_group_permissions(): 'groups': 'all'} # Add 'all' group and confirm - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.modify_image_attribute(**dict(ADD_GROUP_ARGS, **{'dry_run': True})) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set') diff --git a/tests/test_ec2/test_ec2_core.py b/tests/test_ec2/test_ec2_core.py index 53c7d6480..baffc4882 100644 --- a/tests/test_ec2/test_ec2_core.py +++ b/tests/test_ec2/test_ec2_core.py @@ -1,11 +1 @@ from __future__ import unicode_literals -import 
requests -from moto import mock_ec2 - - -@mock_ec2 -def test_not_implemented_method(): - requests.post.when.called_with( - "https://ec2.us-east-1.amazonaws.com/", - data={'Action': ['foobar']} - ).should.throw(NotImplementedError) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index c4794b1c8..6491412e3 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -5,7 +5,7 @@ from nose.tools import assert_raises from moto.ec2 import ec2_backends import boto -from boto.exception import EC2ResponseError, JSONResponseError +from boto.exception import EC2ResponseError import sure # noqa from moto import mock_ec2_deprecated @@ -24,9 +24,9 @@ def test_create_and_delete_volume(): volume = all_volumes[0] - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: volume.delete(dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteVolume operation: Request would have succeeded, but DryRun flag is set') @@ -46,9 +46,9 @@ def test_create_and_delete_volume(): @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') @@ -58,9 +58,9 @@ def test_create_encrypted_volume(): conn = boto.connect_ec2('the_key', 'the_secret') 
conn.create_volume(80, "us-east-1a", encrypted=True) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') @@ -165,9 +165,9 @@ def test_volume_attach_and_detach(): volume.update() volume.volume_state().should.equal('available') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: volume.attach(instance.id, "/dev/sdh", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would have succeeded, but DryRun flag is set') @@ -179,9 +179,9 @@ def test_volume_attach_and_detach(): volume.attach_data.instance_id.should.equal(instance.id) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: volume.detach(dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set') @@ -214,9 +214,9 @@ def test_create_snapshot(): conn = boto.connect_ec2('the_key', 'the_secret') volume = conn.create_volume(80, "us-east-1a") - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: snapshot = volume.create_snapshot('a dryrun snapshot', dry_run=True) - 
ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') @@ -347,9 +347,9 @@ def test_snapshot_attribute(): # Add 'all' group and confirm - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.modify_snapshot_attribute(**dict(ADD_GROUP_ARGS, **{'dry_run': True})) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') @@ -363,9 +363,9 @@ def test_snapshot_attribute(): conn.modify_snapshot_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) # Remove 'all' group and confirm - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.modify_snapshot_attribute(**dict(REMOVE_GROUP_ARGS, **{'dry_run': True})) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') @@ -424,9 +424,9 @@ def test_create_volume_from_snapshot(): volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot('a test snapshot') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: snapshot = volume.create_snapshot('a test snapshot', dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + 
ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') @@ -468,9 +468,9 @@ def test_modify_attribute_blockDeviceMapping(): instance = reservation.instances[0] - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set') @@ -487,9 +487,9 @@ def test_volume_tag_escaping(): vol = conn.create_volume(10, 'us-east-1a') snapshot = conn.create_snapshot(vol.id, 'Desc') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: snapshot.add_tags({'key': ''}, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') dict(conn.get_all_snapshots()[0].tags).should_not.be.equal({'key': ''}) diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index dc7910379..f92c4df8b 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -5,7 +5,7 @@ from nose.tools import assert_raises import boto import boto3 -from boto.exception import EC2ResponseError, JSONResponseError +from boto.exception import EC2ResponseError import six import sure # noqa @@ 
-20,9 +20,9 @@ def test_eip_allocate_classic(): """Allocate/release Classic EIP""" conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: standard = conn.allocate_address(dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') @@ -32,9 +32,9 @@ def test_eip_allocate_classic(): standard.instance_id.should.be.none standard.domain.should.be.equal("standard") - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: standard.release(dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') @@ -47,9 +47,9 @@ def test_eip_allocate_vpc(): """Allocate/release VPC EIP""" conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: vpc = conn.allocate_address(domain="vpc", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') @@ -89,9 +89,9 @@ def test_eip_associate_classic(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: 
conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set') @@ -99,9 +99,9 @@ def test_eip_associate_classic(): eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): eip.instance_id.should.be.equal(instance.id) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set') @@ -139,9 +139,9 @@ def test_eip_associate_vpc(): eip.instance_id.should.be.equal(u'') eip.association_id.should.be.none - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: eip.release(dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') @@ -153,9 +153,8 @@ def test_eip_associate_vpc(): @mock_ec2 def test_eip_boto3_vpc_association(): """Associate EIP to VPC instance in a new subnet with boto3""" - session = boto3.session.Session(region_name='us-west-1') - service = session.resource('ec2') - client = session.client('ec2') + service = boto3.resource('ec2', region_name='us-west-1') + client = 
boto3.client('ec2', region_name='us-west-1') vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') subnet_res = client.create_subnet( VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 6f60c85a8..9027e0448 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -4,10 +4,11 @@ import tests.backport_assert_raises from nose.tools import assert_raises import boto3 +from botocore.exceptions import ClientError import boto import boto.cloudformation import boto.ec2 -from boto.exception import EC2ResponseError, JSONResponseError +from boto.exception import EC2ResponseError import sure # noqa from moto import mock_ec2, mock_cloudformation_deprecated, mock_ec2_deprecated @@ -22,9 +23,9 @@ def test_elastic_network_interfaces(): vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: eni = conn.create_network_interface(subnet.id, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set') @@ -36,9 +37,9 @@ def test_elastic_network_interfaces(): eni.groups.should.have.length_of(0) eni.private_ip_addresses.should.have.length_of(0) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.delete_network_interface(eni.id, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) 
when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set') @@ -49,7 +50,7 @@ def test_elastic_network_interfaces(): with assert_raises(EC2ResponseError) as cm: conn.delete_network_interface(eni.id) - cm.exception.code.should.equal('InvalidNetworkInterfaceID.NotFound') + cm.exception.error_code.should.equal('InvalidNetworkInterfaceID.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @@ -60,7 +61,7 @@ def test_elastic_network_interfaces_subnet_validation(): with assert_raises(EC2ResponseError) as cm: conn.create_network_interface("subnet-abcd1234") - cm.exception.code.should.equal('InvalidSubnetID.NotFound') + cm.exception.error_code.should.equal('InvalidSubnetID.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @@ -117,9 +118,9 @@ def test_elastic_network_interfaces_modify_attribute(): eni.groups.should.have.length_of(1) eni.groups[0].id.should.equal(security_group1.id) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.modify_network_interface_attribute(eni.id, 'groupset', [security_group2.id], dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set') @@ -183,11 +184,11 @@ def test_elastic_network_interfaces_get_by_tag_name(): eni1 = ec2.create_network_interface(SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') - with assert_raises(JSONResponseError) as ex: + with assert_raises(ClientError) as ex: eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}], DryRun=True) - ex.exception.reason.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred 
(DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.response['Error']['Code'].should.equal('DryRunOperation') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}]) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index a310c05a4..b6601e87f 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -8,7 +8,7 @@ import datetime import boto from boto.ec2.instance import Reservation, InstanceAttribute -from boto.exception import EC2ResponseError, JSONResponseError +from boto.exception import EC2ResponseError, EC2ResponseError from freezegun import freeze_time import sure # noqa @@ -41,9 +41,9 @@ def test_add_servers(): def test_instance_launch_and_terminate(): conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: reservation = conn.run_instances('ami-1234abcd', dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set') @@ -74,9 +74,9 @@ def test_instance_launch_and_terminate(): volume.attach_data.instance_id.should.equal(instance.id) volume.status.should.equal('in-use') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.terminate_instances([instance.id], dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + 
ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set') @@ -427,9 +427,9 @@ def test_instance_start_and_stop(): instance_ids = [instance.id for instance in instances] - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: stopped_instances = conn.stop_instances(instance_ids, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set') @@ -438,9 +438,9 @@ def test_instance_start_and_stop(): for instance in stopped_instances: instance.state.should.equal('stopping') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: started_instances = conn.start_instances([instances[0].id], dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set') @@ -454,9 +454,9 @@ def test_instance_reboot(): reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: instance.reboot(dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RebootInstance operation: Request 
would have succeeded, but DryRun flag is set') @@ -470,9 +470,9 @@ def test_instance_attribute_instance_type(): reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: instance.modify_attribute("instanceType", "m1.small", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set') @@ -491,9 +491,9 @@ def test_modify_instance_attribute_security_groups(): sg_id = 'sg-1234abcd' sg_id2 = 'sg-abcd4321' - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') @@ -512,9 +512,9 @@ def test_instance_attribute_user_data(): reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: instance.modify_attribute("userData", "this is my user data", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set') @@ -540,9 +540,9 @@ def test_instance_attribute_source_dest_check(): # 
Set to false (note: Boto converts bool to string, eg 'false') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: instance.modify_attribute("sourceDestCheck", False, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set') @@ -584,9 +584,9 @@ def test_user_data_with_run_instance(): def test_run_instance_with_security_group_name(): conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: group = conn.create_security_group('group1', "some description", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') @@ -745,9 +745,9 @@ def test_instance_with_nic_attach_detach(): set([group.id for group in eni.groups]).should.equal(set([security_group2.id])) # Attach - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.attach_network_interface(eni.id, instance.id, device_index=1, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') @@ -766,9 +766,9 @@ def test_instance_with_nic_attach_detach(): set([group.id for group in 
eni.groups]).should.equal(set([security_group1.id,security_group2.id])) # Detach - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') @@ -886,9 +886,9 @@ def test_get_instance_by_security_group(): security_group = conn.create_security_group('test', 'test') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.modify_instance_attribute(instance.id, "groupSet", [security_group.id], dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index 12b37860e..fe5e4945d 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -6,7 +6,7 @@ from nose.tools import assert_raises import re import boto -from boto.exception import EC2ResponseError, JSONResponseError +from boto.exception import EC2ResponseError import sure # noqa @@ -24,9 +24,9 @@ def test_igw_create(): conn.get_all_internet_gateways().should.have.length_of(0) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: igw = conn.create_internet_gateway(dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') 
ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set') @@ -44,9 +44,9 @@ def test_igw_attach(): igw = conn.create_internet_gateway() vpc = conn.create_vpc(VPC_CIDR) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') @@ -90,9 +90,9 @@ def test_igw_detach(): vpc = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc.id) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set') @@ -151,9 +151,9 @@ def test_igw_delete(): igw = conn.create_internet_gateway() conn.get_all_internet_gateways().should.have.length_of(1) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.delete_internet_gateway(igw.id, dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is 
set') diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index a35f0b962..6c4773200 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -7,7 +7,7 @@ import boto import six import sure # noqa -from boto.exception import EC2ResponseError, JSONResponseError +from boto.exception import EC2ResponseError from moto import mock_ec2_deprecated @@ -32,9 +32,9 @@ def test_key_pairs_invalid_id(): def test_key_pairs_create(): conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: kp = conn.create_key_pair('foo', dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') @@ -87,9 +87,9 @@ def test_key_pairs_delete_exist(): conn = boto.connect_ec2('the_key', 'the_secret') conn.create_key_pair('foo') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: r = conn.delete_key_pair('foo', dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set') @@ -102,9 +102,9 @@ def test_key_pairs_delete_exist(): def test_key_pairs_import(): conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: kp = conn.import_key_pair('foo', b'content', dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') 
ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 3968d9151..3056331be 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -9,7 +9,7 @@ from nose.tools import assert_raises import boto3 import boto from botocore.exceptions import ClientError -from boto.exception import EC2ResponseError, JSONResponseError +from boto.exception import EC2ResponseError import sure # noqa from moto import mock_ec2, mock_ec2_deprecated @@ -19,9 +19,9 @@ from moto import mock_ec2, mock_ec2_deprecated def test_create_and_describe_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: security_group = conn.create_security_group('test security group', 'this is a test security group', dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') @@ -121,9 +121,9 @@ def test_deleting_security_groups(): cm.exception.request_id.should_not.be.none # Delete by name - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.delete_security_group('test2', dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') @@ 
-150,9 +150,9 @@ def test_authorize_ip_range_and_revoke(): conn = boto.connect_ec2('the_key', 'the_secret') security_group = conn.create_security_group('test', 'test') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') @@ -171,9 +171,9 @@ def test_authorize_ip_range_and_revoke(): cm.exception.request_id.should_not.be.none # Actually revoke - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') @@ -185,9 +185,9 @@ def test_authorize_ip_range_and_revoke(): # Test for egress as well egress_security_group = conn.create_security_group('testegress', 'testegress', vpc_id='vpc-3432589') - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: success = conn.authorize_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) 
ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') @@ -203,9 +203,9 @@ def test_authorize_ip_range_and_revoke(): egress_security_group.revoke.when.called_with(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) # Actually revoke - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.revoke_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') @@ -339,9 +339,9 @@ def test_security_group_tagging(): sg = conn.create_security_group("test-sg", "Test SG", vpc.id) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: sg.add_tag("Test", "Tag", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') @@ -540,11 +540,11 @@ def test_security_group_tagging_boto3(): sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") - with assert_raises(JSONResponseError) as ex: + with assert_raises(ClientError) as ex: conn.create_tags(Resources=[sg['GroupId']], Tags=[{'Key': 'Test', 'Value': 'Tag'}], DryRun=True) - ex.exception.reason.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - 
ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.response['Error']['Code'].should.equal('DryRunOperation') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') conn.create_tags(Resources=[sg['GroupId']], Tags=[{'Key': 'Test', 'Value': 'Tag'}]) describe = conn.describe_security_groups(Filters=[{'Name': 'tag-value', 'Values': ['Tag']}]) diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 1933613e8..2d3cb3036 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -4,8 +4,10 @@ import datetime import boto import boto3 +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +import pytz import sure # noqa -from boto.exception import JSONResponseError from moto import mock_ec2, mock_ec2_deprecated from moto.backends import get_model @@ -13,98 +15,130 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds @mock_ec2 -@mock_ec2_deprecated def test_request_spot_instances(): conn = boto3.client('ec2', 'us-east-1') vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] subnet = conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] - conn = boto.connect_ec2() + conn.create_security_group(GroupName='group1', Description='description') + conn.create_security_group(GroupName='group2', Description='description') - conn.create_security_group('group1', 'description') - conn.create_security_group('group2', 'description') + start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc) + end_dt = datetime.datetime(2013, 1, 
2).replace(tzinfo=pytz.utc) + start = iso_8601_datetime_with_milliseconds(start_dt) + end = iso_8601_datetime_with_milliseconds(end_dt) - start = iso_8601_datetime_with_milliseconds(datetime.datetime(2013, 1, 1)) - end = iso_8601_datetime_with_milliseconds(datetime.datetime(2013, 1, 2)) - - with assert_raises(JSONResponseError) as ex: + with assert_raises(ClientError) as ex: request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', count=1, type='one-time', - valid_from=start, valid_until=end, launch_group="the-group", - availability_zone_group='my-group', key_name="test", - security_groups=['group1', 'group2'], user_data=b"some test data", - instance_type='m1.small', placement='us-east-1c', - kernel_id="test-kernel", ramdisk_id="test-ramdisk", - monitoring_enabled=True, subnet_id=subnet_id, dry_run=True + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": "test", + "SecurityGroups": ['group1', 'group2'], + "UserData": b"some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', + }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, + DryRun=True, ) - ex.exception.reason.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.response['Error']['Code'].should.equal('DryRunOperation') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal('An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') request = 
conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', count=1, type='one-time', - valid_from=start, valid_until=end, launch_group="the-group", - availability_zone_group='my-group', key_name="test", - security_groups=['group1', 'group2'], user_data=b"some test data", - instance_type='m1.small', placement='us-east-1c', - kernel_id="test-kernel", ramdisk_id="test-ramdisk", - monitoring_enabled=True, subnet_id=subnet_id, + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": "test", + "SecurityGroups": ['group1', 'group2'], + "UserData": b"some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', + }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, ) - requests = conn.get_all_spot_instance_requests() + requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] requests.should.have.length_of(1) request = requests[0] - request.state.should.equal("open") - request.price.should.equal(0.5) - request.launch_specification.image_id.should.equal('ami-abcd1234') - request.type.should.equal('one-time') - request.valid_from.should.equal(start) - request.valid_until.should.equal(end) - request.launch_group.should.equal("the-group") - request.availability_zone_group.should.equal('my-group') - request.launch_specification.key_name.should.equal("test") - security_group_names = [group.name for group in request.launch_specification.groups] + request['State'].should.equal("open") + request['SpotPrice'].should.equal("0.5") + request['Type'].should.equal('one-time') + request['ValidFrom'].should.equal(start_dt) + request['ValidUntil'].should.equal(end_dt) + request['LaunchGroup'].should.equal("the-group") + request['AvailabilityZoneGroup'].should.equal('my-group') + + launch_spec = 
request['LaunchSpecification'] + security_group_names = [group['GroupName'] for group in launch_spec['SecurityGroups']] set(security_group_names).should.equal(set(['group1', 'group2'])) - request.launch_specification.instance_type.should.equal('m1.small') - request.launch_specification.placement.should.equal('us-east-1c') - request.launch_specification.kernel.should.equal("test-kernel") - request.launch_specification.ramdisk.should.equal("test-ramdisk") - request.launch_specification.subnet_id.should.equal(subnet_id) + + launch_spec['ImageId'].should.equal('ami-abcd1234') + launch_spec['KeyName'].should.equal("test") + launch_spec['InstanceType'].should.equal('m1.small') + launch_spec['KernelId'].should.equal("test-kernel") + launch_spec['RamdiskId'].should.equal("test-ramdisk") + launch_spec['SubnetId'].should.equal(subnet_id) -@mock_ec2_deprecated +@mock_ec2 def test_request_spot_instances_default_arguments(): """ Test that moto set the correct default arguments """ - conn = boto.connect_ec2() + conn = boto3.client('ec2', 'us-east-1') request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', + SpotPrice="0.5", + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + } ) - requests = conn.get_all_spot_instance_requests() + requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] requests.should.have.length_of(1) request = requests[0] - request.state.should.equal("open") - request.price.should.equal(0.5) - request.launch_specification.image_id.should.equal('ami-abcd1234') - request.type.should.equal('one-time') - request.valid_from.should.equal(None) - request.valid_until.should.equal(None) - request.launch_group.should.equal(None) - request.availability_zone_group.should.equal(None) - request.launch_specification.key_name.should.equal(None) - security_group_names = [group.name for group in request.launch_specification.groups] + request['State'].should.equal("open") + request['SpotPrice'].should.equal("0.5") + 
request['Type'].should.equal('one-time') + request.shouldnt.contain('ValidFrom') + request.shouldnt.contain('ValidUntil') + request.shouldnt.contain('LaunchGroup') + request.shouldnt.contain('AvailabilityZoneGroup') + + launch_spec = request['LaunchSpecification'] + + security_group_names = [group['GroupName'] for group in launch_spec['SecurityGroups']] security_group_names.should.equal(["default"]) - request.launch_specification.instance_type.should.equal('m1.small') - request.launch_specification.placement.should.equal(None) - request.launch_specification.kernel.should.equal(None) - request.launch_specification.ramdisk.should.equal(None) - request.launch_specification.subnet_id.should.equal(None) + + launch_spec['ImageId'].should.equal('ami-abcd1234') + request.shouldnt.contain('KeyName') + launch_spec['InstanceType'].should.equal('m1.small') + request.shouldnt.contain('KernelId') + request.shouldnt.contain('RamdiskId') + request.shouldnt.contain('SubnetId') @mock_ec2_deprecated @@ -119,9 +153,9 @@ def test_cancel_spot_instance_request(): requests.should.have.length_of(1) - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') @@ -148,7 +182,7 @@ def test_request_spot_instances_fulfilled(): request.state.should.equal("open") - get_model('SpotInstanceRequest')[0].state = 'active' + get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active' requests = conn.get_all_spot_instance_requests() requests.should.have.length_of(1) @@ -218,7 +252,7 @@ def test_request_spot_instances_setting_instance_id(): request = conn.request_spot_instances( 
price=0.5, image_id='ami-abcd1234') - req = get_model('SpotInstanceRequest')[0] + req = get_model('SpotInstanceRequest', 'us-east-1')[0] req.state = 'active' req.instance_id = 'i-12345678' diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 1084e44c4..23b7d0bd4 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -4,7 +4,7 @@ from nose.tools import assert_raises import itertools import boto -from boto.exception import EC2ResponseError, JSONResponseError +from boto.exception import EC2ResponseError from boto.ec2.instance import Reservation import sure # noqa @@ -18,9 +18,9 @@ def test_add_tag(): reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: instance.add_tag("a key", "some value", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') @@ -45,9 +45,9 @@ def test_remove_tag(): tag.name.should.equal("a key") tag.value.should.equal("some value") - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: instance.remove_tag("a key", dry_run=True) - ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set') @@ -96,9 +96,9 @@ def test_create_tags(): 'another key': 'some other value', 'blank key': ''} - with assert_raises(JSONResponseError) as ex: + with assert_raises(EC2ResponseError) as ex: conn.create_tags(instance.id, tag_dict, dry_run=True) 
- ex.exception.reason.should.equal('DryRunOperation') + ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py index a24aa4bd4..4b06d7516 100644 --- a/tests/test_emr/test_emr.py +++ b/tests/test_emr/test_emr.py @@ -112,7 +112,7 @@ def test_describe_jobflows(): args = run_jobflow_args.copy() expected = {} - for idx in range(400): + for idx in range(4): cluster_name = 'cluster' + str(idx) args['name'] = cluster_name cluster_id = conn.run_jobflow(**args) @@ -128,7 +128,7 @@ def test_describe_jobflows(): timestamp = datetime.now(pytz.utc) time.sleep(1) - for idx in range(400, 600): + for idx in range(4, 6): cluster_name = 'cluster' + str(idx) args['name'] = cluster_name cluster_id = conn.run_jobflow(**args) @@ -139,7 +139,7 @@ def test_describe_jobflows(): 'state': 'TERMINATED' } jobs = conn.describe_jobflows() - jobs.should.have.length_of(512) + jobs.should.have.length_of(6) for cluster_id, y in expected.items(): resp = conn.describe_jobflows(jobflow_ids=[cluster_id]) @@ -147,15 +147,15 @@ def test_describe_jobflows(): resp[0].jobflowid.should.equal(cluster_id) resp = conn.describe_jobflows(states=['WAITING']) - resp.should.have.length_of(400) + resp.should.have.length_of(4) for x in resp: x.state.should.equal('WAITING') resp = conn.describe_jobflows(created_before=timestamp) - resp.should.have.length_of(400) + resp.should.have.length_of(4) resp = conn.describe_jobflows(created_after=timestamp) - resp.should.have.length_of(200) + resp.should.have.length_of(2) @mock_emr_deprecated diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 1a735967f..4fb5c3d79 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -128,7 +128,7 @@ def 
test_describe_job_flows(): args = deepcopy(run_job_flow_args) expected = {} - for idx in range(400): + for idx in range(4): cluster_name = 'cluster' + str(idx) args['Name'] = cluster_name cluster_id = client.run_job_flow(**args)['JobFlowId'] @@ -144,7 +144,7 @@ def test_describe_job_flows(): timestamp = datetime.now(pytz.utc) time.sleep(1) - for idx in range(400, 600): + for idx in range(4, 6): cluster_name = 'cluster' + str(idx) args['Name'] = cluster_name cluster_id = client.run_job_flow(**args)['JobFlowId'] @@ -156,7 +156,7 @@ def test_describe_job_flows(): } resp = client.describe_job_flows() - resp['JobFlows'].should.have.length_of(512) + resp['JobFlows'].should.have.length_of(6) for cluster_id, y in expected.items(): resp = client.describe_job_flows(JobFlowIds=[cluster_id]) @@ -164,15 +164,15 @@ def test_describe_job_flows(): resp['JobFlows'][0]['JobFlowId'].should.equal(cluster_id) resp = client.describe_job_flows(JobFlowStates=['WAITING']) - resp['JobFlows'].should.have.length_of(400) + resp['JobFlows'].should.have.length_of(4) for x in resp['JobFlows']: x['ExecutionStatusDetail']['State'].should.equal('WAITING') resp = client.describe_job_flows(CreatedBefore=timestamp) - resp['JobFlows'].should.have.length_of(400) + resp['JobFlows'].should.have.length_of(4) resp = client.describe_job_flows(CreatedAfter=timestamp) - resp['JobFlows'].should.have.length_of(200) + resp['JobFlows'].should.have.length_of(2) @mock_emr @@ -327,13 +327,13 @@ def test_run_job_flow(): @mock_emr def test_run_job_flow_with_invalid_params(): client = boto3.client('emr', region_name='us-east-1') - with assert_raises(ClientError) as e: + with assert_raises(ClientError) as ex: # cannot set both AmiVersion and ReleaseLabel args = deepcopy(run_job_flow_args) args['AmiVersion'] = '2.4' args['ReleaseLabel'] = 'emr-5.0.0' client.run_job_flow(**args) - e.exception.response['Error']['Code'].should.equal('ValidationException') + 
ex.exception.response['Error']['Message'].should.contain('ValidationException') @mock_emr diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index a51240b2f..6504a5483 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -201,7 +201,7 @@ def test_get_user(): def test_list_users(): path_prefix = '/' max_items = 10 - conn = boto3.client('iam') + conn = boto3.client('iam', region_name='us-east-1') conn.create_user(UserName='my-user') response = conn.list_users(PathPrefix=path_prefix, MaxItems=max_items) user = response['Users'][0] @@ -337,7 +337,7 @@ def test_managed_policy(): @mock_iam def test_boto3_create_login_profile(): - conn = boto3.client('iam') + conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): conn.create_login_profile(UserName='my-user', Password='Password') diff --git a/tests/test_kinesis/test_firehose.py b/tests/test_kinesis/test_firehose.py index 14ee1916b..371be253b 100644 --- a/tests/test_kinesis/test_firehose.py +++ b/tests/test_kinesis/test_firehose.py @@ -4,7 +4,6 @@ import datetime from botocore.exceptions import ClientError import boto3 -from freezegun import freeze_time import sure # noqa from moto import mock_kinesis @@ -37,7 +36,6 @@ def create_stream(client, stream_name): @mock_kinesis -@freeze_time("2015-03-01") def test_create_stream(): client = boto3.client('firehose', region_name='us-east-1') @@ -48,11 +46,8 @@ def test_create_stream(): stream_description = response['DeliveryStreamDescription'] # Sure and Freezegun don't play nicely together - created = stream_description.pop('CreateTimestamp') - last_updated = stream_description.pop('LastUpdateTimestamp') - from dateutil.tz import tzlocal - assert created == datetime.datetime(2015, 3, 1, tzinfo=tzlocal()) - assert last_updated == datetime.datetime(2015, 3, 1, tzinfo=tzlocal()) + _ = stream_description.pop('CreateTimestamp') + _ = stream_description.pop('LastUpdateTimestamp') stream_description.should.equal({ 
'DeliveryStreamName': 'stream1', @@ -88,7 +83,6 @@ def test_create_stream(): @mock_kinesis -@freeze_time("2015-03-01") def test_create_stream_without_redshift(): client = boto3.client('firehose', region_name='us-east-1') @@ -111,11 +105,8 @@ def test_create_stream_without_redshift(): stream_description = response['DeliveryStreamDescription'] # Sure and Freezegun don't play nicely together - created = stream_description.pop('CreateTimestamp') - last_updated = stream_description.pop('LastUpdateTimestamp') - from dateutil.tz import tzlocal - assert created == datetime.datetime(2015, 3, 1, tzinfo=tzlocal()) - assert last_updated == datetime.datetime(2015, 3, 1, tzinfo=tzlocal()) + _ = stream_description.pop('CreateTimestamp') + _ = stream_description.pop('LastUpdateTimestamp') stream_description.should.equal({ 'DeliveryStreamName': 'stream1', @@ -142,7 +133,6 @@ def test_create_stream_without_redshift(): }) @mock_kinesis -@freeze_time("2015-03-01") def test_deescribe_non_existant_stream(): client = boto3.client('firehose', region_name='us-east-1') @@ -150,7 +140,6 @@ def test_deescribe_non_existant_stream(): @mock_kinesis -@freeze_time("2015-03-01") def test_list_and_delete_stream(): client = boto3.client('firehose', region_name='us-east-1') diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index dd68eec0e..f376375a0 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -308,7 +308,7 @@ def test_hosted_zone_private_zone_preserved(): @mock_route53 def test_hosted_zone_private_zone_preserved_boto3(): - conn = boto3.client('route53') + conn = boto3.client('route53', region_name='us-east-1') # TODO: actually create_hosted_zone statements with PrivateZone=True, but without # a _valid_ vpc-id should fail. 
firstzone = conn.create_hosted_zone( @@ -333,8 +333,20 @@ def test_hosted_zone_private_zone_preserved_boto3(): @mock_route53 def test_list_or_change_tags_for_resource_request(): - conn = boto3.client('route53') - healthcheck_id = str(uuid.uuid4()) + conn = boto3.client('route53', region_name='us-east-1') + health_check = conn.create_health_check( + CallerReference='foobar', + HealthCheckConfig={ + 'IPAddress': '192.0.2.44', + 'Port': 123, + 'Type': 'HTTP', + 'ResourcePath': '/', + 'RequestInterval': 30, + 'FailureThreshold': 123, + 'HealthThreshold': 123, + } + ) + healthcheck_id = health_check['HealthCheck']['Id'] tag1 = {"Key": "Deploy", "Value": "True"} tag2 = {"Key": "Name", "Value": "UnitTest"} diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 874230737..56bdfff1c 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -20,17 +20,21 @@ from nose.tools import assert_raises import sure # noqa -from moto import mock_s3, mock_s3_deprecated +from moto import settings, mock_s3, mock_s3_deprecated +import moto.s3.models as s3model - -REDUCED_PART_SIZE = 256 +if settings.TEST_SERVER_MODE: + REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE + EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"' +else: + REDUCED_PART_SIZE = 256 + EXPECTED_ETAG = '"66d1a1a2ed08fd05c137f316af4ff255-2"' def reduced_min_part_size(f): """ speed up tests by temporarily making the multipart minimum part size small """ - import moto.s3.models as s3model orig_size = s3model.UPLOAD_PART_MIN_SIZE @wraps(f) @@ -49,24 +53,23 @@ class MyModel(object): self.value = value def save(self): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.get_bucket('mybucket') - k = Key(bucket) - k.key = self.name - k.set_contents_from_string(self.value) + s3 = boto3.client('s3', region_name='us-east-1') + s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value) -@mock_s3_deprecated +@mock_s3 def test_my_model_save(): # Create Bucket so that test can run - 
conn = boto.connect_s3('the_key', 'the_secret') - conn.create_bucket('mybucket') + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') #################################### model_instance = MyModel('steve', 'is awesome') model_instance.save() - conn.get_bucket('mybucket').get_key('steve').get_contents_as_string().should.equal(b'is awesome') + body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8") + + assert body == b'is awesome' @mock_s3_deprecated @@ -190,8 +193,7 @@ def test_multipart_etag(): multipart.upload_part_from_file(BytesIO(part2), 2) multipart.complete_upload() # we should get both parts as the key contents - bucket.get_key("the-key").etag.should.equal( - '"66d1a1a2ed08fd05c137f316af4ff255-2"') + bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG) @mock_s3_deprecated @@ -544,16 +546,6 @@ def test_delete_keys_with_invalid(): keys[0].name.should.equal('file1') -@mock_s3 -def test_bucket_method_not_implemented(): - requests.patch.when.called_with("https://foobar.s3.amazonaws.com/").should.throw(NotImplementedError) - - -@mock_s3 -def test_key_method_not_implemented(): - requests.post.when.called_with("https://foobar.s3.amazonaws.com/foo").should.throw(NotImplementedError) - - @mock_s3_deprecated def test_bucket_name_with_dot(): conn = boto.connect_s3() @@ -1241,7 +1233,7 @@ def test_boto3_multipart_etag(): for i, etag in enumerate(etags, 1)]}) # we should get both parts as the key contents resp = s3.get_object(Bucket='mybucket', Key='the-key') - resp['ETag'].should.equal('"66d1a1a2ed08fd05c137f316af4ff255-2"') + resp['ETag'].should.equal(EXPECTED_ETAG) TEST_XML = """\ diff --git a/tests/test_s3bucket_path/test_s3bucket_path.py b/tests/test_s3bucket_path/test_s3bucket_path.py index 24c5f7fa5..528c75368 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path.py +++ b/tests/test_s3bucket_path/test_s3bucket_path.py @@ -211,16 +211,6 @@ def test_post_with_metadata_to_bucket(): 
bucket.get_key('the-key').get_metadata('test').should.equal('metadata') -@mock_s3 -def test_bucket_method_not_implemented(): - requests.patch.when.called_with("https://s3.amazonaws.com/foobar").should.throw(NotImplementedError) - - -@mock_s3 -def test_key_method_not_implemented(): - requests.post.when.called_with("https://s3.amazonaws.com/foobar/foo").should.throw(NotImplementedError) - - @mock_s3_deprecated def test_bucket_name_with_dot(): conn = create_connection() diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index dae7e2b83..dab2a569b 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -67,20 +67,3 @@ def test_publish_to_http(): response = conn.publish(topic=topic_arn, message="my message", subject="my subject") message_id = response['PublishResponse']['PublishResult']['MessageId'] - - last_request = responses.calls[-1].request - last_request.method.should.equal("POST") - parse_qs(last_request.body).should.equal({ - "Type": ["Notification"], - "MessageId": [message_id], - "TopicArn": ["arn:aws:sns:{0}:123456789012:some-topic".format(conn.region.name)], - "Subject": ["my subject"], - "Message": ["my message"], - "Timestamp": ["2013-01-01T00:00:00.000Z"], - "SignatureVersion": ["1"], - "Signature": ["EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc="], - "SigningCertURL": ["https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem"], - "UnsubscribeURL": ["https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"], - }) - - diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index e31b969f1..edf2948fb 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ 
b/tests/test_sns/test_publishing_boto3.py @@ -72,18 +72,3 @@ def test_publish_to_http(): response = conn.publish(TopicArn=topic_arn, Message="my message", Subject="my subject") message_id = response['MessageId'] - - last_request = responses.calls[-2].request - last_request.method.should.equal("POST") - parse_qs(last_request.body).should.equal({ - "Type": ["Notification"], - "MessageId": [message_id], - "TopicArn": ["arn:aws:sns:{0}:123456789012:some-topic".format(conn._client_config.region_name)], - "Subject": ["my subject"], - "Message": ["my message"], - "Timestamp": ["2013-01-01T00:00:00.000Z"], - "SignatureVersion": ["1"], - "Signature": ["EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc="], - "SigningCertURL": ["https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem"], - "UnsubscribeURL": ["https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"], - }) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index fd496c214..89ea7413d 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -408,11 +408,6 @@ def test_delete_batch_operation(): queue.count().should.equal(1) -@mock_sqs -def test_sqs_method_not_implemented(): - requests.post.when.called_with("https://sqs.amazonaws.com/?Action=[foobar]").should.throw(NotImplementedError) - - @mock_sqs_deprecated def test_queue_attributes(): conn = boto.connect_sqs('the_key', 'the_secret') diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 870f14860..19865ca77 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -68,7 +68,7 @@ def test_assume_role(): @mock_sts def test_get_caller_identity(): - identity = boto3.client("sts").get_caller_identity() + identity = boto3.client("sts", 
region_name='us-east-1').get_caller_identity() identity['Arn'].should.equal('arn:aws:sts::123456789012:user/moto') identity['UserId'].should.equal('AKIAIOSFODNN7EXAMPLE') diff --git a/tests/test_swf/utils.py b/tests/test_swf/utils.py index 2df0fcc92..756d17c27 100644 --- a/tests/test_swf/utils.py +++ b/tests/test_swf/utils.py @@ -1,6 +1,5 @@ import boto -from moto import mock_swf from moto.swf.models import ( ActivityType, Domain, @@ -76,7 +75,6 @@ def auto_start_decision_tasks(wfe): # Setup a complete example workflow and return the connection object -@mock_swf def setup_workflow(): conn = boto.connect_swf("the_key", "the_secret") conn.register_domain("test-domain", "60", description="A test domain") diff --git a/tox.ini b/tox.ini index 368eba9c2..3fe5d0141 100644 --- a/tox.ini +++ b/tox.ini @@ -11,3 +11,4 @@ commands = [flake8] ignore = E128,E501 +exclude = moto/packages,dist From 1433f288462e19178e5d2bf8eea7aa6764230cba Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 23 Feb 2017 19:50:34 -0500 Subject: [PATCH 056/213] Update s3 test. 
--- tests/test_s3/test_s3.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 56bdfff1c..e424ba6a3 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -72,17 +72,15 @@ def test_my_model_save(): assert body == b'is awesome' -@mock_s3_deprecated +@mock_s3 def test_key_etag(): - # Create Bucket so that test can run - conn = boto.connect_s3('the_key', 'the_secret') - conn.create_bucket('mybucket') - #################################### + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - conn.get_bucket('mybucket').get_key('steve').etag.should.equal( + conn.Bucket('mybucket').Object('steve').e_tag.should.equal( '"d32bda93738f7e03adb22e66c90fbc04"') From f37bad0e0070c87c0be5b0077cb8635d88a09c34 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 23 Feb 2017 21:37:43 -0500 Subject: [PATCH 057/213] Lints. 
--- moto/__init__.py | 2 +- moto/apigateway/__init__.py | 2 +- moto/apigateway/exceptions.py | 4 +- moto/apigateway/models.py | 163 +++--- moto/apigateway/responses.py | 61 ++- moto/autoscaling/__init__.py | 2 +- moto/autoscaling/models.py | 67 ++- moto/autoscaling/responses.py | 39 +- moto/awslambda/__init__.py | 2 +- moto/awslambda/models.py | 23 +- moto/awslambda/responses.py | 2 - moto/cloudformation/__init__.py | 5 +- moto/cloudformation/exceptions.py | 4 +- moto/cloudformation/models.py | 14 +- moto/cloudformation/parsing.py | 58 ++- moto/cloudformation/responses.py | 21 +- moto/cloudwatch/__init__.py | 2 +- moto/cloudwatch/models.py | 12 +- moto/cloudwatch/responses.py | 27 +- moto/core/exceptions.py | 6 +- moto/core/models.py | 20 +- moto/core/responses.py | 44 +- moto/core/utils.py | 2 +- moto/datapipeline/__init__.py | 2 +- moto/datapipeline/models.py | 14 +- moto/datapipeline/responses.py | 9 +- moto/dynamodb/models.py | 14 +- moto/dynamodb/responses.py | 18 +- moto/dynamodb2/__init__.py | 2 +- moto/dynamodb2/comparisons.py | 12 +- moto/dynamodb2/models.py | 80 +-- moto/dynamodb2/responses.py | 79 +-- moto/ec2/__init__.py | 2 +- moto/ec2/exceptions.py | 41 ++ moto/ec2/models.py | 467 ++++++++++++------ moto/ec2/responses/__init__.py | 1 + moto/ec2/responses/amazon_dev_pay.py | 4 +- moto/ec2/responses/amis.py | 31 +- .../availability_zones_and_regions.py | 2 + moto/ec2/responses/customer_gateways.py | 6 +- moto/ec2/responses/dhcp_options.py | 7 +- moto/ec2/responses/elastic_block_store.py | 62 ++- moto/ec2/responses/elastic_ip_addresses.py | 58 ++- .../responses/elastic_network_interfaces.py | 35 +- moto/ec2/responses/general.py | 1 + moto/ec2/responses/instances.py | 46 +- moto/ec2/responses/internet_gateways.py | 7 +- moto/ec2/responses/ip_addresses.py | 7 +- moto/ec2/responses/key_pairs.py | 6 +- moto/ec2/responses/monitoring.py | 7 +- moto/ec2/responses/nat_gateways.py | 3 +- moto/ec2/responses/network_acls.py | 3 +- 
moto/ec2/responses/placement_groups.py | 10 +- moto/ec2/responses/reserved_instances.py | 19 +- moto/ec2/responses/route_tables.py | 49 +- moto/ec2/responses/security_groups.py | 12 +- moto/ec2/responses/spot_fleets.py | 16 +- moto/ec2/responses/spot_instances.py | 30 +- moto/ec2/responses/subnets.py | 4 +- moto/ec2/responses/tags.py | 6 +- .../ec2/responses/virtual_private_gateways.py | 2 + moto/ec2/responses/vm_export.py | 10 +- moto/ec2/responses/vm_import.py | 13 +- moto/ec2/responses/vpc_peering_connections.py | 19 +- moto/ec2/responses/vpcs.py | 7 +- moto/ec2/responses/vpn_connections.py | 10 +- moto/ec2/responses/windows.py | 13 +- moto/ec2/utils.py | 65 ++- moto/ecs/__init__.py | 2 +- moto/ecs/models.py | 181 ++++--- moto/ecs/responses.py | 117 ++--- moto/elb/__init__.py | 2 +- moto/elb/exceptions.py | 6 +- moto/elb/models.py | 57 ++- moto/elb/responses.py | 90 ++-- moto/emr/__init__.py | 2 +- moto/emr/models.py | 53 +- moto/emr/responses.py | 66 ++- moto/emr/utils.py | 6 +- moto/events/models.py | 16 +- moto/events/responses.py | 9 +- moto/glacier/__init__.py | 2 +- moto/glacier/models.py | 1 + moto/glacier/responses.py | 3 +- moto/iam/__init__.py | 2 +- moto/iam/models.py | 70 ++- moto/iam/responses.py | 36 +- moto/instance_metadata/__init__.py | 2 +- moto/instance_metadata/models.py | 1 + moto/instance_metadata/responses.py | 4 +- moto/kinesis/__init__.py | 2 +- moto/kinesis/exceptions.py | 5 + moto/kinesis/models.py | 77 +-- moto/kinesis/responses.py | 33 +- moto/kinesis/utils.py | 3 +- moto/kms/__init__.py | 2 +- moto/kms/models.py | 5 +- moto/kms/responses.py | 30 +- moto/opsworks/__init__.py | 2 +- moto/opsworks/exceptions.py | 2 + moto/opsworks/models.py | 28 +- moto/opsworks/responses.py | 36 +- moto/packages/httpretty/__init__.py | 1 + moto/packages/httpretty/compat.py | 3 + moto/packages/httpretty/core.py | 42 +- moto/packages/httpretty/errors.py | 1 + moto/packages/responses/responses.py | 5 +- moto/packages/responses/setup.py | 1 + 
moto/packages/responses/test_responses.py | 1 + moto/rds/__init__.py | 2 +- moto/rds/exceptions.py | 4 + moto/rds/models.py | 27 +- moto/rds/responses.py | 27 +- moto/rds2/__init__.py | 2 +- moto/rds2/exceptions.py | 6 + moto/rds2/models.py | 167 ++++--- moto/rds2/responses.py | 47 +- moto/redshift/__init__.py | 2 +- moto/redshift/exceptions.py | 6 + moto/redshift/models.py | 46 +- moto/redshift/responses.py | 21 +- moto/route53/models.py | 37 +- moto/route53/responses.py | 78 +-- moto/s3/__init__.py | 2 +- moto/s3/exceptions.py | 2 + moto/s3/models.py | 50 +- moto/s3/responses.py | 63 ++- moto/s3/utils.py | 3 +- moto/server.py | 15 +- moto/ses/__init__.py | 2 +- moto/ses/models.py | 5 + moto/sns/__init__.py | 2 +- moto/sns/models.py | 33 +- moto/sns/responses.py | 25 +- moto/sqs/__init__.py | 2 +- moto/sqs/models.py | 13 +- moto/sqs/responses.py | 35 +- moto/sqs/utils.py | 19 +- moto/sts/models.py | 4 + moto/sts/responses.py | 1 + moto/swf/__init__.py | 2 +- moto/swf/exceptions.py | 23 +- moto/swf/models/__init__.py | 37 +- moto/swf/models/activity_task.py | 1 + moto/swf/models/activity_type.py | 1 + moto/swf/models/decision_task.py | 4 +- moto/swf/models/domain.py | 1 + moto/swf/models/generic_type.py | 1 + moto/swf/models/history_event.py | 4 +- moto/swf/models/timeout.py | 1 + moto/swf/models/workflow_execution.py | 36 +- moto/swf/models/workflow_type.py | 1 + moto/swf/responses.py | 51 +- tests/backport_assert_raises.py | 1 + tests/helpers.py | 5 +- tests/test_apigateway/test_apigateway.py | 283 ++++++----- tests/test_autoscaling/test_autoscaling.py | 122 ++--- .../test_launch_configurations.py | 18 +- tests/test_awslambda/test_lambda.py | 51 +- .../rds_mysql_with_db_parameter_group.py | 361 +++++++------- .../fixtures/rds_mysql_with_read_replica.py | 355 ++++++------- .../test_cloudformation/fixtures/redshift.py | 360 +++++++------- .../route53_ec2_instance_with_public_ip.py | 54 +- .../fixtures/route53_health_check.py | 48 +- 
.../fixtures/route53_roundrobin.py | 76 +-- .../test_cloudformation_stack_crud.py | 47 +- .../test_cloudformation_stack_crud_boto3.py | 21 +- .../test_cloudformation_stack_integration.py | 306 +++++++----- tests/test_cloudformation/test_server.py | 11 +- .../test_cloudformation/test_stack_parsing.py | 19 +- tests/test_cloudwatch/test_cloudwatch.py | 16 +- tests/test_core/test_decorator_calls.py | 5 +- tests/test_core/test_instance_metadata.py | 6 +- tests/test_core/test_responses.py | 24 +- tests/test_core/test_server.py | 9 +- tests/test_core/test_url_mapping.py | 3 +- tests/test_datapipeline/test_datapipeline.py | 9 +- tests/test_datapipeline/test_server.py | 7 +- tests/test_dynamodb/test_dynamodb.py | 9 +- .../test_dynamodb_table_with_range_key.py | 24 +- .../test_dynamodb_table_without_range_key.py | 3 +- tests/test_dynamodb2/test_dynamodb.py | 15 +- .../test_dynamodb_table_with_range_key.py | 130 +++-- .../test_dynamodb_table_without_range_key.py | 21 +- tests/test_ec2/test_amis.py | 129 +++-- tests/test_ec2/test_customer_gateways.py | 12 +- tests/test_ec2/test_dhcp_options.py | 45 +- tests/test_ec2/test_elastic_block_store.py | 169 ++++--- tests/test_ec2/test_elastic_ip_addresses.py | 85 ++-- .../test_elastic_network_interfaces.py | 90 ++-- tests/test_ec2/test_instances.py | 251 ++++++---- tests/test_ec2/test_internet_gateways.py | 39 +- tests/test_ec2/test_key_pairs.py | 9 +- tests/test_ec2/test_nat_gateway.py | 21 +- tests/test_ec2/test_regions.py | 14 +- tests/test_ec2/test_route_tables.py | 92 ++-- tests/test_ec2/test_security_groups.py | 178 ++++--- tests/test_ec2/test_server.py | 3 +- tests/test_ec2/test_spot_fleet.py | 112 +++-- tests/test_ec2/test_spot_instances.py | 64 +-- tests/test_ec2/test_subnets.py | 66 ++- tests/test_ec2/test_tags.py | 26 +- .../test_ec2/test_virtual_private_gateways.py | 1 + tests/test_ec2/test_vpc_peering.py | 1 - tests/test_ec2/test_vpcs.py | 17 +- tests/test_ec2/test_vpn_connections.py | 9 +- 
tests/test_ecs/test_ecs_boto3.py | 245 +++++---- tests/test_elb/test_elb.py | 243 +++++---- tests/test_emr/test_emr.py | 24 +- tests/test_emr/test_emr_boto3.py | 93 ++-- tests/test_glacier/test_glacier_jobs.py | 18 +- tests/test_glacier/test_glacier_server.py | 3 +- tests/test_iam/test_iam.py | 93 ++-- tests/test_iam/test_iam_groups.py | 6 +- tests/test_iam/test_server.py | 5 +- tests/test_kinesis/test_firehose.py | 10 +- tests/test_kinesis/test_kinesis.py | 107 ++-- tests/test_kms/test_kms.py | 155 ++++-- tests/test_opsworks/test_instances.py | 8 +- tests/test_opsworks/test_layers.py | 4 +- tests/test_opsworks/test_stack.py | 2 - tests/test_rds/test_rds.py | 49 +- tests/test_rds2/test_rds2.py | 302 +++++++---- tests/test_rds2/test_server.py | 2 +- tests/test_redshift/test_redshift.py | 120 +++-- tests/test_redshift/test_server.py | 3 +- tests/test_route53/test_route53.py | 112 +++-- tests/test_s3/test_s3.py | 146 ++++-- tests/test_s3/test_s3_lifecycle.py | 4 +- tests/test_s3/test_s3_utils.py | 7 +- tests/test_s3/test_server.py | 6 +- .../test_bucket_path_server.py | 3 +- .../test_s3bucket_path/test_s3bucket_path.py | 25 +- .../test_s3bucket_path_utils.py | 3 +- tests/test_ses/test_ses.py | 29 +- tests/test_sns/test_application.py | 89 ++-- tests/test_sns/test_application_boto3.py | 36 +- tests/test_sns/test_publishing.py | 18 +- tests/test_sns/test_publishing_boto3.py | 3 +- tests/test_sns/test_server.py | 6 +- tests/test_sns/test_subscriptions.py | 45 +- tests/test_sns/test_subscriptions_boto3.py | 18 +- tests/test_sns/test_topics.py | 42 +- tests/test_sns/test_topics_boto3.py | 18 +- tests/test_sqs/test_server.py | 9 +- tests/test_sqs/test_sqs.py | 43 +- tests/test_sts/test_sts.py | 25 +- tests/test_swf/models/test_activity_task.py | 3 +- tests/test_swf/models/test_decision_task.py | 3 +- tests/test_swf/models/test_domain.py | 27 +- tests/test_swf/models/test_generic_type.py | 10 +- .../models/test_workflow_execution.py | 33 +- 
.../test_swf/responses/test_activity_tasks.py | 78 ++- .../test_swf/responses/test_activity_types.py | 15 +- .../test_swf/responses/test_decision_tasks.py | 51 +- tests/test_swf/responses/test_domains.py | 3 +- tests/test_swf/responses/test_timeouts.py | 27 +- .../responses/test_workflow_executions.py | 27 +- .../test_swf/responses/test_workflow_types.py | 18 +- tests/test_swf/utils.py | 6 +- 260 files changed, 6370 insertions(+), 3773 deletions(-) diff --git a/moto/__init__.py b/moto/__init__.py index 5a16a0a8e..546603b00 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals import logging -#logging.getLogger('boto').setLevel(logging.CRITICAL) +# logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' __version__ = '0.4.31' diff --git a/moto/apigateway/__init__.py b/moto/apigateway/__init__.py index c6ea9a3bc..98b2058d9 100644 --- a/moto/apigateway/__init__.py +++ b/moto/apigateway/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import apigateway_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator apigateway_backend = apigateway_backends['us-east-1'] mock_apigateway = base_decorator(apigateway_backends) diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py index 77a1c932a..d4cf8d1c7 100644 --- a/moto/apigateway/exceptions.py +++ b/moto/apigateway/exceptions.py @@ -4,9 +4,7 @@ from moto.core.exceptions import RESTError class StageNotFoundException(RESTError): code = 404 + def __init__(self): super(StageNotFoundException, self).__init__( "NotFoundException", "Invalid stage identifier specified") - - - diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 4b09f44bc..6585d19f5 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -14,15 +14,18 @@ STAGE_URL = 
"https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_nam class Deployment(dict): + def __init__(self, deployment_id, name, description=""): super(Deployment, self).__init__() self['id'] = deployment_id self['stageName'] = name self['description'] = description - self['createdDate'] = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) + self['createdDate'] = iso_8601_datetime_with_milliseconds( + datetime.datetime.now()) class IntegrationResponse(dict): + def __init__(self, status_code, selection_pattern=None): self['responseTemplates'] = {"application/json": None} self['statusCode'] = status_code @@ -31,6 +34,7 @@ class IntegrationResponse(dict): class Integration(dict): + def __init__(self, integration_type, uri, http_method, request_templates=None): super(Integration, self).__init__() self['type'] = integration_type @@ -42,7 +46,8 @@ class Integration(dict): } def create_integration_response(self, status_code, selection_pattern): - integration_response = IntegrationResponse(status_code, selection_pattern) + integration_response = IntegrationResponse( + status_code, selection_pattern) self["integrationResponses"][status_code] = integration_response return integration_response @@ -54,12 +59,14 @@ class Integration(dict): class MethodResponse(dict): + def __init__(self, status_code): super(MethodResponse, self).__init__() self['statusCode'] = status_code class Method(dict): + def __init__(self, method_type, authorization_type): super(Method, self).__init__() self.update(dict( @@ -86,6 +93,7 @@ class Method(dict): class Resource(object): + def __init__(self, id, region_name, api_id, path_part, parent_id): self.id = id self.region_name = region_name @@ -127,14 +135,17 @@ class Resource(object): if integration_type == 'HTTP': uri = integration['uri'] - requests_func = getattr(requests, integration['httpMethod'].lower()) + requests_func = getattr(requests, integration[ + 'httpMethod'].lower()) response = requests_func(uri) else: - raise 
NotImplementedError("The {0} type has not been implemented".format(integration_type)) + raise NotImplementedError( + "The {0} type has not been implemented".format(integration_type)) return response.status_code, response.text def add_method(self, method_type, authorization_type): - method = Method(method_type=method_type, authorization_type=authorization_type) + method = Method(method_type=method_type, + authorization_type=authorization_type) self.resource_methods[method_type] = method return method @@ -142,7 +153,8 @@ class Resource(object): return self.resource_methods[method_type] def add_integration(self, method_type, integration_type, uri, request_templates=None): - integration = Integration(integration_type, uri, method_type, request_templates=request_templates) + integration = Integration( + integration_type, uri, method_type, request_templates=request_templates) self.resource_methods[method_type]['methodIntegration'] = integration return integration @@ -155,9 +167,8 @@ class Resource(object): class Stage(dict): - def __init__(self, name=None, deployment_id=None, variables=None, - description='',cacheClusterEnabled=False,cacheClusterSize=None): + description='', cacheClusterEnabled=False, cacheClusterSize=None): super(Stage, self).__init__() if variables is None: variables = {} @@ -190,21 +201,24 @@ class Stage(dict): elif op['op'] == 'replace': # Method Settings drop into here # (e.g., path could be '/*/*/logging/loglevel') - split_path = op['path'].split('/',3) - if len(split_path)!=4: + split_path = op['path'].split('/', 3) + if len(split_path) != 4: continue - self._patch_method_setting('/'.join(split_path[1:3]),split_path[3],op['value']) + self._patch_method_setting( + '/'.join(split_path[1:3]), split_path[3], op['value']) else: - raise Exception('Patch operation "%s" not implemented' % op['op']) + raise Exception( + 'Patch operation "%s" not implemented' % op['op']) return self - def _patch_method_setting(self,resource_path_and_method,key,value): + def 
_patch_method_setting(self, resource_path_and_method, key, value): updated_key = self._method_settings_translations(key) if updated_key is not None: if resource_path_and_method not in self['methodSettings']: - self['methodSettings'][resource_path_and_method] = self._get_default_method_settings() - self['methodSettings'][resource_path_and_method][updated_key] = self._convert_to_type(updated_key,value) - + self['methodSettings'][ + resource_path_and_method] = self._get_default_method_settings() + self['methodSettings'][resource_path_and_method][ + updated_key] = self._convert_to_type(updated_key, value) def _get_default_method_settings(self): return { @@ -219,18 +233,18 @@ class Stage(dict): "requireAuthorizationForCacheControl": True } - def _method_settings_translations(self,key): + def _method_settings_translations(self, key): mappings = { - 'metrics/enabled' :'metricsEnabled', - 'logging/loglevel' : 'loggingLevel', - 'logging/dataTrace' : 'dataTraceEnabled' , - 'throttling/burstLimit' : 'throttlingBurstLimit', - 'throttling/rateLimit' : 'throttlingRateLimit', - 'caching/enabled' : 'cachingEnabled', - 'caching/ttlInSeconds' : 'cacheTtlInSeconds', - 'caching/dataEncrypted' : 'cacheDataEncrypted', - 'caching/requireAuthorizationForCacheControl' : 'requireAuthorizationForCacheControl', - 'caching/unauthorizedCacheControlHeaderStrategy' : 'unauthorizedCacheControlHeaderStrategy' + 'metrics/enabled': 'metricsEnabled', + 'logging/loglevel': 'loggingLevel', + 'logging/dataTrace': 'dataTraceEnabled', + 'throttling/burstLimit': 'throttlingBurstLimit', + 'throttling/rateLimit': 'throttlingRateLimit', + 'caching/enabled': 'cachingEnabled', + 'caching/ttlInSeconds': 'cacheTtlInSeconds', + 'caching/dataEncrypted': 'cacheDataEncrypted', + 'caching/requireAuthorizationForCacheControl': 'requireAuthorizationForCacheControl', + 'caching/unauthorizedCacheControlHeaderStrategy': 'unauthorizedCacheControlHeaderStrategy' } if key in mappings: @@ -238,21 +252,21 @@ class Stage(dict): 
else: None - def _str2bool(self,v): + def _str2bool(self, v): return v.lower() == "true" - def _convert_to_type(self,key,val): + def _convert_to_type(self, key, val): type_mappings = { - 'metricsEnabled' : 'bool', - 'loggingLevel' : 'str', - 'dataTraceEnabled' : 'bool', - 'throttlingBurstLimit' : 'int', - 'throttlingRateLimit' : 'float', - 'cachingEnabled' : 'bool', - 'cacheTtlInSeconds' : 'int', - 'cacheDataEncrypted' : 'bool', - 'requireAuthorizationForCacheControl' :'bool', - 'unauthorizedCacheControlHeaderStrategy' : 'str' + 'metricsEnabled': 'bool', + 'loggingLevel': 'str', + 'dataTraceEnabled': 'bool', + 'throttlingBurstLimit': 'int', + 'throttlingRateLimit': 'float', + 'cachingEnabled': 'bool', + 'cacheTtlInSeconds': 'int', + 'cacheDataEncrypted': 'bool', + 'requireAuthorizationForCacheControl': 'bool', + 'unauthorizedCacheControlHeaderStrategy': 'str' } if key in type_mappings: @@ -261,7 +275,7 @@ class Stage(dict): if type_value == 'bool': return self._str2bool(val) elif type_value == 'int': - return int(val) + return int(val) elif type_value == 'float': return float(val) else: @@ -269,10 +283,8 @@ class Stage(dict): else: return str(val) - - - def _apply_operation_to_variables(self,op): - key = op['path'][op['path'].rindex("variables/")+10:] + def _apply_operation_to_variables(self, op): + key = op['path'][op['path'].rindex("variables/") + 10:] if op['op'] == 'remove': self['variables'].pop(key, None) elif op['op'] == 'replace': @@ -281,8 +293,8 @@ class Stage(dict): raise Exception('Patch operation "%s" not implemented' % op['op']) - class RestAPI(object): + def __init__(self, id, region_name, name, description): self.id = id self.region_name = region_name @@ -306,7 +318,8 @@ class RestAPI(object): def add_child(self, path, parent_id=None): child_id = create_id() - child = Resource(id=child_id, region_name=self.region_name, api_id=self.id, path_part=path, parent_id=parent_id) + child = Resource(id=child_id, region_name=self.region_name, + api_id=self.id, 
path_part=path, parent_id=parent_id) self.resources[child_id] = child return child @@ -326,25 +339,28 @@ class RestAPI(object): return status_code, {}, response def update_integration_mocks(self, stage_name): - stage_url = STAGE_URL.format(api_id=self.id.upper(), region_name=self.region_name, stage_name=stage_name) - responses.add_callback(responses.GET, stage_url, callback=self.resource_callback) + stage_url = STAGE_URL.format(api_id=self.id.upper(), + region_name=self.region_name, stage_name=stage_name) + responses.add_callback(responses.GET, stage_url, + callback=self.resource_callback) - def create_stage(self, name, deployment_id,variables=None,description='',cacheClusterEnabled=None,cacheClusterSize=None): + def create_stage(self, name, deployment_id, variables=None, description='', cacheClusterEnabled=None, cacheClusterSize=None): if variables is None: variables = {} - stage = Stage(name=name, deployment_id=deployment_id,variables=variables, - description=description,cacheClusterSize=cacheClusterSize,cacheClusterEnabled=cacheClusterEnabled) + stage = Stage(name=name, deployment_id=deployment_id, variables=variables, + description=description, cacheClusterSize=cacheClusterSize, cacheClusterEnabled=cacheClusterEnabled) self.stages[name] = stage self.update_integration_mocks(name) return stage - def create_deployment(self, name, description="",stage_variables=None): + def create_deployment(self, name, description="", stage_variables=None): if stage_variables is None: stage_variables = {} deployment_id = create_id() deployment = Deployment(deployment_id, name, description) self.deployments[deployment_id] = deployment - self.stages[name] = Stage(name=name, deployment_id=deployment_id,variables=stage_variables) + self.stages[name] = Stage( + name=name, deployment_id=deployment_id, variables=stage_variables) self.update_integration_mocks(name) return deployment @@ -353,7 +369,7 @@ class RestAPI(object): return self.deployments[deployment_id] def get_stages(self): - 
return list(self.stages.values()) + return list(self.stages.values()) def get_deployments(self): return list(self.deployments.values()) @@ -363,6 +379,7 @@ class RestAPI(object): class APIGatewayBackend(BaseBackend): + def __init__(self, region_name): super(APIGatewayBackend, self).__init__() self.apis = {} @@ -429,19 +446,17 @@ class APIGatewayBackend(BaseBackend): else: return stage - def get_stages(self, function_id): api = self.get_rest_api(function_id) return api.get_stages() - def create_stage(self, function_id, stage_name, deploymentId, - variables=None,description='',cacheClusterEnabled=None,cacheClusterSize=None): + variables=None, description='', cacheClusterEnabled=None, cacheClusterSize=None): if variables is None: variables = {} api = self.get_rest_api(function_id) - api.create_stage(stage_name,deploymentId,variables=variables, - description=description,cacheClusterEnabled=cacheClusterEnabled,cacheClusterSize=cacheClusterSize) + api.create_stage(stage_name, deploymentId, variables=variables, + description=description, cacheClusterEnabled=cacheClusterEnabled, cacheClusterSize=cacheClusterSize) return api.stages.get(stage_name) def update_stage(self, function_id, stage_name, patch_operations): @@ -467,10 +482,10 @@ class APIGatewayBackend(BaseBackend): return method_response def create_integration(self, function_id, resource_id, method_type, integration_type, uri, - request_templates=None): + request_templates=None): resource = self.get_resource(function_id, resource_id) integration = resource.add_integration(method_type, integration_type, uri, - request_templates=request_templates) + request_templates=request_templates) return integration def get_integration(self, function_id, resource_id, method_type): @@ -482,25 +497,31 @@ class APIGatewayBackend(BaseBackend): return resource.delete_integration(method_type) def create_integration_response(self, function_id, resource_id, method_type, status_code, selection_pattern): - integration = 
self.get_integration(function_id, resource_id, method_type) - integration_response = integration.create_integration_response(status_code, selection_pattern) + integration = self.get_integration( + function_id, resource_id, method_type) + integration_response = integration.create_integration_response( + status_code, selection_pattern) return integration_response def get_integration_response(self, function_id, resource_id, method_type, status_code): - integration = self.get_integration(function_id, resource_id, method_type) - integration_response = integration.get_integration_response(status_code) + integration = self.get_integration( + function_id, resource_id, method_type) + integration_response = integration.get_integration_response( + status_code) return integration_response def delete_integration_response(self, function_id, resource_id, method_type, status_code): - integration = self.get_integration(function_id, resource_id, method_type) - integration_response = integration.delete_integration_response(status_code) + integration = self.get_integration( + function_id, resource_id, method_type) + integration_response = integration.delete_integration_response( + status_code) return integration_response - def create_deployment(self, function_id, name, description ="", stage_variables=None): + def create_deployment(self, function_id, name, description="", stage_variables=None): if stage_variables is None: stage_variables = {} api = self.get_rest_api(function_id) - deployment = api.create_deployment(name, description,stage_variables) + deployment = api.create_deployment(name, description, stage_variables) return deployment def get_deployment(self, function_id, deployment_id): @@ -515,6 +536,8 @@ class APIGatewayBackend(BaseBackend): api = self.get_rest_api(function_id) return api.delete_deployment(deployment_id) + apigateway_backends = {} -for region_name in ['us-east-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1']: # Not available in boto yet +# Not available in boto 
yet +for region_name in ['us-east-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1']: apigateway_backends[region_name] = APIGatewayBackend(region_name) diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index a7bb28c6e..443fd4060 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -12,7 +12,6 @@ class APIGatewayResponse(BaseResponse): def _get_param(self, key): return json.loads(self.body).get(key) - def _get_param_with_default_value(self, key, default): jsonbody = json.loads(self.body) @@ -69,7 +68,8 @@ class APIGatewayResponse(BaseResponse): resource = self.backend.get_resource(function_id, resource_id) elif self.method == 'POST': path_part = self._get_param("pathPart") - resource = self.backend.create_resource(function_id, resource_id, path_part) + resource = self.backend.create_resource( + function_id, resource_id, path_part) elif self.method == 'DELETE': resource = self.backend.delete_resource(function_id, resource_id) return 200, {}, json.dumps(resource.to_dict()) @@ -82,11 +82,13 @@ class APIGatewayResponse(BaseResponse): method_type = url_path_parts[6] if self.method == 'GET': - method = self.backend.get_method(function_id, resource_id, method_type) + method = self.backend.get_method( + function_id, resource_id, method_type) return 200, {}, json.dumps(method) elif self.method == 'PUT': authorization_type = self._get_param("authorizationType") - method = self.backend.create_method(function_id, resource_id, method_type, authorization_type) + method = self.backend.create_method( + function_id, resource_id, method_type, authorization_type) return 200, {}, json.dumps(method) def resource_method_responses(self, request, full_url, headers): @@ -98,11 +100,14 @@ class APIGatewayResponse(BaseResponse): response_code = url_path_parts[8] if self.method == 'GET': - method_response = self.backend.get_method_response(function_id, resource_id, method_type, response_code) + method_response = self.backend.get_method_response( + 
function_id, resource_id, method_type, response_code) elif self.method == 'PUT': - method_response = self.backend.create_method_response(function_id, resource_id, method_type, response_code) + method_response = self.backend.create_method_response( + function_id, resource_id, method_type, response_code) elif self.method == 'DELETE': - method_response = self.backend.delete_method_response(function_id, resource_id, method_type, response_code) + method_response = self.backend.delete_method_response( + function_id, resource_id, method_type, response_code) return 200, {}, json.dumps(method_response) def restapis_stages(self, request, full_url, headers): @@ -113,10 +118,13 @@ class APIGatewayResponse(BaseResponse): if self.method == 'POST': stage_name = self._get_param("stageName") deployment_id = self._get_param("deploymentId") - stage_variables = self._get_param_with_default_value('variables',{}) - description = self._get_param_with_default_value('description','') - cacheClusterEnabled = self._get_param_with_default_value('cacheClusterEnabled',False) - cacheClusterSize = self._get_param_with_default_value('cacheClusterSize',None) + stage_variables = self._get_param_with_default_value( + 'variables', {}) + description = self._get_param_with_default_value('description', '') + cacheClusterEnabled = self._get_param_with_default_value( + 'cacheClusterEnabled', False) + cacheClusterSize = self._get_param_with_default_value( + 'cacheClusterSize', None) stage_response = self.backend.create_stage(function_id, stage_name, deployment_id, variables=stage_variables, description=description, @@ -135,12 +143,14 @@ class APIGatewayResponse(BaseResponse): if self.method == 'GET': try: - stage_response = self.backend.get_stage(function_id, stage_name) + stage_response = self.backend.get_stage( + function_id, stage_name) except StageNotFoundException as error: - return error.code, {},'{{"message":"{0}","code":"{1}"}}'.format(error.message,error.error_type) + return error.code, {}, 
'{{"message":"{0}","code":"{1}"}}'.format(error.message, error.error_type) elif self.method == 'PATCH': patch_operations = self._get_param('patchOperations') - stage_response = self.backend.update_stage(function_id, stage_name, patch_operations) + stage_response = self.backend.update_stage( + function_id, stage_name, patch_operations) return 200, {}, json.dumps(stage_response) def integrations(self, request, full_url, headers): @@ -151,14 +161,17 @@ class APIGatewayResponse(BaseResponse): method_type = url_path_parts[6] if self.method == 'GET': - integration_response = self.backend.get_integration(function_id, resource_id, method_type) + integration_response = self.backend.get_integration( + function_id, resource_id, method_type) elif self.method == 'PUT': integration_type = self._get_param('type') uri = self._get_param('uri') request_templates = self._get_param('requestTemplates') - integration_response = self.backend.create_integration(function_id, resource_id, method_type, integration_type, uri, request_templates=request_templates) + integration_response = self.backend.create_integration( + function_id, resource_id, method_type, integration_type, uri, request_templates=request_templates) elif self.method == 'DELETE': - integration_response = self.backend.delete_integration(function_id, resource_id, method_type) + integration_response = self.backend.delete_integration( + function_id, resource_id, method_type) return 200, {}, json.dumps(integration_response) def integration_responses(self, request, full_url, headers): @@ -193,9 +206,11 @@ class APIGatewayResponse(BaseResponse): return 200, {}, json.dumps({"item": deployments}) elif self.method == 'POST': name = self._get_param("stageName") - description = self._get_param_with_default_value("description","") - stage_variables = self._get_param_with_default_value('variables',{}) - deployment = self.backend.create_deployment(function_id, name, description,stage_variables) + description = 
self._get_param_with_default_value("description", "") + stage_variables = self._get_param_with_default_value( + 'variables', {}) + deployment = self.backend.create_deployment( + function_id, name, description, stage_variables) return 200, {}, json.dumps(deployment) def individual_deployment(self, request, full_url, headers): @@ -205,7 +220,9 @@ class APIGatewayResponse(BaseResponse): deployment_id = url_path_parts[4] if self.method == 'GET': - deployment = self.backend.get_deployment(function_id, deployment_id) + deployment = self.backend.get_deployment( + function_id, deployment_id) elif self.method == 'DELETE': - deployment = self.backend.delete_deployment(function_id, deployment_id) + deployment = self.backend.delete_deployment( + function_id, deployment_id) return 200, {}, json.dumps(deployment) diff --git a/moto/autoscaling/__init__.py b/moto/autoscaling/__init__.py index 9b5842788..b2b8b0bae 100644 --- a/moto/autoscaling/__init__.py +++ b/moto/autoscaling/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import autoscaling_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator autoscaling_backend = autoscaling_backends['us-east-1'] mock_autoscaling = base_decorator(autoscaling_backends) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 53a0f62df..18dfcb5fe 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -10,12 +10,14 @@ DEFAULT_COOLDOWN = 300 class InstanceState(object): + def __init__(self, instance, lifecycle_state="InService"): self.instance = instance self.lifecycle_state = lifecycle_state class FakeScalingPolicy(object): + def __init__(self, name, policy_type, adjustment_type, as_name, scaling_adjustment, cooldown, autoscaling_backend): self.name = name @@ -31,14 +33,18 @@ class FakeScalingPolicy(object): def execute(self): if self.adjustment_type == 
'ExactCapacity': - self.autoscaling_backend.set_desired_capacity(self.as_name, self.scaling_adjustment) + self.autoscaling_backend.set_desired_capacity( + self.as_name, self.scaling_adjustment) elif self.adjustment_type == 'ChangeInCapacity': - self.autoscaling_backend.change_capacity(self.as_name, self.scaling_adjustment) + self.autoscaling_backend.change_capacity( + self.as_name, self.scaling_adjustment) elif self.adjustment_type == 'PercentChangeInCapacity': - self.autoscaling_backend.change_capacity_percent(self.as_name, self.scaling_adjustment) + self.autoscaling_backend.change_capacity_percent( + self.as_name, self.scaling_adjustment) class FakeLaunchConfiguration(object): + def __init__(self, name, image_id, key_name, ramdisk_id, kernel_id, security_groups, user_data, instance_type, instance_monitoring, instance_profile_name, spot_price, ebs_optimized, associate_public_ip_address, block_device_mapping_dict): @@ -77,14 +83,16 @@ class FakeLaunchConfiguration(object): instance_profile_name=instance_profile_name, spot_price=properties.get("SpotPrice"), ebs_optimized=properties.get("EbsOptimized"), - associate_public_ip_address=properties.get("AssociatePublicIpAddress"), + associate_public_ip_address=properties.get( + "AssociatePublicIpAddress"), block_device_mappings=properties.get("BlockDeviceMapping.member") ) return config @classmethod def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): - cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name) + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name) return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name) @classmethod @@ -126,7 +134,8 @@ class FakeLaunchConfiguration(object): else: block_type.volume_type = mapping.get('ebs._volume_type') block_type.snapshot_id = mapping.get('ebs._snapshot_id') - block_type.delete_on_termination = 
mapping.get('ebs._delete_on_termination') + block_type.delete_on_termination = mapping.get( + 'ebs._delete_on_termination') block_type.size = mapping.get('ebs._volume_size') block_type.iops = mapping.get('ebs._iops') block_device_map[mount_point] = block_type @@ -134,6 +143,7 @@ class FakeLaunchConfiguration(object): class FakeAutoScalingGroup(object): + def __init__(self, name, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, @@ -145,7 +155,8 @@ class FakeAutoScalingGroup(object): self.max_size = max_size self.min_size = min_size - self.launch_config = self.autoscaling_backend.launch_configurations[launch_config_name] + self.launch_config = self.autoscaling_backend.launch_configurations[ + launch_config_name] self.launch_config_name = launch_config_name self.vpc_zone_identifier = vpc_zone_identifier @@ -175,7 +186,8 @@ class FakeAutoScalingGroup(object): max_size=properties.get("MaxSize"), min_size=properties.get("MinSize"), launch_config_name=launch_config_name, - vpc_zone_identifier=(','.join(properties.get("VPCZoneIdentifier", [])) or None), + vpc_zone_identifier=( + ','.join(properties.get("VPCZoneIdentifier", [])) or None), default_cooldown=properties.get("Cooldown"), health_check_period=properties.get("HealthCheckGracePeriod"), health_check_type=properties.get("HealthCheckType"), @@ -188,7 +200,8 @@ class FakeAutoScalingGroup(object): @classmethod def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): - cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name) + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name) return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name) @classmethod @@ -219,7 +232,8 @@ class FakeAutoScalingGroup(object): self.min_size = min_size if 
launch_config_name: - self.launch_config = self.autoscaling_backend.launch_configurations[launch_config_name] + self.launch_config = self.autoscaling_backend.launch_configurations[ + launch_config_name] self.launch_config_name = launch_config_name if vpc_zone_identifier is not None: self.vpc_zone_identifier = vpc_zone_identifier @@ -244,7 +258,8 @@ class FakeAutoScalingGroup(object): if self.desired_capacity > curr_instance_count: # Need more instances - count_needed = int(self.desired_capacity) - int(curr_instance_count) + count_needed = int(self.desired_capacity) - \ + int(curr_instance_count) reservation = self.autoscaling_backend.ec2_backend.add_instances( self.launch_config.image_id, count_needed, @@ -259,8 +274,10 @@ class FakeAutoScalingGroup(object): # Need to remove some instances count_to_remove = curr_instance_count - self.desired_capacity instances_to_remove = self.instance_states[:count_to_remove] - instance_ids_to_remove = [instance.instance.id for instance in instances_to_remove] - self.autoscaling_backend.ec2_backend.terminate_instances(instance_ids_to_remove) + instance_ids_to_remove = [ + instance.instance.id for instance in instances_to_remove] + self.autoscaling_backend.ec2_backend.terminate_instances( + instance_ids_to_remove) self.instance_states = self.instance_states[count_to_remove:] @@ -419,8 +436,8 @@ class AutoScalingBackend(BaseBackend): def describe_policies(self, autoscaling_group_name=None, policy_names=None, policy_types=None): return [policy for policy in self.policies.values() if (not autoscaling_group_name or policy.as_name == autoscaling_group_name) and - (not policy_names or policy.name in policy_names) and - (not policy_types or policy.policy_type in policy_types)] + (not policy_names or policy.name in policy_names) and + (not policy_types or policy.policy_type in policy_types)] def delete_policy(self, group_name): self.policies.pop(group_name, None) @@ -431,18 +448,22 @@ class AutoScalingBackend(BaseBackend): def 
update_attached_elbs(self, group_name): group = self.autoscaling_groups[group_name] - group_instance_ids = set(state.instance.id for state in group.instance_states) + group_instance_ids = set( + state.instance.id for state in group.instance_states) try: - elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers) + elbs = self.elb_backend.describe_load_balancers( + names=group.load_balancers) except LoadBalancerNotFoundError: # ELBs can be deleted before their autoscaling group return for elb in elbs: elb_instace_ids = set(elb.instance_ids) - self.elb_backend.register_instances(elb.name, group_instance_ids - elb_instace_ids) - self.elb_backend.deregister_instances(elb.name, elb_instace_ids - group_instance_ids) + self.elb_backend.register_instances( + elb.name, group_instance_ids - elb_instace_ids) + self.elb_backend.deregister_instances( + elb.name, elb_instace_ids - group_instance_ids) def create_or_update_tags(self, tags): @@ -452,19 +473,21 @@ class AutoScalingBackend(BaseBackend): old_tags = group.tags new_tags = [] - #if key was in old_tags, update old tag + # if key was in old_tags, update old tag for old_tag in old_tags: if old_tag["key"] == tag["key"]: new_tags.append(tag) else: new_tags.append(old_tag) - #if key was never in old_tag's add it (create tag) + # if key was never in old_tag's add it (create tag) if not any(new_tag['key'] == tag['key'] for new_tag in new_tags): new_tags.append(tag) group.tags = new_tags + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): - autoscaling_backends[region] = AutoScalingBackend(ec2_backend, elb_backends[region]) + autoscaling_backends[region] = AutoScalingBackend( + ec2_backend, elb_backends[region]) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 976199131..b1d160320 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -11,7 +11,8 @@ class AutoScalingResponse(BaseResponse): return autoscaling_backends[self.region] 
def create_launch_configuration(self): - instance_monitoring_string = self._get_param('InstanceMonitoring.Enabled') + instance_monitoring_string = self._get_param( + 'InstanceMonitoring.Enabled') if instance_monitoring_string == 'true': instance_monitoring = True else: @@ -29,28 +30,35 @@ class AutoScalingResponse(BaseResponse): instance_profile_name=self._get_param('IamInstanceProfile'), spot_price=self._get_param('SpotPrice'), ebs_optimized=self._get_param('EbsOptimized'), - associate_public_ip_address=self._get_param("AssociatePublicIpAddress"), - block_device_mappings=self._get_list_prefix('BlockDeviceMappings.member') + associate_public_ip_address=self._get_param( + "AssociatePublicIpAddress"), + block_device_mappings=self._get_list_prefix( + 'BlockDeviceMappings.member') ) template = self.response_template(CREATE_LAUNCH_CONFIGURATION_TEMPLATE) return template.render() def describe_launch_configurations(self): names = self._get_multi_param('LaunchConfigurationNames.member') - launch_configurations = self.autoscaling_backend.describe_launch_configurations(names) - template = self.response_template(DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE) + launch_configurations = self.autoscaling_backend.describe_launch_configurations( + names) + template = self.response_template( + DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE) return template.render(launch_configurations=launch_configurations) def delete_launch_configuration(self): - launch_configurations_name = self.querystring.get('LaunchConfigurationName')[0] - self.autoscaling_backend.delete_launch_configuration(launch_configurations_name) + launch_configurations_name = self.querystring.get( + 'LaunchConfigurationName')[0] + self.autoscaling_backend.delete_launch_configuration( + launch_configurations_name) template = self.response_template(DELETE_LAUNCH_CONFIGURATION_TEMPLATE) return template.render() def create_auto_scaling_group(self): self.autoscaling_backend.create_autoscaling_group( 
name=self._get_param('AutoScalingGroupName'), - availability_zones=self._get_multi_param('AvailabilityZones.member'), + availability_zones=self._get_multi_param( + 'AvailabilityZones.member'), desired_capacity=self._get_int_param('DesiredCapacity'), max_size=self._get_int_param('MaxSize'), min_size=self._get_int_param('MinSize'), @@ -61,7 +69,8 @@ class AutoScalingResponse(BaseResponse): health_check_type=self._get_param('HealthCheckType'), load_balancers=self._get_multi_param('LoadBalancerNames.member'), placement_group=self._get_param('PlacementGroup'), - termination_policies=self._get_multi_param('TerminationPolicies.member'), + termination_policies=self._get_multi_param( + 'TerminationPolicies.member'), tags=self._get_list_prefix('Tags.member'), ) template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE) @@ -76,7 +85,8 @@ class AutoScalingResponse(BaseResponse): def update_auto_scaling_group(self): self.autoscaling_backend.update_autoscaling_group( name=self._get_param('AutoScalingGroupName'), - availability_zones=self._get_multi_param('AvailabilityZones.member'), + availability_zones=self._get_multi_param( + 'AvailabilityZones.member'), desired_capacity=self._get_int_param('DesiredCapacity'), max_size=self._get_int_param('MaxSize'), min_size=self._get_int_param('MinSize'), @@ -87,7 +97,8 @@ class AutoScalingResponse(BaseResponse): health_check_type=self._get_param('HealthCheckType'), load_balancers=self._get_multi_param('LoadBalancerNames.member'), placement_group=self._get_param('PlacementGroup'), - termination_policies=self._get_multi_param('TerminationPolicies.member'), + termination_policies=self._get_multi_param( + 'TerminationPolicies.member'), ) template = self.response_template(UPDATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() @@ -101,7 +112,8 @@ class AutoScalingResponse(BaseResponse): def set_desired_capacity(self): group_name = self._get_param('AutoScalingGroupName') desired_capacity = self._get_int_param('DesiredCapacity') - 
self.autoscaling_backend.set_desired_capacity(group_name, desired_capacity) + self.autoscaling_backend.set_desired_capacity( + group_name, desired_capacity) template = self.response_template(SET_DESIRED_CAPACITY_TEMPLATE) return template.render() @@ -114,7 +126,8 @@ class AutoScalingResponse(BaseResponse): def describe_auto_scaling_instances(self): instance_states = self.autoscaling_backend.describe_autoscaling_instances() - template = self.response_template(DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE) + template = self.response_template( + DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE) return template.render(instance_states=instance_states) def put_scaling_policy(self): diff --git a/moto/awslambda/__init__.py b/moto/awslambda/__init__.py index 46bc90fbd..f0d694654 100644 --- a/moto/awslambda/__init__.py +++ b/moto/awslambda/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import lambda_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator lambda_backend = lambda_backends['us-east-1'] mock_lambda = base_decorator(lambda_backends) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 1fc139eb7..46d227300 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -32,19 +32,22 @@ class LambdaFunction(object): # optional self.description = spec.get('Description', '') self.memory_size = spec.get('MemorySize', 128) - self.publish = spec.get('Publish', False) # this is ignored currently + self.publish = spec.get('Publish', False) # this is ignored currently self.timeout = spec.get('Timeout', 3) # this isn't finished yet. 
it needs to find out the VpcId value - self._vpc_config = spec.get('VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []}) + self._vpc_config = spec.get( + 'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []}) # auto-generated self.version = '$LATEST' self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') if 'ZipFile' in self.code: - # more hackery to handle unicode/bytes/str in python3 and python2 - argh! + # more hackery to handle unicode/bytes/str in python3 and python2 - + # argh! try: - to_unzip_code = base64.b64decode(bytes(self.code['ZipFile'], 'utf-8')) + to_unzip_code = base64.b64decode( + bytes(self.code['ZipFile'], 'utf-8')) except Exception: to_unzip_code = base64.b64decode(self.code['ZipFile']) @@ -58,7 +61,8 @@ class LambdaFunction(object): # validate s3 bucket try: # FIXME: does not validate bucket region - key = s3_backend.get_key(self.code['S3Bucket'], self.code['S3Key']) + key = s3_backend.get_key( + self.code['S3Bucket'], self.code['S3Key']) except MissingBucket: raise ValueError( "InvalidParameterValueException", @@ -72,7 +76,8 @@ class LambdaFunction(object): else: self.code_size = key.size self.code_sha_256 = hashlib.sha256(key.value).hexdigest() - self.function_arn = 'arn:aws:lambda:123456789012:function:{0}'.format(self.function_name) + self.function_arn = 'arn:aws:lambda:123456789012:function:{0}'.format( + self.function_name) @property def vpc_config(self): @@ -130,7 +135,6 @@ class LambdaFunction(object): self.convert(self.code), self.convert('print(json.dumps(lambda_handler(%s, %s)))' % (self.is_json(self.convert(event)), context))]) - #print("moto_lambda_debug: ", mycode) except Exception as ex: print("Exception %s", ex) @@ -182,7 +186,8 @@ class LambdaFunction(object): 'Runtime': properties['Runtime'], } optional_properties = 'Description MemorySize Publish Timeout VpcConfig'.split() - # NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the default logic + # NOTE: Not doing `properties.get(k, 
DEFAULT)` to avoid duplicating the + # default logic for prop in optional_properties: if prop in properties: spec[prop] = properties[prop] @@ -219,6 +224,6 @@ lambda_backends = {} for region in boto.awslambda.regions(): lambda_backends[region.name] = LambdaBackend() -# Handle us forgotten regions, unless Lambda truly only runs out of US and EU????? +# Handle us forgotten regions, unless Lambda truly only runs out of US and for region in ['ap-southeast-2']: lambda_backends[region] = LambdaBackend() diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 3fc756efa..b7664c314 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -2,10 +2,8 @@ from __future__ import unicode_literals import json import re -import uuid from moto.core.responses import BaseResponse -from .models import lambda_backends class LambdaResponse(BaseResponse): diff --git a/moto/cloudformation/__init__.py b/moto/cloudformation/__init__.py index 47e840ec6..b73e3ab6c 100644 --- a/moto/cloudformation/__init__.py +++ b/moto/cloudformation/__init__.py @@ -1,7 +1,8 @@ from __future__ import unicode_literals from .models import cloudformation_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator cloudformation_backend = cloudformation_backends['us-east-1'] mock_cloudformation = base_decorator(cloudformation_backends) -mock_cloudformation_deprecated = deprecated_base_decorator(cloudformation_backends) +mock_cloudformation_deprecated = deprecated_base_decorator( + cloudformation_backends) diff --git a/moto/cloudformation/exceptions.py b/moto/cloudformation/exceptions.py index ed2856826..56a95382a 100644 --- a/moto/cloudformation/exceptions.py +++ b/moto/cloudformation/exceptions.py @@ -9,9 +9,10 @@ class UnformattedGetAttTemplateException(Exception): class ValidationError(BadRequest): + def __init__(self, name_or_id, message=None): if message 
is None: - message="Stack with id {0} does not exist".format(name_or_id) + message = "Stack with id {0} does not exist".format(name_or_id) template = Template(ERROR_RESPONSE) super(ValidationError, self).__init__() @@ -22,6 +23,7 @@ class ValidationError(BadRequest): class MissingParameterError(BadRequest): + def __init__(self, parameter_name): template = Template(ERROR_RESPONSE) super(MissingParameterError, self).__init__() diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 1f091251b..0a3dcc62d 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -11,6 +11,7 @@ from .exceptions import ValidationError class FakeStack(object): + def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None): self.stack_id = stack_id self.name = name @@ -22,7 +23,8 @@ class FakeStack(object): self.role_arn = role_arn self.tags = tags if tags else {} self.events = [] - self._add_stack_event("CREATE_IN_PROGRESS", resource_status_reason="User Initiated") + self._add_stack_event("CREATE_IN_PROGRESS", + resource_status_reason="User Initiated") self.description = self.template_dict.get('Description') self.resource_map = self._create_resource_map() @@ -31,7 +33,8 @@ class FakeStack(object): self.status = 'CREATE_COMPLETE' def _create_resource_map(self): - resource_map = ResourceMap(self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict) + resource_map = ResourceMap( + self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict) resource_map.create() return resource_map @@ -79,7 +82,8 @@ class FakeStack(object): return self.output_map.values() def update(self, template, role_arn=None): - self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") + self._add_stack_event("UPDATE_IN_PROGRESS", + resource_status_reason="User Initiated") self.template = template 
self.resource_map.update(json.loads(template)) self.output_map = self._create_output_map() @@ -88,13 +92,15 @@ class FakeStack(object): self.role_arn = role_arn def delete(self): - self._add_stack_event("DELETE_IN_PROGRESS", resource_status_reason="User Initiated") + self._add_stack_event("DELETE_IN_PROGRESS", + resource_status_reason="User Initiated") self.resource_map.delete() self._add_stack_event("DELETE_COMPLETE") self.status = "DELETE_COMPLETE" class FakeEvent(object): + def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None): self.stack_id = stack_id self.stack_name = stack_name diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 521658cee..f2ba08522 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -94,6 +94,7 @@ logger = logging.getLogger("moto") class LazyDict(dict): + def __getitem__(self, key): val = dict.__getitem__(self, key) if callable(val): @@ -133,7 +134,8 @@ def clean_json(resource_json, resources_map): try: return resource.get_cfn_attribute(resource_json['Fn::GetAtt'][1]) except NotImplementedError as n: - logger.warning(n.message.format(resource_json['Fn::GetAtt'][0])) + logger.warning(n.message.format( + resource_json['Fn::GetAtt'][0])) except UnformattedGetAttTemplateException: raise BotoServerError( UnformattedGetAttTemplateException.status_code, @@ -152,7 +154,8 @@ def clean_json(resource_json, resources_map): join_list = [] for val in resource_json['Fn::Join'][1]: cleaned_val = clean_json(val, resources_map) - join_list.append('{0}'.format(cleaned_val) if cleaned_val else '{0}'.format(val)) + join_list.append('{0}'.format(cleaned_val) + if cleaned_val else '{0}'.format(val)) return resource_json['Fn::Join'][0].join(join_list) cleaned_json = {} @@ -215,14 +218,16 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n if not 
resource_tuple: return None resource_class, resource_json, resource_name = resource_tuple - resource = resource_class.create_from_cloudformation_json(resource_name, resource_json, region_name) + resource = resource_class.create_from_cloudformation_json( + resource_name, resource_json, region_name) resource.type = resource_type resource.logical_resource_id = logical_id return resource def parse_and_update_resource(logical_id, resource_json, resources_map, region_name): - resource_class, new_resource_json, new_resource_name = parse_resource(logical_id, resource_json, resources_map) + resource_class, new_resource_json, new_resource_name = parse_resource( + logical_id, resource_json, resources_map) original_resource = resources_map[logical_id] new_resource = resource_class.update_from_cloudformation_json( original_resource=original_resource, @@ -236,8 +241,10 @@ def parse_and_update_resource(logical_id, resource_json, resources_map, region_n def parse_and_delete_resource(logical_id, resource_json, resources_map, region_name): - resource_class, resource_json, resource_name = parse_resource(logical_id, resource_json, resources_map) - resource_class.delete_from_cloudformation_json(resource_name, resource_json, region_name) + resource_class, resource_json, resource_name = parse_resource( + logical_id, resource_json, resources_map) + resource_class.delete_from_cloudformation_json( + resource_name, resource_json, region_name) def parse_condition(condition, resources_map, condition_map): @@ -312,7 +319,8 @@ class ResourceMap(collections.Mapping): resource_json = self._resource_json_map.get(resource_logical_id) if not resource_json: raise KeyError(resource_logical_id) - new_resource = parse_and_create_resource(resource_logical_id, resource_json, self, self._region_name) + new_resource = parse_and_create_resource( + resource_logical_id, resource_json, self, self._region_name) if new_resource is not None: self._parsed_resources[resource_logical_id] = new_resource return 
new_resource @@ -343,7 +351,8 @@ class ResourceMap(collections.Mapping): value = value.split(',') self.resolved_parameters[key] = value - # Check if there are any non-default params that were not passed input params + # Check if there are any non-default params that were not passed input + # params for key, value in self.resolved_parameters.items(): if value is None: raise MissingParameterError(key) @@ -355,10 +364,11 @@ class ResourceMap(collections.Mapping): lazy_condition_map = LazyDict() for condition_name, condition in conditions.items(): lazy_condition_map[condition_name] = functools.partial(parse_condition, - condition, self._parsed_resources, lazy_condition_map) + condition, self._parsed_resources, lazy_condition_map) for condition_name in lazy_condition_map: - self._parsed_resources[condition_name] = lazy_condition_map[condition_name] + self._parsed_resources[ + condition_name] = lazy_condition_map[condition_name] def create(self): self.load_mapping() @@ -368,11 +378,12 @@ class ResourceMap(collections.Mapping): # Since this is a lazy map, to create every object we just need to # iterate through self. 
self.tags.update({'aws:cloudformation:stack-name': self.get('AWS::StackName'), - 'aws:cloudformation:stack-id': self.get('AWS::StackId')}) + 'aws:cloudformation:stack-id': self.get('AWS::StackId')}) for resource in self.resources: if isinstance(self[resource], ec2_models.TaggedEC2Resource): self.tags['aws:cloudformation:logical-id'] = resource - ec2_models.ec2_backends[self._region_name].create_tags([self[resource].physical_resource_id], self.tags) + ec2_models.ec2_backends[self._region_name].create_tags( + [self[resource].physical_resource_id], self.tags) def update(self, template): self.load_mapping() @@ -386,24 +397,29 @@ class ResourceMap(collections.Mapping): new_resource_names = set(new_template) - set(old_template) for resource_name in new_resource_names: resource_json = new_template[resource_name] - new_resource = parse_and_create_resource(resource_name, resource_json, self, self._region_name) + new_resource = parse_and_create_resource( + resource_name, resource_json, self, self._region_name) self._parsed_resources[resource_name] = new_resource removed_resource_nams = set(old_template) - set(new_template) for resource_name in removed_resource_nams: resource_json = old_template[resource_name] - parse_and_delete_resource(resource_name, resource_json, self, self._region_name) + parse_and_delete_resource( + resource_name, resource_json, self, self._region_name) self._parsed_resources.pop(resource_name) - resources_to_update = set(name for name in new_template if name in old_template and new_template[name] != old_template[name]) + resources_to_update = set(name for name in new_template if name in old_template and new_template[ + name] != old_template[name]) tries = 1 while resources_to_update and tries < 5: for resource_name in resources_to_update.copy(): resource_json = new_template[resource_name] try: - changed_resource = parse_and_update_resource(resource_name, resource_json, self, self._region_name) + changed_resource = parse_and_update_resource( + 
resource_name, resource_json, self, self._region_name) except Exception as e: - # skip over dependency violations, and try again in a second pass + # skip over dependency violations, and try again in a + # second pass last_exception = e else: self._parsed_resources[resource_name] = changed_resource @@ -422,7 +438,8 @@ class ResourceMap(collections.Mapping): if parsed_resource and hasattr(parsed_resource, 'delete'): parsed_resource.delete(self._region_name) except Exception as e: - # skip over dependency violations, and try again in a second pass + # skip over dependency violations, and try again in a + # second pass last_exception = e else: remaining_resources.remove(resource) @@ -430,7 +447,9 @@ class ResourceMap(collections.Mapping): if tries == 5: raise last_exception + class OutputMap(collections.Mapping): + def __init__(self, resources, template): self._template = template self._output_json_map = template.get('Outputs') @@ -446,7 +465,8 @@ class OutputMap(collections.Mapping): return self._parsed_outputs[output_logical_id] else: output_json = self._output_json_map.get(output_logical_id) - new_output = parse_output(output_logical_id, output_json, self._resource_map) + new_output = parse_output( + output_logical_id, output_json, self._resource_map) self._parsed_outputs[output_logical_id] = new_output return new_output diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 3b8f53895..272310d27 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -18,7 +18,8 @@ class CloudFormationResponse(BaseResponse): def _get_stack_from_s3_url(self, template_url): template_url_parts = urlparse(template_url) if "localhost" in template_url: - bucket_name, key_name = template_url_parts.path.lstrip("/").split("/") + bucket_name, key_name = template_url_parts.path.lstrip( + "/").split("/") else: bucket_name = template_url_parts.netloc.split(".")[0] key_name = template_url_parts.path.lstrip("/") @@ -32,7 +33,8 @@ 
class CloudFormationResponse(BaseResponse): template_url = self._get_param('TemplateURL') role_arn = self._get_param('RoleARN') parameters_list = self._get_list_prefix("Parameters.member") - tags = dict((item['key'], item['value']) for item in self._get_list_prefix("Tags.member")) + tags = dict((item['key'], item['value']) + for item in self._get_list_prefix("Tags.member")) # Hack dict-comprehension parameters = dict([ @@ -42,7 +44,8 @@ class CloudFormationResponse(BaseResponse): ]) if template_url: stack_body = self._get_stack_from_s3_url(template_url) - stack_notification_arns = self._get_multi_param('NotificationARNs.member') + stack_notification_arns = self._get_multi_param( + 'NotificationARNs.member') stack = self.cloudformation_backend.create_stack( name=stack_name, @@ -86,7 +89,8 @@ class CloudFormationResponse(BaseResponse): else: raise ValidationError(logical_resource_id) - template = self.response_template(DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE) + template = self.response_template( + DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE) return template.render(stack=stack, resource=resource) def describe_stack_resources(self): @@ -110,7 +114,8 @@ class CloudFormationResponse(BaseResponse): def list_stack_resources(self): stack_name_or_id = self._get_param('StackName') - resources = self.cloudformation_backend.list_stack_resources(stack_name_or_id) + resources = self.cloudformation_backend.list_stack_resources( + stack_name_or_id) template = self.response_template(LIST_STACKS_RESOURCES_RESPONSE) return template.render(resources=resources) @@ -138,13 +143,15 @@ class CloudFormationResponse(BaseResponse): stack_name = self._get_param('StackName') role_arn = self._get_param('RoleARN') if self._get_param('UsePreviousTemplate') == "true": - stack_body = self.cloudformation_backend.get_stack(stack_name).template + stack_body = self.cloudformation_backend.get_stack( + stack_name).template else: stack_body = self._get_param('TemplateBody') stack = 
self.cloudformation_backend.get_stack(stack_name) if stack.status == 'ROLLBACK_COMPLETE': - raise ValidationError(stack.stack_id, message="Stack:{0} is in ROLLBACK_COMPLETE state and can not be updated.".format(stack.stack_id)) + raise ValidationError( + stack.stack_id, message="Stack:{0} is in ROLLBACK_COMPLETE state and can not be updated.".format(stack.stack_id)) stack = self.cloudformation_backend.update_stack( name=stack_name, diff --git a/moto/cloudwatch/__init__.py b/moto/cloudwatch/__init__.py index 17d1c0c50..861fb703a 100644 --- a/moto/cloudwatch/__init__.py +++ b/moto/cloudwatch/__init__.py @@ -1,5 +1,5 @@ from .models import cloudwatch_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator cloudwatch_backend = cloudwatch_backends['us-east-1'] mock_cloudwatch = base_decorator(cloudwatch_backends) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 320bc476f..7257286ba 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -4,12 +4,14 @@ import datetime class Dimension(object): + def __init__(self, name, value): self.name = name self.value = value class FakeAlarm(object): + def __init__(self, name, namespace, metric_name, comparison_operator, evaluation_periods, period, threshold, statistic, description, dimensions, alarm_actions, ok_actions, insufficient_data_actions, unit): @@ -22,7 +24,8 @@ class FakeAlarm(object): self.threshold = threshold self.statistic = statistic self.description = description - self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions] + self.dimensions = [Dimension(dimension['name'], dimension[ + 'value']) for dimension in dimensions] self.alarm_actions = alarm_actions self.ok_actions = ok_actions self.insufficient_data_actions = insufficient_data_actions @@ -32,11 +35,13 @@ class FakeAlarm(object): class MetricDatum(object): + def 
__init__(self, namespace, name, value, dimensions): self.namespace = namespace self.name = name self.value = value - self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions] + self.dimensions = [Dimension(dimension['name'], dimension[ + 'value']) for dimension in dimensions] class CloudWatchBackend(BaseBackend): @@ -99,7 +104,8 @@ class CloudWatchBackend(BaseBackend): def put_metric_data(self, namespace, metric_data): for name, value, dimensions in metric_data: - self.metric_data.append(MetricDatum(namespace, name, value, dimensions)) + self.metric_data.append(MetricDatum( + namespace, name, value, dimensions)) def get_all_metrics(self): return self.metric_data diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index 0d2cfacf5..d06fe21d7 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -1,6 +1,5 @@ from moto.core.responses import BaseResponse from .models import cloudwatch_backends -import logging class CloudWatchResponse(BaseResponse): @@ -18,7 +17,8 @@ class CloudWatchResponse(BaseResponse): dimensions = self._get_list_prefix('Dimensions.member') alarm_actions = self._get_multi_param('AlarmActions.member') ok_actions = self._get_multi_param('OKActions.member') - insufficient_data_actions = self._get_multi_param("InsufficientDataActions.member") + insufficient_data_actions = self._get_multi_param( + "InsufficientDataActions.member") unit = self._get_param('Unit') cloudwatch_backend = cloudwatch_backends[self.region] alarm = cloudwatch_backend.put_metric_alarm(name, namespace, metric_name, @@ -40,14 +40,16 @@ class CloudWatchResponse(BaseResponse): cloudwatch_backend = cloudwatch_backends[self.region] if action_prefix: - alarms = cloudwatch_backend.get_alarms_by_action_prefix(action_prefix) + alarms = cloudwatch_backend.get_alarms_by_action_prefix( + action_prefix) elif alarm_name_prefix: - alarms = cloudwatch_backend.get_alarms_by_alarm_name_prefix(alarm_name_prefix) + 
alarms = cloudwatch_backend.get_alarms_by_alarm_name_prefix( + alarm_name_prefix) elif alarm_names: alarms = cloudwatch_backend.get_alarms_by_alarm_names(alarm_names) elif state_value: alarms = cloudwatch_backend.get_alarms_by_state_value(state_value) - else : + else: alarms = cloudwatch_backend.get_all_alarms() template = self.response_template(DESCRIBE_ALARMS_TEMPLATE) @@ -66,19 +68,24 @@ class CloudWatchResponse(BaseResponse): metric_index = 1 while True: try: - metric_name = self.querystring['MetricData.member.{0}.MetricName'.format(metric_index)][0] + metric_name = self.querystring[ + 'MetricData.member.{0}.MetricName'.format(metric_index)][0] except KeyError: break - value = self.querystring.get('MetricData.member.{0}.Value'.format(metric_index), [None])[0] + value = self.querystring.get( + 'MetricData.member.{0}.Value'.format(metric_index), [None])[0] dimensions = [] dimension_index = 1 while True: try: - dimension_name = self.querystring['MetricData.member.{0}.Dimensions.member.{1}.Name'.format(metric_index, dimension_index)][0] + dimension_name = self.querystring[ + 'MetricData.member.{0}.Dimensions.member.{1}.Name'.format(metric_index, dimension_index)][0] except KeyError: break - dimension_value = self.querystring['MetricData.member.{0}.Dimensions.member.{1}.Value'.format(metric_index, dimension_index)][0] - dimensions.append({'name': dimension_name, 'value': dimension_value}) + dimension_value = self.querystring[ + 'MetricData.member.{0}.Dimensions.member.{1}.Value'.format(metric_index, dimension_index)][0] + dimensions.append( + {'name': dimension_name, 'value': dimension_value}) dimension_index += 1 metric_data.append([metric_name, value, dimensions]) metric_index += 1 diff --git a/moto/core/exceptions.py b/moto/core/exceptions.py index d3a87e299..5474707d6 100644 --- a/moto/core/exceptions.py +++ b/moto/core/exceptions.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals from werkzeug.exceptions import HTTPException from jinja2 import 
DictLoader, Environment -from six import text_type SINGLE_ERROR_RESPONSE = u""" @@ -33,6 +32,7 @@ ERROR_JSON_RESPONSE = u"""{ } """ + class RESTError(HTTPException): templates = { 'single_error': SINGLE_ERROR_RESPONSE, @@ -54,8 +54,10 @@ class DryRunClientError(RESTError): class JsonRESTError(RESTError): + def __init__(self, error_type, message, template='error_json', **kwargs): - super(JsonRESTError, self).__init__(error_type, message, template, **kwargs) + super(JsonRESTError, self).__init__( + error_type, message, template, **kwargs) def get_headers(self, *args, **kwargs): return [('Content-Type', 'application/json')] diff --git a/moto/core/models.py b/moto/core/models.py index 04ff709e0..492a0e2ff 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -3,7 +3,6 @@ from __future__ import absolute_import import functools import inspect -import os import re from moto import settings @@ -15,6 +14,7 @@ from .utils import ( convert_flask_to_responses_response, ) + class BaseMockAWS(object): nested_count = 0 @@ -58,7 +58,6 @@ class BaseMockAWS(object): if self.__class__.nested_count < 0: raise RuntimeError('Called stop() before start().') - if self.__class__.nested_count == 0: self.disable_patching() @@ -96,6 +95,7 @@ class BaseMockAWS(object): class HttprettyMockAWS(BaseMockAWS): + def reset(self): HTTPretty.reset() @@ -118,10 +118,11 @@ class HttprettyMockAWS(BaseMockAWS): RESPONSES_METHODS = [responses.GET, responses.DELETE, responses.HEAD, - responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT] + responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT] class ResponsesMockAWS(BaseMockAWS): + def reset(self): responses.reset() @@ -146,6 +147,7 @@ class ResponsesMockAWS(BaseMockAWS): pass responses.reset() + MockAWS = ResponsesMockAWS @@ -167,12 +169,14 @@ class ServerModeMockAWS(BaseMockAWS): if 'endpoint_url' not in kwargs: kwargs['endpoint_url'] = "http://localhost:8086" return real_boto3_client(*args, **kwargs) + def 
fake_boto3_resource(*args, **kwargs): if 'endpoint_url' not in kwargs: kwargs['endpoint_url'] = "http://localhost:8086" return real_boto3_resource(*args, **kwargs) self._client_patcher = mock.patch('boto3.client', fake_boto3_client) - self._resource_patcher = mock.patch('boto3.resource', fake_boto3_resource) + self._resource_patcher = mock.patch( + 'boto3.resource', fake_boto3_resource) self._client_patcher.start() self._resource_patcher.start() @@ -181,7 +185,9 @@ class ServerModeMockAWS(BaseMockAWS): self._client_patcher.stop() self._resource_patcher.stop() + class Model(type): + def __new__(self, clsname, bases, namespace): cls = super(Model, self).__new__(self, clsname, bases, namespace) cls.__models__ = {} @@ -203,6 +209,7 @@ class Model(type): class BaseBackend(object): + def reset(self): self.__dict__ = {} self.__init__() @@ -211,7 +218,8 @@ class BaseBackend(object): def _url_module(self): backend_module = self.__class__.__module__ backend_urls_module_name = backend_module.replace("models", "urls") - backend_urls_module = __import__(backend_urls_module_name, fromlist=['url_bases', 'url_paths']) + backend_urls_module = __import__(backend_urls_module_name, fromlist=[ + 'url_bases', 'url_paths']) return backend_urls_module @property @@ -306,6 +314,7 @@ class deprecated_base_decorator(base_decorator): class MotoAPIBackend(BaseBackend): + def reset(self): from moto.backends import BACKENDS for name, backends in BACKENDS.items(): @@ -315,4 +324,5 @@ class MotoAPIBackend(BaseBackend): backend.reset() self.__init__() + moto_api_backend = MotoAPIBackend() diff --git a/moto/core/responses.py b/moto/core/responses.py index e558eb1dd..00e3ba742 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -59,6 +59,7 @@ class DynamicDictLoader(DictLoader): Including the fixed (current) method version here to ensure performance benefit even for those using older jinja versions. 
""" + def get_source(self, environment, template): if template in self.mapping: source = self.mapping[template] @@ -77,7 +78,8 @@ class _TemplateEnvironmentMixin(object): def __init__(self): super(_TemplateEnvironmentMixin, self).__init__() self.loader = DynamicDictLoader({}) - self.environment = Environment(loader=self.loader, autoescape=self.should_autoescape) + self.environment = Environment( + loader=self.loader, autoescape=self.should_autoescape) @property def should_autoescape(self): @@ -127,12 +129,14 @@ class BaseResponse(_TemplateEnvironmentMixin): self.body = self.body.decode('utf-8') if not querystring: - querystring.update(parse_qs(urlparse(full_url).query, keep_blank_values=True)) + querystring.update( + parse_qs(urlparse(full_url).query, keep_blank_values=True)) if not querystring: if 'json' in request.headers.get('content-type', []) and self.aws_service_spec: decoded = json.loads(self.body) - target = request.headers.get('x-amz-target') or request.headers.get('X-Amz-Target') + target = request.headers.get( + 'x-amz-target') or request.headers.get('X-Amz-Target') service, method = target.split('.') input_spec = self.aws_service_spec.input_spec(method) flat = flatten_json_request_body('', decoded, input_spec) @@ -161,7 +165,8 @@ class BaseResponse(_TemplateEnvironmentMixin): if match: region = match.group(1) elif 'Authorization' in request.headers: - region = request.headers['Authorization'].split(",")[0].split("/")[2] + region = request.headers['Authorization'].split(",")[ + 0].split("/")[2] else: region = self.default_region return region @@ -175,7 +180,8 @@ class BaseResponse(_TemplateEnvironmentMixin): action = self.querystring.get('Action', [""])[0] if not action: # Some services use a header for the action # Headers are case-insensitive. Probably a better way to do this. 
- match = self.headers.get('x-amz-target') or self.headers.get('X-Amz-Target') + match = self.headers.get( + 'x-amz-target') or self.headers.get('X-Amz-Target') if match: action = match.split(".")[-1] @@ -198,7 +204,8 @@ class BaseResponse(_TemplateEnvironmentMixin): headers['status'] = str(headers['status']) return status, headers, body - raise NotImplementedError("The {0} action has not been implemented".format(action)) + raise NotImplementedError( + "The {0} action has not been implemented".format(action)) def _get_param(self, param_name, if_none=None): val = self.querystring.get(param_name) @@ -258,7 +265,8 @@ class BaseResponse(_TemplateEnvironmentMixin): params = {} for key, value in self.querystring.items(): if key.startswith(param_prefix): - params[camelcase_to_underscores(key.replace(param_prefix, ""))] = value[0] + params[camelcase_to_underscores( + key.replace(param_prefix, ""))] = value[0] return params def _get_list_prefix(self, param_prefix): @@ -291,7 +299,8 @@ class BaseResponse(_TemplateEnvironmentMixin): new_items = {} for key, value in self.querystring.items(): if key.startswith(index_prefix): - new_items[camelcase_to_underscores(key.replace(index_prefix, ""))] = value[0] + new_items[camelcase_to_underscores( + key.replace(index_prefix, ""))] = value[0] if not new_items: break results.append(new_items) @@ -327,7 +336,8 @@ class BaseResponse(_TemplateEnvironmentMixin): def is_not_dryrun(self, action): if 'true' in self.querystring.get('DryRun', ['false']): message = 'An error occurred (DryRunOperation) when calling the %s operation: Request would have succeeded, but DryRun flag is set' % action - raise DryRunClientError(error_type="DryRunOperation", message=message) + raise DryRunClientError( + error_type="DryRunOperation", message=message) return True @@ -343,6 +353,7 @@ class MotoAPIResponse(BaseResponse): class _RecursiveDictRef(object): """Store a recursive reference to dict.""" + def __init__(self): self.key = None self.dic = {} @@ -502,12 
+513,15 @@ def flatten_json_request_body(prefix, dict_body, spec): if node_type == 'list': for idx, v in enumerate(value, 1): pref = key + '.member.' + str(idx) - flat.update(flatten_json_request_body(pref, v, spec[key]['member'])) + flat.update(flatten_json_request_body( + pref, v, spec[key]['member'])) elif node_type == 'map': for idx, (k, v) in enumerate(value.items(), 1): pref = key + '.entry.' + str(idx) - flat.update(flatten_json_request_body(pref + '.key', k, spec[key]['key'])) - flat.update(flatten_json_request_body(pref + '.value', v, spec[key]['value'])) + flat.update(flatten_json_request_body( + pref + '.key', k, spec[key]['key'])) + flat.update(flatten_json_request_body( + pref + '.value', v, spec[key]['value'])) else: flat.update(flatten_json_request_body(key, value, spec[key])) @@ -542,7 +556,8 @@ def xml_to_json_response(service_spec, operation, xml, result_node=None): # this can happen when with an older version of # botocore for which the node in XML template is not # defined in service spec. 
- log.warning('Field %s is not defined by the botocore version in use', k) + log.warning( + 'Field %s is not defined by the botocore version in use', k) continue if spec[k]['type'] == 'list': @@ -554,7 +569,8 @@ def xml_to_json_response(service_spec, operation, xml, result_node=None): else: od[k] = [transform(v['member'], spec[k]['member'])] elif isinstance(v['member'], list): - od[k] = [transform(o, spec[k]['member']) for o in v['member']] + od[k] = [transform(o, spec[k]['member']) + for o in v['member']] elif isinstance(v['member'], OrderedDict): od[k] = [transform(v['member'], spec[k]['member'])] else: diff --git a/moto/core/utils.py b/moto/core/utils.py index 11aafbb89..d26694014 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -98,7 +98,7 @@ class convert_httpretty_response(object): result = self.callback(request, url, headers) status, headers, response = result if 'server' not in headers: - headers["server"] = "amazon.com" + headers["server"] = "amazon.com" return status, headers, response diff --git a/moto/datapipeline/__init__.py b/moto/datapipeline/__init__.py index cebcf22bf..2565ddd5a 100644 --- a/moto/datapipeline/__init__.py +++ b/moto/datapipeline/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import datapipeline_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator datapipeline_backend = datapipeline_backends['us-east-1'] mock_datapipeline = base_decorator(datapipeline_backends) diff --git a/moto/datapipeline/models.py b/moto/datapipeline/models.py index b6a70b5f1..0cb33e4ed 100644 --- a/moto/datapipeline/models.py +++ b/moto/datapipeline/models.py @@ -7,6 +7,7 @@ from .utils import get_random_pipeline_id, remove_capitalization_of_dict_keys class PipelineObject(object): + def __init__(self, object_id, name, fields): self.object_id = object_id self.name = name @@ -21,6 +22,7 @@ class 
PipelineObject(object): class Pipeline(object): + def __init__(self, name, unique_id): self.name = name self.unique_id = unique_id @@ -82,7 +84,8 @@ class Pipeline(object): def set_pipeline_objects(self, pipeline_objects): self.objects = [ - PipelineObject(pipeline_object['id'], pipeline_object['name'], pipeline_object['fields']) + PipelineObject(pipeline_object['id'], pipeline_object[ + 'name'], pipeline_object['fields']) for pipeline_object in remove_capitalization_of_dict_keys(pipeline_objects) ] @@ -95,8 +98,10 @@ class Pipeline(object): properties = cloudformation_json["Properties"] cloudformation_unique_id = "cf-" + properties["Name"] - pipeline = datapipeline_backend.create_pipeline(properties["Name"], cloudformation_unique_id) - datapipeline_backend.put_pipeline_definition(pipeline.pipeline_id, properties["PipelineObjects"]) + pipeline = datapipeline_backend.create_pipeline( + properties["Name"], cloudformation_unique_id) + datapipeline_backend.put_pipeline_definition( + pipeline.pipeline_id, properties["PipelineObjects"]) if properties["Activate"]: pipeline.activate() @@ -117,7 +122,8 @@ class DataPipelineBackend(BaseBackend): return self.pipelines.values() def describe_pipelines(self, pipeline_ids): - pipelines = [pipeline for pipeline in self.pipelines.values() if pipeline.pipeline_id in pipeline_ids] + pipelines = [pipeline for pipeline in self.pipelines.values( + ) if pipeline.pipeline_id in pipeline_ids] return pipelines def get_pipeline(self, pipeline_id): diff --git a/moto/datapipeline/responses.py b/moto/datapipeline/responses.py index 2607f685d..f3644fd5c 100644 --- a/moto/datapipeline/responses.py +++ b/moto/datapipeline/responses.py @@ -52,12 +52,14 @@ class DataPipelineResponse(BaseResponse): pipeline_id = self.parameters["pipelineId"] pipeline_objects = self.parameters["pipelineObjects"] - self.datapipeline_backend.put_pipeline_definition(pipeline_id, pipeline_objects) + self.datapipeline_backend.put_pipeline_definition( + pipeline_id, 
pipeline_objects) return json.dumps({"errored": False}) def get_pipeline_definition(self): pipeline_id = self.parameters["pipelineId"] - pipeline_definition = self.datapipeline_backend.get_pipeline_definition(pipeline_id) + pipeline_definition = self.datapipeline_backend.get_pipeline_definition( + pipeline_id) return json.dumps({ "pipelineObjects": [pipeline_object.to_json() for pipeline_object in pipeline_definition] }) @@ -66,7 +68,8 @@ class DataPipelineResponse(BaseResponse): pipeline_id = self.parameters["pipelineId"] object_ids = self.parameters["objectIds"] - pipeline_objects = self.datapipeline_backend.describe_objects(object_ids, pipeline_id) + pipeline_objects = self.datapipeline_backend.describe_objects( + object_ids, pipeline_id) return json.dumps({ "hasMoreResults": False, "marker": None, diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py index dd58eb4de..db50dbcc6 100644 --- a/moto/dynamodb/models.py +++ b/moto/dynamodb/models.py @@ -10,6 +10,7 @@ from .comparisons import get_comparison_func class DynamoJsonEncoder(json.JSONEncoder): + def default(self, obj): if hasattr(obj, 'to_json'): return obj.to_json() @@ -53,6 +54,7 @@ class DynamoType(object): class Item(object): + def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs): self.hash_key = hash_key self.hash_key_type = hash_key_type @@ -157,7 +159,8 @@ class Table(object): else: range_value = None - item = Item(hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs) + item = Item(hash_value, self.hash_key_type, range_value, + self.range_key_type, item_attrs) if range_value: self.items[hash_value][range_value] = item @@ -167,7 +170,8 @@ class Table(object): def get_item(self, hash_key, range_key): if self.has_range_key and not range_key: - raise ValueError("Table has a range key, but no range key was passed into get_item") + raise ValueError( + "Table has a range key, but no range key was passed into get_item") try: if range_key: return 
self.items[hash_key][range_key] @@ -222,7 +226,8 @@ class Table(object): # Comparison is NULL and we don't have the attribute continue else: - # No attribute found and comparison is no NULL. This item fails + # No attribute found and comparison is no NULL. This item + # fails passes_all_conditions = False break @@ -283,7 +288,8 @@ class DynamoDBBackend(BaseBackend): return None, None hash_key = DynamoType(hash_key_dict) - range_values = [DynamoType(range_value) for range_value in range_value_dicts] + range_values = [DynamoType(range_value) + for range_value in range_value_dicts] return table.query(hash_key, range_comparison, range_values) diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py index 59cff0395..0da3e5045 100644 --- a/moto/dynamodb/responses.py +++ b/moto/dynamodb/responses.py @@ -130,7 +130,8 @@ class DynamoHandler(BaseResponse): throughput = self.body["ProvisionedThroughput"] new_read_units = throughput["ReadCapacityUnits"] new_write_units = throughput["WriteCapacityUnits"] - table = dynamodb_backend.update_table_throughput(name, new_read_units, new_write_units) + table = dynamodb_backend.update_table_throughput( + name, new_read_units, new_write_units) return dynamo_json_dump(table.describe) def describe_table(self): @@ -169,7 +170,8 @@ class DynamoHandler(BaseResponse): key = request['Key'] hash_key = key['HashKeyElement'] range_key = key.get('RangeKeyElement') - item = dynamodb_backend.delete_item(table_name, hash_key, range_key) + item = dynamodb_backend.delete_item( + table_name, hash_key, range_key) response = { "Responses": { @@ -221,11 +223,13 @@ class DynamoHandler(BaseResponse): for key in keys: hash_key = key["HashKeyElement"] range_key = key.get("RangeKeyElement") - item = dynamodb_backend.get_item(table_name, hash_key, range_key) + item = dynamodb_backend.get_item( + table_name, hash_key, range_key) if item: item_describe = item.describe_attrs(attributes_to_get) items.append(item_describe) - 
results["Responses"][table_name] = {"Items": items, "ConsumedCapacityUnits": 1} + results["Responses"][table_name] = { + "Items": items, "ConsumedCapacityUnits": 1} return dynamo_json_dump(results) def query(self): @@ -239,7 +243,8 @@ class DynamoHandler(BaseResponse): range_comparison = None range_values = [] - items, last_page = dynamodb_backend.query(name, hash_key, range_comparison, range_values) + items, last_page = dynamodb_backend.query( + name, hash_key, range_comparison, range_values) if items is None: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' @@ -265,7 +270,8 @@ class DynamoHandler(BaseResponse): filters = {} scan_filters = self.body.get('ScanFilter', {}) for attribute_name, scan_filter in scan_filters.items(): - # Keys are attribute names. Values are tuples of (comparison, comparison_value) + # Keys are attribute names. Values are tuples of (comparison, + # comparison_value) comparison_operator = scan_filter["ComparisonOperator"] comparison_values = scan_filter.get("AttributeValueList", []) filters[attribute_name] = (comparison_operator, comparison_values) diff --git a/moto/dynamodb2/__init__.py b/moto/dynamodb2/__init__.py index 7a1f07352..ad3f042d2 100644 --- a/moto/dynamodb2/__init__.py +++ b/moto/dynamodb2/__init__.py @@ -3,4 +3,4 @@ from .models import dynamodb_backend2 dynamodb_backends2 = {"global": dynamodb_backend2} mock_dynamodb2 = dynamodb_backend2.decorator -mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator \ No newline at end of file +mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 1dc723df0..0b323ecd5 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -1,12 +1,12 @@ from __future__ import unicode_literals # TODO add tests for all of these -EQ_FUNCTION = lambda item_value, test_value: item_value == test_value -NE_FUNCTION = lambda item_value, test_value: item_value != 
test_value -LE_FUNCTION = lambda item_value, test_value: item_value <= test_value -LT_FUNCTION = lambda item_value, test_value: item_value < test_value -GE_FUNCTION = lambda item_value, test_value: item_value >= test_value -GT_FUNCTION = lambda item_value, test_value: item_value > test_value +EQ_FUNCTION = lambda item_value, test_value: item_value == test_value # flake8: noqa +NE_FUNCTION = lambda item_value, test_value: item_value != test_value # flake8: noqa +LE_FUNCTION = lambda item_value, test_value: item_value <= test_value # flake8: noqa +LT_FUNCTION = lambda item_value, test_value: item_value < test_value # flake8: noqa +GE_FUNCTION = lambda item_value, test_value: item_value >= test_value # flake8: noqa +GT_FUNCTION = lambda item_value, test_value: item_value > test_value # flake8: noqa COMPARISON_FUNCS = { 'EQ': EQ_FUNCTION, diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 0adbae946..15c30e590 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -11,6 +11,7 @@ from .comparisons import get_comparison_func class DynamoJsonEncoder(json.JSONEncoder): + def default(self, obj): if hasattr(obj, 'to_json'): return obj.to_json() @@ -76,6 +77,7 @@ class DynamoType(object): class Item(object): + def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs): self.hash_key = hash_key self.hash_key_type = hash_key_type @@ -131,14 +133,15 @@ class Item(object): elif action == 'SET' or action == 'set': key, value = value.split("=") if value in expression_attribute_values: - self.attrs[key] = DynamoType(expression_attribute_values[value]) + self.attrs[key] = DynamoType( + expression_attribute_values[value]) else: self.attrs[key] = DynamoType({"S": value}) def update_with_attribute_updates(self, attribute_updates): for attribute_name, update_action in attribute_updates.items(): action = update_action['Action'] - if action == 'DELETE' and not 'Value' in update_action: + if action == 'DELETE' and 'Value' not in 
update_action: if attribute_name in self.attrs: del self.attrs[attribute_name] continue @@ -158,14 +161,16 @@ class Item(object): self.attrs[attribute_name] = DynamoType({"S": new_value}) elif action == 'ADD': if set(update_action['Value'].keys()) == set(['N']): - existing = self.attrs.get(attribute_name, DynamoType({"N": '0'})) + existing = self.attrs.get( + attribute_name, DynamoType({"N": '0'})) self.attrs[attribute_name] = DynamoType({"N": str( - decimal.Decimal(existing.value) + - decimal.Decimal(new_value) + decimal.Decimal(existing.value) + + decimal.Decimal(new_value) )}) else: # TODO: implement other data types - raise NotImplementedError('ADD not supported for %s' % ', '.join(update_action['Value'].keys())) + raise NotImplementedError( + 'ADD not supported for %s' % ', '.join(update_action['Value'].keys())) class Table(object): @@ -186,7 +191,8 @@ class Table(object): self.range_key_attr = elem["AttributeName"] self.range_key_type = elem["KeyType"] if throughput is None: - self.throughput = {'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10} + self.throughput = { + 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10} else: self.throughput = throughput self.throughput["NumberOfDecreasesToday"] = 0 @@ -250,14 +256,16 @@ class Table(object): else: range_value = None - item = Item(hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs) + item = Item(hash_value, self.hash_key_type, range_value, + self.range_key_type, item_attrs) if not overwrite: if expected is None: expected = {} lookup_range_value = range_value else: - expected_range_value = expected.get(self.range_key_attr, {}).get("Value") + expected_range_value = expected.get( + self.range_key_attr, {}).get("Value") if(expected_range_value is None): lookup_range_value = range_value else: @@ -281,8 +289,10 @@ class Table(object): elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value: raise ValueError("The conditional request failed") elif 'ComparisonOperator' 
in val: - comparison_func = get_comparison_func(val['ComparisonOperator']) - dynamo_types = [DynamoType(ele) for ele in val["AttributeValueList"]] + comparison_func = get_comparison_func( + val['ComparisonOperator']) + dynamo_types = [DynamoType(ele) for ele in val[ + "AttributeValueList"]] for t in dynamo_types: if not comparison_func(current_attr[key].value, t.value): raise ValueError('The conditional request failed') @@ -304,7 +314,8 @@ class Table(object): def get_item(self, hash_key, range_key=None): if self.has_range_key and not range_key: - raise ValueError("Table has a range key, but no range key was passed into get_item") + raise ValueError( + "Table has a range key, but no range key was passed into get_item") try: if range_key: return self.items[hash_key][range_key] @@ -339,9 +350,11 @@ class Table(object): index = indexes_by_name[index_name] try: - index_hash_key = [key for key in index['KeySchema'] if key['KeyType'] == 'HASH'][0] + index_hash_key = [key for key in index[ + 'KeySchema'] if key['KeyType'] == 'HASH'][0] except IndexError: - raise ValueError('Missing Hash Key. KeySchema: %s' % index['KeySchema']) + raise ValueError('Missing Hash Key. 
KeySchema: %s' % + index['KeySchema']) possible_results = [] for item in self.all_items(): @@ -351,17 +364,20 @@ class Table(object): if item_hash_key and item_hash_key == hash_key: possible_results.append(item) else: - possible_results = [item for item in list(self.all_items()) if isinstance(item, Item) and item.hash_key == hash_key] + possible_results = [item for item in list(self.all_items()) if isinstance( + item, Item) and item.hash_key == hash_key] if index_name: try: - index_range_key = [key for key in index['KeySchema'] if key['KeyType'] == 'RANGE'][0] + index_range_key = [key for key in index[ + 'KeySchema'] if key['KeyType'] == 'RANGE'][0] except IndexError: index_range_key = None if range_comparison: if index_name and not index_range_key: - raise ValueError('Range Key comparison but no range key found for index: %s' % index_name) + raise ValueError( + 'Range Key comparison but no range key found for index: %s' % index_name) elif index_name: for result in possible_results: @@ -375,19 +391,21 @@ class Table(object): if filter_kwargs: for result in possible_results: for field, value in filter_kwargs.items(): - dynamo_types = [DynamoType(ele) for ele in value["AttributeValueList"]] + dynamo_types = [DynamoType(ele) for ele in value[ + "AttributeValueList"]] if result.attrs.get(field).compare(value['ComparisonOperator'], dynamo_types): results.append(result) if not range_comparison and not filter_kwargs: - # If we're not filtering on range key or on an index return all values + # If we're not filtering on range key or on an index return all + # values results = possible_results if index_name: if index_range_key: results.sort(key=lambda item: item.attrs[index_range_key['AttributeName']].value - if item.attrs.get(index_range_key['AttributeName']) else None) + if item.attrs.get(index_range_key['AttributeName']) else None) else: results.sort(key=lambda item: item.range_key) @@ -427,7 +445,8 @@ class Table(object): # Comparison is NULL and we don't have the 
attribute continue else: - # No attribute found and comparison is no NULL. This item fails + # No attribute found and comparison is no NULL. This item + # fails passes_all_conditions = False break @@ -460,7 +479,6 @@ class Table(object): return results, last_evaluated_key - def lookup(self, *args, **kwargs): if not self.schema: self.describe() @@ -517,7 +535,8 @@ class DynamoDBBackend(BaseBackend): if gsi_to_create: if gsi_to_create['IndexName'] in gsis_by_name: - raise ValueError('Global Secondary Index already exists: %s' % gsi_to_create['IndexName']) + raise ValueError( + 'Global Secondary Index already exists: %s' % gsi_to_create['IndexName']) gsis_by_name[gsi_to_create['IndexName']] = gsi_to_create @@ -555,9 +574,11 @@ class DynamoDBBackend(BaseBackend): def get_keys_value(self, table, keys): if table.hash_key_attr not in keys or (table.has_range_key and table.range_key_attr not in keys): - raise ValueError("Table has a range key, but no range key was passed into get_item") + raise ValueError( + "Table has a range key, but no range key was passed into get_item") hash_key = DynamoType(keys[table.hash_key_attr]) - range_key = DynamoType(keys[table.range_key_attr]) if table.has_range_key else None + range_key = DynamoType( + keys[table.range_key_attr]) if table.has_range_key else None return hash_key, range_key def get_table(self, table_name): @@ -577,7 +598,8 @@ class DynamoDBBackend(BaseBackend): return None, None hash_key = DynamoType(hash_key_dict) - range_values = [DynamoType(range_value) for range_value in range_value_dicts] + range_values = [DynamoType(range_value) + for range_value in range_value_dicts] return table.query(hash_key, range_comparison, range_values, limit, exclusive_start_key, scan_index_forward, index_name, **filter_kwargs) @@ -598,7 +620,8 @@ class DynamoDBBackend(BaseBackend): table = self.get_table(table_name) if all([table.hash_key_attr in key, table.range_key_attr in key]): - # Covers cases where table has hash and range keys, ``key`` 
param will be a dict + # Covers cases where table has hash and range keys, ``key`` param + # will be a dict hash_value = DynamoType(key[table.hash_key_attr]) range_value = DynamoType(key[table.range_key_attr]) elif table.hash_key_attr in key: @@ -629,7 +652,8 @@ class DynamoDBBackend(BaseBackend): item = table.get_item(hash_value, range_value) if update_expression: - item.update(update_expression, expression_attribute_names, expression_attribute_values) + item.update(update_expression, expression_attribute_names, + expression_attribute_values) else: item.update_with_attribute_updates(attribute_updates) return item diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 0957bfa89..3ceda0be1 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -104,11 +104,11 @@ class DynamoHandler(BaseResponse): local_secondary_indexes = body.get("LocalSecondaryIndexes", []) table = dynamodb_backend2.create_table(table_name, - schema=key_schema, - throughput=throughput, - attr=attr, - global_indexes=global_indexes, - indexes=local_secondary_indexes) + schema=key_schema, + throughput=throughput, + attr=attr, + global_indexes=global_indexes, + indexes=local_secondary_indexes) if table is not None: return dynamo_json_dump(table.describe()) else: @@ -127,7 +127,8 @@ class DynamoHandler(BaseResponse): def update_table(self): name = self.body['TableName'] if 'GlobalSecondaryIndexUpdates' in self.body: - table = dynamodb_backend2.update_table_global_indexes(name, self.body['GlobalSecondaryIndexUpdates']) + table = dynamodb_backend2.update_table_global_indexes( + name, self.body['GlobalSecondaryIndexUpdates']) if 'ProvisionedThroughput' in self.body: throughput = self.body["ProvisionedThroughput"] table = dynamodb_backend2.update_table_throughput(name, throughput) @@ -151,17 +152,20 @@ class DynamoHandler(BaseResponse): else: expected = None - # Attempt to parse simple ConditionExpressions into an Expected expression + # Attempt to parse simple 
ConditionExpressions into an Expected + # expression if not expected: condition_expression = self.body.get('ConditionExpression') if condition_expression and 'OR' not in condition_expression: - cond_items = [c.strip() for c in condition_expression.split('AND')] + cond_items = [c.strip() + for c in condition_expression.split('AND')] if cond_items: expected = {} overwrite = False exists_re = re.compile('^attribute_exists\((.*)\)$') - not_exists_re = re.compile('^attribute_not_exists\((.*)\)$') + not_exists_re = re.compile( + '^attribute_not_exists\((.*)\)$') for cond in cond_items: exists_m = exists_re.match(cond) @@ -172,7 +176,8 @@ class DynamoHandler(BaseResponse): expected[not_exists_m.group(1)] = {'Exists': False} try: - result = dynamodb_backend2.put_item(name, item, expected, overwrite) + result = dynamodb_backend2.put_item( + name, item, expected, overwrite) except Exception: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er) @@ -249,7 +254,8 @@ class DynamoHandler(BaseResponse): item = dynamodb_backend2.get_item(table_name, key) if item: item_describe = item.describe_attrs(attributes_to_get) - results["Responses"][table_name].append(item_describe["Item"]) + results["Responses"][table_name].append( + item_describe["Item"]) results["ConsumedCapacity"].append({ "CapacityUnits": len(keys), @@ -268,8 +274,10 @@ class DynamoHandler(BaseResponse): table = dynamodb_backend2.get_table(name) index_name = self.body.get('IndexName') if index_name: - all_indexes = (table.global_indexes or []) + (table.indexes or []) - indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) + all_indexes = (table.global_indexes or []) + \ + (table.indexes or []) + indexes_by_name = dict((i['IndexName'], i) + for i in all_indexes) if index_name not in indexes_by_name: raise ValueError('Invalid index: %s for table: %s. 
Available indexes are: %s' % ( index_name, name, ', '.join(indexes_by_name.keys()) @@ -279,16 +287,21 @@ class DynamoHandler(BaseResponse): else: index = table.schema - key_map = [column for _, column in sorted((k, v) for k, v in self.body['ExpressionAttributeNames'].items())] + key_map = [column for _, column in sorted( + (k, v) for k, v in self.body['ExpressionAttributeNames'].items())] if " AND " in key_condition_expression: expressions = key_condition_expression.split(" AND ", 1) - index_hash_key = [key for key in index if key['KeyType'] == 'HASH'][0] - hash_key_index_in_key_map = key_map.index(index_hash_key['AttributeName']) + index_hash_key = [ + key for key in index if key['KeyType'] == 'HASH'][0] + hash_key_index_in_key_map = key_map.index( + index_hash_key['AttributeName']) - hash_key_expression = expressions.pop(hash_key_index_in_key_map).strip('()') - # TODO implement more than one range expression and OR operators + hash_key_expression = expressions.pop( + hash_key_index_in_key_map).strip('()') + # TODO implement more than one range expression and OR + # operators range_key_expression = expressions[0].strip('()') range_key_expression_components = range_key_expression.split() range_comparison = range_key_expression_components[1] @@ -304,7 +317,8 @@ class DynamoHandler(BaseResponse): value_alias_map[range_key_expression_components[1]], ] else: - range_values = [value_alias_map[range_key_expression_components[2]]] + range_values = [value_alias_map[ + range_key_expression_components[2]]] else: hash_key_expression = key_condition_expression range_comparison = None @@ -316,14 +330,16 @@ class DynamoHandler(BaseResponse): # 'KeyConditions': {u'forum_name': {u'ComparisonOperator': u'EQ', u'AttributeValueList': [{u'S': u'the-key'}]}} key_conditions = self.body.get('KeyConditions') if key_conditions: - hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(name, key_conditions.keys()) + hash_key_name, range_key_name = 
dynamodb_backend2.get_table_keys_name( + name, key_conditions.keys()) for key, value in key_conditions.items(): if key not in (hash_key_name, range_key_name): filter_kwargs[key] = value if hash_key_name is None: er = "'com.amazonaws.dynamodb.v20120810#ResourceNotFoundException" return self.error(er) - hash_key = key_conditions[hash_key_name]['AttributeValueList'][0] + hash_key = key_conditions[hash_key_name][ + 'AttributeValueList'][0] if len(key_conditions) == 1: range_comparison = None range_values = [] @@ -334,8 +350,10 @@ class DynamoHandler(BaseResponse): else: range_condition = key_conditions.get(range_key_name) if range_condition: - range_comparison = range_condition['ComparisonOperator'] - range_values = range_condition['AttributeValueList'] + range_comparison = range_condition[ + 'ComparisonOperator'] + range_values = range_condition[ + 'AttributeValueList'] else: range_comparison = None range_values = [] @@ -369,7 +387,8 @@ class DynamoHandler(BaseResponse): filters = {} scan_filters = self.body.get('ScanFilter', {}) for attribute_name, scan_filter in scan_filters.items(): - # Keys are attribute names. Values are tuples of (comparison, comparison_value) + # Keys are attribute names. 
Values are tuples of (comparison, + # comparison_value) comparison_operator = scan_filter["ComparisonOperator"] comparison_values = scan_filter.get("AttributeValueList", []) filters[attribute_name] = (comparison_operator, comparison_values) @@ -416,16 +435,20 @@ class DynamoHandler(BaseResponse): key = self.body['Key'] update_expression = self.body.get('UpdateExpression') attribute_updates = self.body.get('AttributeUpdates') - expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) - expression_attribute_values = self.body.get('ExpressionAttributeValues', {}) + expression_attribute_names = self.body.get( + 'ExpressionAttributeNames', {}) + expression_attribute_values = self.body.get( + 'ExpressionAttributeValues', {}) existing_item = dynamodb_backend2.get_item(name, key) # Support spaces between operators in an update expression # E.g. `a = b + c` -> `a=b+c` if update_expression: - update_expression = re.sub('\s*([=\+-])\s*', '\\1', update_expression) + update_expression = re.sub( + '\s*([=\+-])\s*', '\\1', update_expression) - item = dynamodb_backend2.update_item(name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values) + item = dynamodb_backend2.update_item( + name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values) item_dict = item.to_json() item_dict['ConsumedCapacityUnits'] = 0.5 diff --git a/moto/ec2/__init__.py b/moto/ec2/__init__.py index 608173577..ba8cbe0a0 100644 --- a/moto/ec2/__init__.py +++ b/moto/ec2/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import ec2_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator ec2_backend = ec2_backends['us-east-1'] mock_ec2 = base_decorator(ec2_backends) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index 79ceb776f..d32118b82 100644 --- 
a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -7,12 +7,14 @@ class EC2ClientError(RESTError): class DependencyViolationError(EC2ClientError): + def __init__(self, message): super(DependencyViolationError, self).__init__( "DependencyViolation", message) class MissingParameterError(EC2ClientError): + def __init__(self, parameter): super(MissingParameterError, self).__init__( "MissingParameter", @@ -21,6 +23,7 @@ class MissingParameterError(EC2ClientError): class InvalidDHCPOptionsIdError(EC2ClientError): + def __init__(self, dhcp_options_id): super(InvalidDHCPOptionsIdError, self).__init__( "InvalidDhcpOptionID.NotFound", @@ -29,6 +32,7 @@ class InvalidDHCPOptionsIdError(EC2ClientError): class MalformedDHCPOptionsIdError(EC2ClientError): + def __init__(self, dhcp_options_id): super(MalformedDHCPOptionsIdError, self).__init__( "InvalidDhcpOptionsId.Malformed", @@ -37,6 +41,7 @@ class MalformedDHCPOptionsIdError(EC2ClientError): class InvalidKeyPairNameError(EC2ClientError): + def __init__(self, key): super(InvalidKeyPairNameError, self).__init__( "InvalidKeyPair.NotFound", @@ -45,6 +50,7 @@ class InvalidKeyPairNameError(EC2ClientError): class InvalidKeyPairDuplicateError(EC2ClientError): + def __init__(self, key): super(InvalidKeyPairDuplicateError, self).__init__( "InvalidKeyPair.Duplicate", @@ -53,6 +59,7 @@ class InvalidKeyPairDuplicateError(EC2ClientError): class InvalidVPCIdError(EC2ClientError): + def __init__(self, vpc_id): super(InvalidVPCIdError, self).__init__( "InvalidVpcID.NotFound", @@ -61,6 +68,7 @@ class InvalidVPCIdError(EC2ClientError): class InvalidSubnetIdError(EC2ClientError): + def __init__(self, subnet_id): super(InvalidSubnetIdError, self).__init__( "InvalidSubnetID.NotFound", @@ -69,6 +77,7 @@ class InvalidSubnetIdError(EC2ClientError): class InvalidNetworkAclIdError(EC2ClientError): + def __init__(self, network_acl_id): super(InvalidNetworkAclIdError, self).__init__( "InvalidNetworkAclID.NotFound", @@ -77,6 +86,7 @@ class 
InvalidNetworkAclIdError(EC2ClientError): class InvalidVpnGatewayIdError(EC2ClientError): + def __init__(self, network_acl_id): super(InvalidVpnGatewayIdError, self).__init__( "InvalidVpnGatewayID.NotFound", @@ -85,6 +95,7 @@ class InvalidVpnGatewayIdError(EC2ClientError): class InvalidVpnConnectionIdError(EC2ClientError): + def __init__(self, network_acl_id): super(InvalidVpnConnectionIdError, self).__init__( "InvalidVpnConnectionID.NotFound", @@ -93,6 +104,7 @@ class InvalidVpnConnectionIdError(EC2ClientError): class InvalidCustomerGatewayIdError(EC2ClientError): + def __init__(self, customer_gateway_id): super(InvalidCustomerGatewayIdError, self).__init__( "InvalidCustomerGatewayID.NotFound", @@ -101,6 +113,7 @@ class InvalidCustomerGatewayIdError(EC2ClientError): class InvalidNetworkInterfaceIdError(EC2ClientError): + def __init__(self, eni_id): super(InvalidNetworkInterfaceIdError, self).__init__( "InvalidNetworkInterfaceID.NotFound", @@ -109,6 +122,7 @@ class InvalidNetworkInterfaceIdError(EC2ClientError): class InvalidNetworkAttachmentIdError(EC2ClientError): + def __init__(self, attachment_id): super(InvalidNetworkAttachmentIdError, self).__init__( "InvalidAttachmentID.NotFound", @@ -117,6 +131,7 @@ class InvalidNetworkAttachmentIdError(EC2ClientError): class InvalidSecurityGroupDuplicateError(EC2ClientError): + def __init__(self, name): super(InvalidSecurityGroupDuplicateError, self).__init__( "InvalidGroup.Duplicate", @@ -125,6 +140,7 @@ class InvalidSecurityGroupDuplicateError(EC2ClientError): class InvalidSecurityGroupNotFoundError(EC2ClientError): + def __init__(self, name): super(InvalidSecurityGroupNotFoundError, self).__init__( "InvalidGroup.NotFound", @@ -133,6 +149,7 @@ class InvalidSecurityGroupNotFoundError(EC2ClientError): class InvalidPermissionNotFoundError(EC2ClientError): + def __init__(self): super(InvalidPermissionNotFoundError, self).__init__( "InvalidPermission.NotFound", @@ -140,6 +157,7 @@ class 
InvalidPermissionNotFoundError(EC2ClientError): class InvalidRouteTableIdError(EC2ClientError): + def __init__(self, route_table_id): super(InvalidRouteTableIdError, self).__init__( "InvalidRouteTableID.NotFound", @@ -148,6 +166,7 @@ class InvalidRouteTableIdError(EC2ClientError): class InvalidRouteError(EC2ClientError): + def __init__(self, route_table_id, cidr): super(InvalidRouteError, self).__init__( "InvalidRoute.NotFound", @@ -156,6 +175,7 @@ class InvalidRouteError(EC2ClientError): class InvalidInstanceIdError(EC2ClientError): + def __init__(self, instance_id): super(InvalidInstanceIdError, self).__init__( "InvalidInstanceID.NotFound", @@ -164,6 +184,7 @@ class InvalidInstanceIdError(EC2ClientError): class InvalidAMIIdError(EC2ClientError): + def __init__(self, ami_id): super(InvalidAMIIdError, self).__init__( "InvalidAMIID.NotFound", @@ -172,6 +193,7 @@ class InvalidAMIIdError(EC2ClientError): class InvalidAMIAttributeItemValueError(EC2ClientError): + def __init__(self, attribute, value): super(InvalidAMIAttributeItemValueError, self).__init__( "InvalidAMIAttributeItemValue", @@ -180,6 +202,7 @@ class InvalidAMIAttributeItemValueError(EC2ClientError): class MalformedAMIIdError(EC2ClientError): + def __init__(self, ami_id): super(MalformedAMIIdError, self).__init__( "InvalidAMIID.Malformed", @@ -188,6 +211,7 @@ class MalformedAMIIdError(EC2ClientError): class InvalidSnapshotIdError(EC2ClientError): + def __init__(self, snapshot_id): super(InvalidSnapshotIdError, self).__init__( "InvalidSnapshot.NotFound", @@ -195,6 +219,7 @@ class InvalidSnapshotIdError(EC2ClientError): class InvalidVolumeIdError(EC2ClientError): + def __init__(self, volume_id): super(InvalidVolumeIdError, self).__init__( "InvalidVolume.NotFound", @@ -203,6 +228,7 @@ class InvalidVolumeIdError(EC2ClientError): class InvalidVolumeAttachmentError(EC2ClientError): + def __init__(self, volume_id, instance_id): super(InvalidVolumeAttachmentError, self).__init__( "InvalidAttachment.NotFound", @@ 
-211,6 +237,7 @@ class InvalidVolumeAttachmentError(EC2ClientError): class InvalidDomainError(EC2ClientError): + def __init__(self, domain): super(InvalidDomainError, self).__init__( "InvalidParameterValue", @@ -219,6 +246,7 @@ class InvalidDomainError(EC2ClientError): class InvalidAddressError(EC2ClientError): + def __init__(self, ip): super(InvalidAddressError, self).__init__( "InvalidAddress.NotFound", @@ -227,6 +255,7 @@ class InvalidAddressError(EC2ClientError): class InvalidAllocationIdError(EC2ClientError): + def __init__(self, allocation_id): super(InvalidAllocationIdError, self).__init__( "InvalidAllocationID.NotFound", @@ -235,6 +264,7 @@ class InvalidAllocationIdError(EC2ClientError): class InvalidAssociationIdError(EC2ClientError): + def __init__(self, association_id): super(InvalidAssociationIdError, self).__init__( "InvalidAssociationID.NotFound", @@ -243,6 +273,7 @@ class InvalidAssociationIdError(EC2ClientError): class InvalidVPCPeeringConnectionIdError(EC2ClientError): + def __init__(self, vpc_peering_connection_id): super(InvalidVPCPeeringConnectionIdError, self).__init__( "InvalidVpcPeeringConnectionId.NotFound", @@ -251,6 +282,7 @@ class InvalidVPCPeeringConnectionIdError(EC2ClientError): class InvalidVPCPeeringConnectionStateTransitionError(EC2ClientError): + def __init__(self, vpc_peering_connection_id): super(InvalidVPCPeeringConnectionStateTransitionError, self).__init__( "InvalidStateTransition", @@ -259,6 +291,7 @@ class InvalidVPCPeeringConnectionStateTransitionError(EC2ClientError): class InvalidParameterValueError(EC2ClientError): + def __init__(self, parameter_value): super(InvalidParameterValueError, self).__init__( "InvalidParameterValue", @@ -267,6 +300,7 @@ class InvalidParameterValueError(EC2ClientError): class InvalidParameterValueErrorTagNull(EC2ClientError): + def __init__(self): super(InvalidParameterValueErrorTagNull, self).__init__( "InvalidParameterValue", @@ -274,6 +308,7 @@ class 
InvalidParameterValueErrorTagNull(EC2ClientError): class InvalidInternetGatewayIdError(EC2ClientError): + def __init__(self, internet_gateway_id): super(InvalidInternetGatewayIdError, self).__init__( "InvalidInternetGatewayID.NotFound", @@ -282,6 +317,7 @@ class InvalidInternetGatewayIdError(EC2ClientError): class GatewayNotAttachedError(EC2ClientError): + def __init__(self, internet_gateway_id, vpc_id): super(GatewayNotAttachedError, self).__init__( "Gateway.NotAttached", @@ -290,6 +326,7 @@ class GatewayNotAttachedError(EC2ClientError): class ResourceAlreadyAssociatedError(EC2ClientError): + def __init__(self, resource_id): super(ResourceAlreadyAssociatedError, self).__init__( "Resource.AlreadyAssociated", @@ -298,6 +335,7 @@ class ResourceAlreadyAssociatedError(EC2ClientError): class TagLimitExceeded(EC2ClientError): + def __init__(self): super(TagLimitExceeded, self).__init__( "TagLimitExceeded", @@ -305,6 +343,7 @@ class TagLimitExceeded(EC2ClientError): class InvalidID(EC2ClientError): + def __init__(self, resource_id): super(InvalidID, self).__init__( "InvalidID", @@ -313,6 +352,7 @@ class InvalidID(EC2ClientError): class InvalidCIDRSubnetError(EC2ClientError): + def __init__(self, cidr): super(InvalidCIDRSubnetError, self).__init__( "InvalidParameterValue", @@ -321,6 +361,7 @@ class InvalidCIDRSubnetError(EC2ClientError): class RulesPerSecurityGroupLimitExceededError(EC2ClientError): + def __init__(self): super(RulesPerSecurityGroupLimitExceededError, self).__init__( "RulesPerSecurityGroupLimitExceeded", diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 30769fd7e..2e6b5e5b6 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals -import boto import copy import itertools import re @@ -117,20 +116,24 @@ def validate_resource_ids(resource_ids): class InstanceState(object): + def __init__(self, name='pending', code=0): self.name = name self.code = code class StateReason(object): + def 
__init__(self, message="", code=""): self.message = message self.code = code class TaggedEC2Resource(object): + def get_tags(self, *args, **kwargs): - tags = self.ec2_backend.describe_tags(filters={'resource-id': [self.id]}) + tags = self.ec2_backend.describe_tags( + filters={'resource-id': [self.id]}) return tags def add_tag(self, key, value): @@ -155,8 +158,9 @@ class TaggedEC2Resource(object): class NetworkInterface(TaggedEC2Resource): + def __init__(self, ec2_backend, subnet, private_ip_address, device_index=0, - public_ip_auto_assign=True, group_ids=None): + public_ip_auto_assign=True, group_ids=None): self.ec2_backend = ec2_backend self.id = random_eni_id() self.device_index = device_index @@ -181,7 +185,8 @@ class NetworkInterface(TaggedEC2Resource): group = self.ec2_backend.get_security_group_from_id(group_id) if not group: # Create with specific group ID. - group = SecurityGroup(self.ec2_backend, group_id, group_id, group_id, vpc_id=subnet.vpc_id) + group = SecurityGroup( + self.ec2_backend, group_id, group_id, group_id, vpc_id=subnet.vpc_id) self.ec2_backend.groups[subnet.vpc_id][group_id] = group if group: self._group_set.append(group) @@ -231,7 +236,8 @@ class NetworkInterface(TaggedEC2Resource): if attribute_name == 'PrimaryPrivateIpAddress': return self.private_ip_address elif attribute_name == 'SecondaryPrivateIpAddresses': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "SecondaryPrivateIpAddresses" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "SecondaryPrivateIpAddresses" ]"') raise UnformattedGetAttTemplateException() @property @@ -250,23 +256,27 @@ class NetworkInterface(TaggedEC2Resource): elif filter_name == 'group-id': return [group.id for group in self._group_set] - filter_value = super(NetworkInterface, self).get_filter_value(filter_name) + filter_value = super( + NetworkInterface, self).get_filter_value(filter_name) if filter_value is None: self.ec2_backend.raise_not_implemented_error( - "The filter '{0}' for 
DescribeNetworkInterfaces".format(filter_name) + "The filter '{0}' for DescribeNetworkInterfaces".format( + filter_name) ) return filter_value class NetworkInterfaceBackend(object): + def __init__(self): self.enis = {} super(NetworkInterfaceBackend, self).__init__() def create_network_interface(self, subnet, private_ip_address, group_ids=None, **kwargs): - eni = NetworkInterface(self, subnet, private_ip_address, group_ids=group_ids, **kwargs) + eni = NetworkInterface( + self, subnet, private_ip_address, group_ids=group_ids, **kwargs) self.enis[eni.id] = eni return eni @@ -289,7 +299,8 @@ class NetworkInterfaceBackend(object): for (_filter, _filter_value) in filters.items(): if _filter == 'network-interface-id': _filter = 'id' - enis = [eni for eni in enis if getattr(eni, _filter) in _filter_value] + enis = [eni for eni in enis if getattr( + eni, _filter) in _filter_value] elif _filter == 'group-id': original_enis = enis enis = [] @@ -299,7 +310,8 @@ class NetworkInterfaceBackend(object): enis.append(eni) break else: - self.raise_not_implemented_error("The filter '{0}' for DescribeNetworkInterfaces".format(_filter)) + self.raise_not_implemented_error( + "The filter '{0}' for DescribeNetworkInterfaces".format(_filter)) return enis def attach_network_interface(self, eni_id, instance_id, device_index): @@ -330,13 +342,15 @@ class NetworkInterfaceBackend(object): if eni_ids: enis = [eni for eni in enis if eni.id in eni_ids] if len(enis) != len(eni_ids): - invalid_id = list(set(eni_ids).difference(set([eni.id for eni in enis])))[0] + invalid_id = list(set(eni_ids).difference( + set([eni.id for eni in enis])))[0] raise InvalidNetworkInterfaceIdError(invalid_id) return generic_filter(filters, enis) class Instance(BotoInstance, TaggedEC2Resource): + def __init__(self, ec2_backend, image_id, user_data, security_groups, **kwargs): super(Instance, self).__init__() self.ec2_backend = ec2_backend @@ -367,7 +381,8 @@ class Instance(BotoInstance, TaggedEC2Resource): 
self.virtualization_type = ami.virtualization_type if ami else 'paravirtual' self.architecture = ami.architecture if ami else 'x86_64' - # handle weird bug around user_data -- something grabs the repr(), so it must be clean + # handle weird bug around user_data -- something grabs the repr(), so + # it must be clean if isinstance(self.user_data, list) and len(self.user_data) > 0: if six.PY3 and isinstance(self.user_data[0], six.binary_type): # string will have a "b" prefix -- need to get rid of it @@ -393,7 +408,8 @@ class Instance(BotoInstance, TaggedEC2Resource): associate_public_ip=associate_public_ip) def setup_defaults(self): - # Default have an instance with root volume should you not wish to override with attach volume cmd. + # Default have an instance with root volume should you not wish to + # override with attach volume cmd. volume = self.ec2_backend.create_volume(8, 'us-east-1a') self.ec2_backend.attach_volume(volume.id, self.id, '/dev/sda1') @@ -429,7 +445,8 @@ class Instance(BotoInstance, TaggedEC2Resource): ec2_backend = ec2_backends[region_name] security_group_ids = properties.get('SecurityGroups', []) - group_names = [ec2_backend.get_security_group_from_id(group_id).name for group_id in security_group_ids] + group_names = [ec2_backend.get_security_group_from_id( + group_id).name for group_id in security_group_ids] reservation = ec2_backend.add_instances( image_id=properties['ImageId'], @@ -464,7 +481,8 @@ class Instance(BotoInstance, TaggedEC2Resource): self._state.name = "stopped" self._state.code = 80 - self._reason = "User initiated ({0})".format(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')) + self._reason = "User initiated ({0})".format( + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')) self._state_reason = StateReason("Client.UserInitiatedShutdown: User initiated shutdown", "Client.UserInitiatedShutdown") @@ -480,7 +498,8 @@ class Instance(BotoInstance, TaggedEC2Resource): self._state.name = "terminated" self._state.code = 48 - 
self._reason = "User initiated ({0})".format(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')) + self._reason = "User initiated ({0})".format( + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')) self._state_reason = StateReason("Client.UserInitiatedShutdown: User initiated shutdown", "Client.UserInitiatedShutdown") @@ -514,7 +533,8 @@ class Instance(BotoInstance, TaggedEC2Resource): 'AssociatePublicIpAddress': associate_public_ip} primary_nic = dict((k, v) for k, v in primary_nic.items() if v) - # If empty NIC spec but primary NIC values provided, create NIC from them. + # If empty NIC spec but primary NIC values provided, create NIC from + # them. if primary_nic and not nic_spec: nic_spec[0] = primary_nic nic_spec[0]['DeviceIndex'] = 0 @@ -544,10 +564,12 @@ class Instance(BotoInstance, TaggedEC2Resource): group_ids = [group_id] if group_id else [] use_nic = self.ec2_backend.create_network_interface(subnet, - nic.get('PrivateIpAddress'), - device_index=device_index, - public_ip_auto_assign=nic.get('AssociatePublicIpAddress', False), - group_ids=group_ids) + nic.get( + 'PrivateIpAddress'), + device_index=device_index, + public_ip_auto_assign=nic.get( + 'AssociatePublicIpAddress', False), + group_ids=group_ids) self.attach_eni(use_nic, device_index) @@ -559,7 +581,8 @@ class Instance(BotoInstance, TaggedEC2Resource): device_index = int(device_index) self.nics[device_index] = eni - eni.instance = self # This is used upon associate/disassociate public IP. + # This is used upon associate/disassociate public IP. 
+ eni.instance = self eni.attachment_id = random_eni_attach_id() eni.device_index = device_index @@ -639,7 +662,8 @@ class InstanceBackend(object): def terminate_instances(self, instance_ids): terminated_instances = [] if not instance_ids: - raise EC2ClientError("InvalidParameterCombination", "No instances specified") + raise EC2ClientError( + "InvalidParameterCombination", "No instances specified") for instance in self.get_multi_instances_by_id(instance_ids): instance.terminate() terminated_instances.append(instance) @@ -716,16 +740,21 @@ class InstanceBackend(object): """ reservations = [] for reservation in self.all_reservations(make_copy=True): - reservation_instance_ids = [instance.id for instance in reservation.instances] - matching_reservation = any(instance_id in reservation_instance_ids for instance_id in instance_ids) + reservation_instance_ids = [ + instance.id for instance in reservation.instances] + matching_reservation = any( + instance_id in reservation_instance_ids for instance_id in instance_ids) if matching_reservation: # We need to make a copy of the reservation because we have to modify the # instances to limit to those requested - reservation.instances = [instance for instance in reservation.instances if instance.id in instance_ids] + reservation.instances = [ + instance for instance in reservation.instances if instance.id in instance_ids] reservations.append(reservation) - found_instance_ids = [instance.id for reservation in reservations for instance in reservation.instances] + found_instance_ids = [ + instance.id for reservation in reservations for instance in reservation.instances] if len(found_instance_ids) != len(instance_ids): - invalid_id = list(set(instance_ids).difference(set(found_instance_ids)))[0] + invalid_id = list(set(instance_ids).difference( + set(found_instance_ids)))[0] raise InvalidInstanceIdError(invalid_id) if filters is not None: reservations = filter_reservations(reservations, filters) @@ -735,9 +764,11 @@ class 
InstanceBackend(object): if make_copy: # Return copies so that other functions can modify them with changing # the originals - reservations = [copy.deepcopy(reservation) for reservation in self.reservations.values()] + reservations = [copy.deepcopy(reservation) + for reservation in self.reservations.values()] else: - reservations = [reservation for reservation in self.reservations.values()] + reservations = [ + reservation for reservation in self.reservations.values()] if filters is not None: reservations = filter_reservations(reservations, filters) return reservations @@ -848,16 +879,19 @@ class TagBackend(object): if tag_filter in self.VALID_TAG_FILTERS: if tag_filter == 'key': for value in filters[tag_filter]: - key_filters.append(re.compile(simple_aws_filter_to_re(value))) + key_filters.append(re.compile( + simple_aws_filter_to_re(value))) if tag_filter == 'resource-id': for value in filters[tag_filter]: - resource_id_filters.append(re.compile(simple_aws_filter_to_re(value))) + resource_id_filters.append( + re.compile(simple_aws_filter_to_re(value))) if tag_filter == 'resource-type': for value in filters[tag_filter]: resource_type_filters.append(value) if tag_filter == 'value': for value in filters[tag_filter]: - value_filters.append(re.compile(simple_aws_filter_to_re(value))) + value_filters.append(re.compile( + simple_aws_filter_to_re(value))) for resource_id, tags in self.tags.items(): for key, value in tags.items(): add_result = False @@ -907,8 +941,9 @@ class TagBackend(object): class Ami(TaggedEC2Resource): + def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None, - name=None, description=None): + name=None, description=None): self.ec2_backend = ec2_backend self.id = ami_id self.state = "available" @@ -948,7 +983,8 @@ class Ami(TaggedEC2Resource): # AWS auto-creates these, we should reflect the same. 
volume = self.ec2_backend.create_volume(15, "us-east-1a") - self.ebs_snapshot = self.ec2_backend.create_snapshot(volume.id, "Auto-created snapshot for AMI %s" % self.id) + self.ebs_snapshot = self.ec2_backend.create_snapshot( + volume.id, "Auto-created snapshot for AMI %s" % self.id) @property def is_public(self): @@ -977,12 +1013,14 @@ class Ami(TaggedEC2Resource): filter_value = super(Ami, self).get_filter_value(filter_name) if filter_value is None: - self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeImages".format(filter_name)) + self.ec2_backend.raise_not_implemented_error( + "The filter '{0}' for DescribeImages".format(filter_name)) return filter_value class AmiBackend(object): + def __init__(self): self.amis = {} super(AmiBackend, self).__init__() @@ -991,14 +1029,17 @@ class AmiBackend(object): # TODO: check that instance exists and pull info from it. ami_id = random_ami_id() instance = self.get_instance(instance_id) - ami = Ami(self, ami_id, instance=instance, source_ami=None, name=name, description=description) + ami = Ami(self, ami_id, instance=instance, source_ami=None, + name=name, description=description) self.amis[ami_id] = ami return ami def copy_image(self, source_image_id, source_region, name=None, description=None): - source_ami = ec2_backends[source_region].describe_images(ami_ids=[source_image_id])[0] + source_ami = ec2_backends[source_region].describe_images( + ami_ids=[source_image_id])[0] ami_id = random_ami_id() - ami = Ami(self, ami_id, instance=None, source_ami=source_ami, name=name, description=description) + ami = Ami(self, ami_id, instance=None, source_ami=source_ami, + name=name, description=description) self.amis[ami_id] = ami return ami @@ -1074,12 +1115,14 @@ class AmiBackend(object): class Region(object): + def __init__(self, name, endpoint): self.name = name self.endpoint = endpoint class Zone(object): + def __init__(self, name, region_name): self.name = name self.region_name = region_name @@ -1122,6 
+1165,7 @@ class RegionsAndZonesBackend(object): class SecurityRule(object): + def __init__(self, ip_protocol, from_port, to_port, ip_ranges, source_groups): self.ip_protocol = ip_protocol self.from_port = from_port @@ -1144,6 +1188,7 @@ class SecurityRule(object): class SecurityGroup(TaggedEC2Resource): + def __init__(self, ec2_backend, group_id, name, description, vpc_id=None): self.ec2_backend = ec2_backend self.id = group_id @@ -1189,19 +1234,22 @@ class SecurityGroup(TaggedEC2Resource): @classmethod def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): - cls._delete_security_group_given_vpc_id(original_resource.name, original_resource.vpc_id, region_name) + cls._delete_security_group_given_vpc_id( + original_resource.name, original_resource.vpc_id, region_name) return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name) @classmethod def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] vpc_id = properties.get('VpcId') - cls._delete_security_group_given_vpc_id(resource_name, vpc_id, region_name) + cls._delete_security_group_given_vpc_id( + resource_name, vpc_id, region_name) @classmethod def _delete_security_group_given_vpc_id(cls, resource_name, vpc_id, region_name): ec2_backend = ec2_backends[region_name] - security_group = ec2_backend.get_security_group_from_name(resource_name, vpc_id) + security_group = ec2_backend.get_security_group_from_name( + resource_name, vpc_id) if security_group: security_group.delete(region_name) @@ -1304,13 +1352,14 @@ class SecurityGroupBackend(object): return group def describe_security_groups(self, group_ids=None, groupnames=None, filters=None): - all_groups = itertools.chain(*[x.values() for x in self.groups.values()]) + all_groups = itertools.chain(*[x.values() + for x in self.groups.values()]) groups = [] if group_ids or groupnames or filters: for 
group in all_groups: - if ((group_ids and not group.id in group_ids) or - (groupnames and not group.name in groupnames)): + if ((group_ids and group.id not in group_ids) or + (groupnames and group.name not in groupnames)): continue if filters and not group.matches_filters(filters): continue @@ -1322,7 +1371,8 @@ class SecurityGroupBackend(object): def _delete_security_group(self, vpc_id, group_id): if self.groups[vpc_id][group_id].enis: - raise DependencyViolationError("{0} is being utilized by {1}".format(group_id, 'ENIs')) + raise DependencyViolationError( + "{0} is being utilized by {1}".format(group_id, 'ENIs')) return self.groups[vpc_id].pop(group_id) def delete_security_group(self, name=None, group_id=None): @@ -1333,7 +1383,8 @@ class SecurityGroupBackend(object): return self._delete_security_group(vpc_id, group_id) raise InvalidSecurityGroupNotFoundError(group_id) elif name: - # Group Name. Has to be in standard EC2, VPC needs to be identified by group_id + # Group Name. Has to be in standard EC2, VPC needs to be + # identified by group_id group = self.get_security_group_from_name(name) if group: return self._delete_security_group(None, group.id) @@ -1341,7 +1392,8 @@ class SecurityGroupBackend(object): def get_security_group_from_id(self, group_id): # 2 levels of chaining necessary since it's a complex structure - all_groups = itertools.chain.from_iterable([x.values() for x in self.groups.values()]) + all_groups = itertools.chain.from_iterable( + [x.values() for x in self.groups.values()]) for group in all_groups: if group.id == group_id: return group @@ -1384,7 +1436,8 @@ class SecurityGroupBackend(object): source_groups = [] for source_group_name in source_group_names: - source_group = self.get_security_group_from_name(source_group_name, vpc_id) + source_group = self.get_security_group_from_name( + source_group_name, vpc_id) if source_group: source_groups.append(source_group) @@ -1394,7 +1447,8 @@ class SecurityGroupBackend(object): if source_group: 
source_groups.append(source_group) - security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups) + security_rule = SecurityRule( + ip_protocol, from_port, to_port, ip_ranges, source_groups) group.add_ingress_rule(security_rule) def revoke_security_group_ingress(self, @@ -1411,7 +1465,8 @@ class SecurityGroupBackend(object): source_groups = [] for source_group_name in source_group_names: - source_group = self.get_security_group_from_name(source_group_name, vpc_id) + source_group = self.get_security_group_from_name( + source_group_name, vpc_id) if source_group: source_groups.append(source_group) @@ -1420,7 +1475,8 @@ class SecurityGroupBackend(object): if source_group: source_groups.append(source_group) - security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups) + security_rule = SecurityRule( + ip_protocol, from_port, to_port, ip_ranges, source_groups) if security_rule in group.ingress_rules: group.ingress_rules.remove(security_rule) return security_rule @@ -1453,7 +1509,8 @@ class SecurityGroupBackend(object): source_groups = [] for source_group_name in source_group_names: - source_group = self.get_security_group_from_name(source_group_name, vpc_id) + source_group = self.get_security_group_from_name( + source_group_name, vpc_id) if source_group: source_groups.append(source_group) @@ -1463,7 +1520,8 @@ class SecurityGroupBackend(object): if source_group: source_groups.append(source_group) - security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups) + security_rule = SecurityRule( + ip_protocol, from_port, to_port, ip_ranges, source_groups) group.add_egress_rule(security_rule) def revoke_security_group_egress(self, @@ -1480,7 +1538,8 @@ class SecurityGroupBackend(object): source_groups = [] for source_group_name in source_group_names: - source_group = self.get_security_group_from_name(source_group_name, vpc_id) + source_group = self.get_security_group_from_name( + 
source_group_name, vpc_id) if source_group: source_groups.append(source_group) @@ -1489,7 +1548,8 @@ class SecurityGroupBackend(object): if source_group: source_groups.append(source_group) - security_rule = SecurityRule(ip_protocol, from_port, to_port, ip_ranges, source_groups) + security_rule = SecurityRule( + ip_protocol, from_port, to_port, ip_ranges, source_groups) if security_rule in group.egress_rules: group.egress_rules.remove(security_rule) return security_rule @@ -1528,7 +1588,8 @@ class SecurityGroupIngress(object): from_port = properties.get("FromPort") source_security_group_id = properties.get("SourceSecurityGroupId") source_security_group_name = properties.get("SourceSecurityGroupName") - # source_security_owner_id = properties.get("SourceSecurityGroupOwnerId") # IGNORED AT THE MOMENT + # source_security_owner_id = + # properties.get("SourceSecurityGroupOwnerId") # IGNORED AT THE MOMENT to_port = properties.get("ToPort") assert group_id or group_name @@ -1549,9 +1610,11 @@ class SecurityGroupIngress(object): ip_ranges = [] if group_id: - security_group = ec2_backend.describe_security_groups(group_ids=[group_id])[0] + security_group = ec2_backend.describe_security_groups(group_ids=[group_id])[ + 0] else: - security_group = ec2_backend.describe_security_groups(groupnames=[group_name])[0] + security_group = ec2_backend.describe_security_groups( + groupnames=[group_name])[0] ec2_backend.authorize_security_group_ingress( group_name_or_id=security_group.id, @@ -1567,6 +1630,7 @@ class SecurityGroupIngress(object): class VolumeAttachment(object): + def __init__(self, volume, instance, device, status): self.volume = volume self.attach_time = utc_date_and_time() @@ -1591,6 +1655,7 @@ class VolumeAttachment(object): class Volume(TaggedEC2Resource): + def __init__(self, ec2_backend, volume_id, size, zone, snapshot_id=None, encrypted=False): self.id = volume_id self.size = size @@ -1657,12 +1722,14 @@ class Volume(TaggedEC2Resource): filter_value = super(Volume, 
self).get_filter_value(filter_name) if filter_value is None: - self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeVolumes".format(filter_name)) + self.ec2_backend.raise_not_implemented_error( + "The filter '{0}' for DescribeVolumes".format(filter_name)) return filter_value class Snapshot(TaggedEC2Resource): + def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False): self.id = snapshot_id self.volume = volume @@ -1696,12 +1763,14 @@ class Snapshot(TaggedEC2Resource): filter_value = super(Snapshot, self).get_filter_value(filter_name) if filter_value is None: - self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeSnapshots".format(filter_name)) + self.ec2_backend.raise_not_implemented_error( + "The filter '{0}' for DescribeSnapshots".format(filter_name)) return filter_value class EBSBackend(object): + def __init__(self): self.volumes = {} self.attachments = {} @@ -1745,7 +1814,8 @@ class EBSBackend(object): if not volume or not instance: return False - volume.attachment = VolumeAttachment(volume, instance, device_path, 'attached') + volume.attachment = VolumeAttachment( + volume, instance, device_path, 'attached') # Modify instance to capture mount of block device. 
bdt = BlockDeviceType(volume_id=volume_id, status=volume.status, size=volume.size, attach_time=utc_date_and_time()) @@ -1767,7 +1837,8 @@ class EBSBackend(object): def create_snapshot(self, volume_id, description): snapshot_id = random_snapshot_id() volume = self.get_volume(volume_id) - snapshot = Snapshot(self, snapshot_id, volume, description, volume.encrypted) + snapshot = Snapshot(self, snapshot_id, volume, + description, volume.encrypted) self.snapshots[snapshot_id] = snapshot return snapshot @@ -1794,7 +1865,8 @@ class EBSBackend(object): def add_create_volume_permission(self, snapshot_id, user_id=None, group=None): if user_id: - self.raise_not_implemented_error("The UserId parameter for ModifySnapshotAttribute") + self.raise_not_implemented_error( + "The UserId parameter for ModifySnapshotAttribute") if group != 'all': raise InvalidAMIAttributeItemValueError("UserGroup", group) @@ -1804,7 +1876,8 @@ class EBSBackend(object): def remove_create_volume_permission(self, snapshot_id, user_id=None, group=None): if user_id: - self.raise_not_implemented_error("The UserId parameter for ModifySnapshotAttribute") + self.raise_not_implemented_error( + "The UserId parameter for ModifySnapshotAttribute") if group != 'all': raise InvalidAMIAttributeItemValueError("UserGroup", group) @@ -1814,6 +1887,7 @@ class EBSBackend(object): class VPC(TaggedEC2Resource): + def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default'): self.ec2_backend = ec2_backend self.id = vpc_id @@ -1862,19 +1936,22 @@ class VPC(TaggedEC2Resource): filter_value = super(VPC, self).get_filter_value(filter_name) if filter_value is None: - self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeVPCs".format(filter_name)) + self.ec2_backend.raise_not_implemented_error( + "The filter '{0}' for DescribeVPCs".format(filter_name)) return filter_value class VPCBackend(object): + def __init__(self): self.vpcs = {} super(VPCBackend, self).__init__() def 
create_vpc(self, cidr_block, instance_tenancy='default'): vpc_id = random_vpc_id() - vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy) + vpc = VPC(self, vpc_id, cidr_block, len( + self.vpcs) == 0, instance_tenancy) self.vpcs[vpc_id] = vpc # AWS creates a default main route table and security group. @@ -1885,7 +1962,8 @@ class VPCBackend(object): default = self.get_security_group_from_name('default', vpc_id=vpc_id) if not default: - self.create_security_group('default', 'default VPC security group', vpc_id=vpc_id) + self.create_security_group( + 'default', 'default VPC security group', vpc_id=vpc_id) return vpc @@ -1945,6 +2023,7 @@ class VPCBackend(object): class VPCPeeringConnectionStatus(object): + def __init__(self, code='initiating-request', message=''): self.code = code self.message = message @@ -1967,6 +2046,7 @@ class VPCPeeringConnectionStatus(object): class VPCPeeringConnection(TaggedEC2Resource): + def __init__(self, vpc_pcx_id, vpc, peer_vpc): self.id = vpc_pcx_id self.vpc = vpc @@ -1991,6 +2071,7 @@ class VPCPeeringConnection(TaggedEC2Resource): class VPCPeeringConnectionBackend(object): + def __init__(self): self.vpc_pcxs = {} super(VPCPeeringConnectionBackend, self).__init__() @@ -2032,6 +2113,7 @@ class VPCPeeringConnectionBackend(object): class Subnet(TaggedEC2Resource): + def __init__(self, ec2_backend, subnet_id, vpc_id, cidr_block, availability_zone, default_for_az, map_public_ip_on_launch): self.ec2_backend = ec2_backend @@ -2101,18 +2183,21 @@ class Subnet(TaggedEC2Resource): filter_value = super(Subnet, self).get_filter_value(filter_name) if filter_value is None: - self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeSubnets".format(filter_name)) + self.ec2_backend.raise_not_implemented_error( + "The filter '{0}' for DescribeSubnets".format(filter_name)) return filter_value def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if 
attribute_name == 'AvailabilityZone': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"') raise UnformattedGetAttTemplateException() class SubnetBackend(object): + def __init__(self): # maps availability zone to dict of (subnet_id, subnet) self.subnets = defaultdict(dict) @@ -2126,7 +2211,7 @@ class SubnetBackend(object): def create_subnet(self, vpc_id, cidr_block, availability_zone): subnet_id = random_subnet_id() - vpc = self.get_vpc(vpc_id) # Validate VPC exists + self.get_vpc(vpc_id) # Validate VPC exists # if this is the first subnet for an availability zone, # consider it the default @@ -2166,6 +2251,7 @@ class SubnetBackend(object): class SubnetRouteTableAssociation(object): + def __init__(self, route_table_id, subnet_id): self.route_table_id = route_table_id self.subnet_id = subnet_id @@ -2186,17 +2272,21 @@ class SubnetRouteTableAssociation(object): class SubnetRouteTableAssociationBackend(object): + def __init__(self): self.subnet_associations = {} super(SubnetRouteTableAssociationBackend, self).__init__() def create_subnet_association(self, route_table_id, subnet_id): - subnet_association = SubnetRouteTableAssociation(route_table_id, subnet_id) - self.subnet_associations["{0}:{1}".format(route_table_id, subnet_id)] = subnet_association + subnet_association = SubnetRouteTableAssociation( + route_table_id, subnet_id) + self.subnet_associations["{0}:{1}".format( + route_table_id, subnet_id)] = subnet_association return subnet_association class RouteTable(TaggedEC2Resource): + def __init__(self, ec2_backend, route_table_id, vpc_id, main=False): self.ec2_backend = ec2_backend self.id = route_table_id @@ -2242,12 +2332,14 @@ class RouteTable(TaggedEC2Resource): filter_value = super(RouteTable, self).get_filter_value(filter_name) if filter_value is None: - self.ec2_backend.raise_not_implemented_error("The filter '{0}' for 
DescribeRouteTables".format(filter_name)) + self.ec2_backend.raise_not_implemented_error( + "The filter '{0}' for DescribeRouteTables".format(filter_name)) return filter_value class RouteTableBackend(object): + def __init__(self): self.route_tables = {} super(RouteTableBackend, self).__init__() @@ -2273,9 +2365,11 @@ class RouteTableBackend(object): route_tables = self.route_tables.values() if route_table_ids: - route_tables = [route_table for route_table in route_tables if route_table.id in route_table_ids] + route_tables = [ + route_table for route_table in route_tables if route_table.id in route_table_ids] if len(route_tables) != len(route_table_ids): - invalid_id = list(set(route_table_ids).difference(set([route_table.id for route_table in route_tables])))[0] + invalid_id = list(set(route_table_ids).difference( + set([route_table.id for route_table in route_tables])))[0] raise InvalidRouteTableIdError(invalid_id) return generic_filter(filters, route_tables) @@ -2292,7 +2386,8 @@ class RouteTableBackend(object): def associate_route_table(self, route_table_id, subnet_id): # Idempotent if association already exists. - route_tables_by_subnet = self.get_all_route_tables(filters={'association.subnet-id': [subnet_id]}) + route_tables_by_subnet = self.get_all_route_tables( + filters={'association.subnet-id': [subnet_id]}) if route_tables_by_subnet: for association_id, check_subnet_id in route_tables_by_subnet[0].associations.items(): if subnet_id == check_subnet_id: @@ -2318,7 +2413,8 @@ class RouteTableBackend(object): return association_id # Find route table which currently has the association, error if none. 
- route_tables_by_association_id = self.get_all_route_tables(filters={'association.route-table-association-id': [association_id]}) + route_tables_by_association_id = self.get_all_route_tables( + filters={'association.route-table-association-id': [association_id]}) if not route_tables_by_association_id: raise InvalidAssociationIdError(association_id) @@ -2329,6 +2425,7 @@ class RouteTableBackend(object): class Route(object): + def __init__(self, route_table, destination_cidr_block, local=False, gateway=None, instance=None, interface=None, vpc_pcx=None): self.id = generate_route_id(route_table.id, destination_cidr_block) @@ -2363,6 +2460,7 @@ class Route(object): class RouteBackend(object): + def __init__(self): super(RouteBackend, self).__init__() @@ -2372,7 +2470,8 @@ class RouteBackend(object): route_table = self.get_route_table(route_table_id) if interface_id: - self.raise_not_implemented_error("CreateRoute to NetworkInterfaceId") + self.raise_not_implemented_error( + "CreateRoute to NetworkInterfaceId") gateway = None if gateway_id: @@ -2383,21 +2482,23 @@ class RouteBackend(object): route = Route(route_table, destination_cidr_block, local=local, gateway=gateway, - instance=self.get_instance(instance_id) if instance_id else None, + instance=self.get_instance( + instance_id) if instance_id else None, interface=None, vpc_pcx=self.get_vpc_peering_connection(vpc_peering_connection_id) if vpc_peering_connection_id else None) route_table.routes[route.id] = route return route def replace_route(self, route_table_id, destination_cidr_block, - gateway_id=None, instance_id=None, interface_id=None, - vpc_peering_connection_id=None): + gateway_id=None, instance_id=None, interface_id=None, + vpc_peering_connection_id=None): route_table = self.get_route_table(route_table_id) route_id = generate_route_id(route_table.id, destination_cidr_block) route = route_table.routes[route_id] if interface_id: - self.raise_not_implemented_error("ReplaceRoute to NetworkInterfaceId") + 
self.raise_not_implemented_error( + "ReplaceRoute to NetworkInterfaceId") route.gateway = None if gateway_id: @@ -2406,9 +2507,11 @@ class RouteBackend(object): elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id: route.gateway = self.get_internet_gateway(gateway_id) - route.instance = self.get_instance(instance_id) if instance_id else None + route.instance = self.get_instance( + instance_id) if instance_id else None route.interface = None - route.vpc_pcx = self.get_vpc_peering_connection(vpc_peering_connection_id) if vpc_peering_connection_id else None + route.vpc_pcx = self.get_vpc_peering_connection( + vpc_peering_connection_id) if vpc_peering_connection_id else None route_table.routes[route.id] = route return route @@ -2428,6 +2531,7 @@ class RouteBackend(object): class InternetGateway(TaggedEC2Resource): + def __init__(self, ec2_backend): self.ec2_backend = ec2_backend self.id = random_internet_gateway_id() @@ -2451,6 +2555,7 @@ class InternetGateway(TaggedEC2Resource): class InternetGatewayBackend(object): + def __init__(self): self.internet_gateways = {} super(InternetGatewayBackend, self).__init__() @@ -2505,6 +2610,7 @@ class InternetGatewayBackend(object): class VPCGatewayAttachment(object): + def __init__(self, gateway_id, vpc_id): self.gateway_id = gateway_id self.vpc_id = vpc_id @@ -2518,7 +2624,8 @@ class VPCGatewayAttachment(object): gateway_id=properties['InternetGatewayId'], vpc_id=properties['VpcId'], ) - ec2_backend.attach_internet_gateway(properties['InternetGatewayId'], properties['VpcId']) + ec2_backend.attach_internet_gateway( + properties['InternetGatewayId'], properties['VpcId']) return attachment @property @@ -2527,6 +2634,7 @@ class VPCGatewayAttachment(object): class VPCGatewayAttachmentBackend(object): + def __init__(self): self.gateway_attachments = {} super(VPCGatewayAttachmentBackend, self).__init__() @@ -2538,6 +2646,7 @@ class VPCGatewayAttachmentBackend(object): class SpotInstanceRequest(BotoSpotRequest, 
TaggedEC2Resource): + def __init__(self, ec2_backend, spot_request_id, price, image_id, type, valid_from, valid_until, launch_group, availability_zone_group, key_name, security_groups, user_data, instance_type, placement, @@ -2567,12 +2676,14 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): if security_groups: for group_name in security_groups: - group = self.ec2_backend.get_security_group_from_name(group_name) + group = self.ec2_backend.get_security_group_from_name( + group_name) if group: ls.groups.append(group) else: # If not security groups, add the default - default_group = self.ec2_backend.get_security_group_from_name("default") + default_group = self.ec2_backend.get_security_group_from_name( + "default") ls.groups.append(default_group) self.instance = self.launch_instance() @@ -2582,10 +2693,12 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): return self.state if filter_name == 'spot-instance-request-id': return self.id - filter_value = super(SpotInstanceRequest, self).get_filter_value(filter_name) + filter_value = super(SpotInstanceRequest, + self).get_filter_value(filter_name) if filter_value is None: - self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeSpotInstanceRequests".format(filter_name)) + self.ec2_backend.raise_not_implemented_error( + "The filter '{0}' for DescribeSpotInstanceRequests".format(filter_name)) return filter_value @@ -2604,6 +2717,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): @six.add_metaclass(Model) class SpotRequestBackend(object): + def __init__(self): self.spot_instance_requests = {} super(SpotRequestBackend, self).__init__() @@ -2617,10 +2731,10 @@ class SpotRequestBackend(object): for _ in range(count): spot_request_id = random_spot_request_id() request = SpotInstanceRequest(self, - spot_request_id, price, image_id, type, valid_from, valid_until, - launch_group, availability_zone_group, key_name, security_groups, - user_data, instance_type, 
placement, kernel_id, ramdisk_id, - monitoring_enabled, subnet_id) + spot_request_id, price, image_id, type, valid_from, valid_until, + launch_group, availability_zone_group, key_name, security_groups, + user_data, instance_type, placement, kernel_id, ramdisk_id, + monitoring_enabled, subnet_id) self.spot_instance_requests[spot_request_id] = request requests.append(request) return requests @@ -2639,9 +2753,10 @@ class SpotRequestBackend(object): class SpotFleetLaunchSpec(object): + def __init__(self, ebs_optimized, group_set, iam_instance_profile, image_id, - instance_type, key_name, monitoring, spot_price, subnet_id, user_data, - weighted_capacity): + instance_type, key_name, monitoring, spot_price, subnet_id, user_data, + weighted_capacity): self.ebs_optimized = ebs_optimized self.group_set = group_set self.iam_instance_profile = iam_instance_profile @@ -2658,7 +2773,7 @@ class SpotFleetLaunchSpec(object): class SpotFleetRequest(TaggedEC2Resource): def __init__(self, ec2_backend, spot_fleet_request_id, spot_price, - target_capacity, iam_fleet_role, allocation_strategy, launch_specs): + target_capacity, iam_fleet_role, allocation_strategy, launch_specs): self.ec2_backend = ec2_backend self.id = spot_fleet_request_id @@ -2672,18 +2787,19 @@ class SpotFleetRequest(TaggedEC2Resource): self.launch_specs = [] for spec in launch_specs: self.launch_specs.append(SpotFleetLaunchSpec( - ebs_optimized=spec['ebs_optimized'], - group_set=[val for key, val in spec.items() if key.startswith("group_set")], - iam_instance_profile=spec.get('iam_instance_profile._arn'), - image_id=spec['image_id'], - instance_type=spec['instance_type'], - key_name=spec.get('key_name'), - monitoring=spec.get('monitoring._enabled'), - spot_price=spec.get('spot_price', self.spot_price), - subnet_id=spec['subnet_id'], - user_data=spec.get('user_data'), - weighted_capacity=spec['weighted_capacity'], - ) + ebs_optimized=spec['ebs_optimized'], + group_set=[val for key, val in spec.items( + ) if 
key.startswith("group_set")], + iam_instance_profile=spec.get('iam_instance_profile._arn'), + image_id=spec['image_id'], + instance_type=spec['instance_type'], + key_name=spec.get('key_name'), + monitoring=spec.get('monitoring._enabled'), + spot_price=spec.get('spot_price', self.spot_price), + subnet_id=spec['subnet_id'], + user_data=spec.get('user_data'), + weighted_capacity=spec['weighted_capacity'], + ) ) self.spot_requests = [] @@ -2695,7 +2811,8 @@ class SpotFleetRequest(TaggedEC2Resource): @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): - properties = cloudformation_json['Properties']['SpotFleetRequestConfigData'] + properties = cloudformation_json[ + 'Properties']['SpotFleetRequestConfigData'] ec2_backend = ec2_backends[region_name] spot_price = properties['SpotPrice'] @@ -2704,17 +2821,17 @@ class SpotFleetRequest(TaggedEC2Resource): allocation_strategy = properties['AllocationStrategy'] launch_specs = properties["LaunchSpecifications"] launch_specs = [ - dict([(camelcase_to_underscores(key), val) for key, val in launch_spec.items()]) + dict([(camelcase_to_underscores(key), val) + for key, val in launch_spec.items()]) for launch_spec in launch_specs ] spot_fleet_request = ec2_backend.request_spot_fleet(spot_price, - target_capacity, iam_fleet_role, allocation_strategy, launch_specs) + target_capacity, iam_fleet_role, allocation_strategy, launch_specs) return spot_fleet_request - def get_launch_spec_counts(self): weight_map = defaultdict(int) @@ -2722,39 +2839,42 @@ class SpotFleetRequest(TaggedEC2Resource): weight_so_far = 0 launch_spec_index = 0 while True: - launch_spec = self.launch_specs[launch_spec_index % len(self.launch_specs)] + launch_spec = self.launch_specs[ + launch_spec_index % len(self.launch_specs)] weight_map[launch_spec] += 1 weight_so_far += launch_spec.weighted_capacity if weight_so_far >= self.target_capacity: break launch_spec_index += 1 else: # lowestPrice - cheapest_spec = 
sorted(self.launch_specs, key=lambda spec: float(spec.spot_price))[0] + cheapest_spec = sorted( + self.launch_specs, key=lambda spec: float(spec.spot_price))[0] extra = 1 if self.target_capacity % cheapest_spec.weighted_capacity else 0 - weight_map[cheapest_spec] = int(self.target_capacity // cheapest_spec.weighted_capacity) + extra + weight_map[cheapest_spec] = int( + self.target_capacity // cheapest_spec.weighted_capacity) + extra return weight_map.items() def create_spot_requests(self): for launch_spec, count in self.get_launch_spec_counts(): requests = self.ec2_backend.request_spot_instances( - price=launch_spec.spot_price, - image_id=launch_spec.image_id, - count=count, - type="persistent", - valid_from=None, - valid_until=None, - launch_group=None, - availability_zone_group=None, - key_name=launch_spec.key_name, - security_groups=launch_spec.group_set, - user_data=launch_spec.user_data, - instance_type=launch_spec.instance_type, - placement=None, - kernel_id=None, - ramdisk_id=None, - monitoring_enabled=launch_spec.monitoring, - subnet_id=launch_spec.subnet_id, + price=launch_spec.spot_price, + image_id=launch_spec.image_id, + count=count, + type="persistent", + valid_from=None, + valid_until=None, + launch_group=None, + availability_zone_group=None, + key_name=launch_spec.key_name, + security_groups=launch_spec.group_set, + user_data=launch_spec.user_data, + instance_type=launch_spec.instance_type, + placement=None, + kernel_id=None, + ramdisk_id=None, + monitoring_enabled=launch_spec.monitoring, + subnet_id=launch_spec.subnet_id, ) self.spot_requests.extend(requests) return self.spot_requests @@ -2764,16 +2884,17 @@ class SpotFleetRequest(TaggedEC2Resource): class SpotFleetBackend(object): + def __init__(self): self.spot_fleet_requests = {} super(SpotFleetBackend, self).__init__() def request_spot_fleet(self, spot_price, target_capacity, iam_fleet_role, - allocation_strategy, launch_specs): + allocation_strategy, launch_specs): spot_fleet_request_id = 
random_spot_fleet_request_id() request = SpotFleetRequest(self, spot_fleet_request_id, spot_price, - target_capacity, iam_fleet_role, allocation_strategy, launch_specs) + target_capacity, iam_fleet_role, allocation_strategy, launch_specs) self.spot_fleet_requests[spot_fleet_request_id] = request return request @@ -2788,7 +2909,8 @@ class SpotFleetBackend(object): requests = self.spot_fleet_requests.values() if spot_fleet_request_ids: - requests = [request for request in requests if request.id in spot_fleet_request_ids] + requests = [ + request for request in requests if request.id in spot_fleet_request_ids] return requests @@ -2803,6 +2925,7 @@ class SpotFleetBackend(object): class ElasticAddress(object): + def __init__(self, domain): self.public_ip = random_ip() self.allocation_id = random_eip_allocation_id() if domain == "vpc" else None @@ -2894,8 +3017,10 @@ class ElasticAddressBackend(object): eips = self.address_by_allocation([allocation_id]) eip = eips[0] - new_instance_association = bool(instance and (not eip.instance or eip.instance.id == instance.id)) - new_eni_association = bool(eni and (not eip.eni or eni.id == eip.eni.id)) + new_instance_association = bool(instance and ( + not eip.instance or eip.instance.id == instance.id)) + new_eni_association = bool( + eni and (not eip.eni or eni.id == eip.eni.id)) if new_instance_association or new_eni_association or reassociate: eip.instance = instance @@ -2948,6 +3073,7 @@ class ElasticAddressBackend(object): class DHCPOptionsSet(TaggedEC2Resource): + def __init__(self, ec2_backend, domain_name_servers=None, domain_name=None, ntp_servers=None, netbios_name_servers=None, netbios_node_type=None): @@ -2983,10 +3109,12 @@ class DHCPOptionsSet(TaggedEC2Resource): values = [item for item in list(self._options.values()) if item] return itertools.chain(*values) - filter_value = super(DHCPOptionsSet, self).get_filter_value(filter_name) + filter_value = super( + DHCPOptionsSet, self).get_filter_value(filter_name) if 
filter_value is None: - self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeDhcpOptions".format(filter_name)) + self.ec2_backend.raise_not_implemented_error( + "The filter '{0}' for DescribeDhcpOptions".format(filter_name)) return filter_value @@ -2996,6 +3124,7 @@ class DHCPOptionsSet(TaggedEC2Resource): class DHCPOptionsSetBackend(object): + def __init__(self): self.dhcp_options_sets = {} super(DHCPOptionsSetBackend, self).__init__() @@ -3040,7 +3169,8 @@ class DHCPOptionsSetBackend(object): if options_id in self.dhcp_options_sets: if self.dhcp_options_sets[options_id].vpc: - raise DependencyViolationError("Cannot delete assigned DHCP options.") + raise DependencyViolationError( + "Cannot delete assigned DHCP options.") self.dhcp_options_sets.pop(options_id) else: raise InvalidDHCPOptionsIdError(options_id) @@ -3050,15 +3180,18 @@ class DHCPOptionsSetBackend(object): dhcp_options_sets = self.dhcp_options_sets.values() if dhcp_options_ids: - dhcp_options_sets = [dhcp_options_set for dhcp_options_set in dhcp_options_sets if dhcp_options_set.id in dhcp_options_ids] + dhcp_options_sets = [ + dhcp_options_set for dhcp_options_set in dhcp_options_sets if dhcp_options_set.id in dhcp_options_ids] if len(dhcp_options_sets) != len(dhcp_options_ids): - invalid_id = list(set(dhcp_options_ids).difference(set([dhcp_options_set.id for dhcp_options_set in dhcp_options_sets])))[0] + invalid_id = list(set(dhcp_options_ids).difference( + set([dhcp_options_set.id for dhcp_options_set in dhcp_options_sets])))[0] raise InvalidDHCPOptionsIdError(invalid_id) return generic_filter(filters, dhcp_options_sets) class VPNConnection(TaggedEC2Resource): + def __init__(self, ec2_backend, id, type, customer_gateway_id, vpn_gateway_id): self.ec2_backend = ec2_backend @@ -3074,6 +3207,7 @@ class VPNConnection(TaggedEC2Resource): class VPNConnectionBackend(object): + def __init__(self): self.vpn_connections = {} super(VPNConnectionBackend, self).__init__() @@ -3116,13 
+3250,15 @@ class VPNConnectionBackend(object): vpn_connections = [vpn_connection for vpn_connection in vpn_connections if vpn_connection.id in vpn_connection_ids] if len(vpn_connections) != len(vpn_connection_ids): - invalid_id = list(set(vpn_connection_ids).difference(set([vpn_connection.id for vpn_connection in vpn_connections])))[0] + invalid_id = list(set(vpn_connection_ids).difference( + set([vpn_connection.id for vpn_connection in vpn_connections])))[0] raise InvalidVpnConnectionIdError(invalid_id) return generic_filter(filters, vpn_connections) class NetworkAclBackend(object): + def __init__(self): self.network_acls = {} super(NetworkAclBackend, self).__init__() @@ -3147,7 +3283,8 @@ class NetworkAclBackend(object): network_acls = [network_acl for network_acl in network_acls if network_acl.id in network_acl_ids] if len(network_acls) != len(network_acl_ids): - invalid_id = list(set(network_acl_ids).difference(set([network_acl.id for network_acl in network_acls])))[0] + invalid_id = list(set(network_acl_ids).difference( + set([network_acl.id for network_acl in network_acls])))[0] raise InvalidRouteTableIdError(invalid_id) return generic_filter(filters, network_acls) @@ -3177,7 +3314,7 @@ class NetworkAclBackend(object): # lookup existing association for subnet and delete it default_acl = next(value for key, value in self.network_acls.items() - if association_id in value.associations.keys()) + if association_id in value.associations.keys()) subnet_id = None for key, value in default_acl.associations.items(): @@ -3203,6 +3340,7 @@ class NetworkAclBackend(object): class NetworkAclAssociation(object): + def __init__(self, ec2_backend, new_association_id, subnet_id, network_acl_id): self.ec2_backend = ec2_backend @@ -3214,6 +3352,7 @@ class NetworkAclAssociation(object): class NetworkAcl(TaggedEC2Resource): + def __init__(self, ec2_backend, network_acl_id, vpc_id, default=False): self.ec2_backend = ec2_backend self.id = network_acl_id @@ -3235,12 +3374,14 @@ class 
NetworkAcl(TaggedEC2Resource): filter_value = super(NetworkAcl, self).get_filter_value(filter_name) if filter_value is None: - self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeNetworkAcls".format(filter_name)) + self.ec2_backend.raise_not_implemented_error( + "The filter '{0}' for DescribeNetworkAcls".format(filter_name)) return filter_value class NetworkAclEntry(TaggedEC2Resource): + def __init__(self, ec2_backend, network_acl_id, rule_number, protocol, rule_action, egress, cidr_block, icmp_code, icmp_type, port_range_from, @@ -3259,6 +3400,7 @@ class NetworkAclEntry(TaggedEC2Resource): class VpnGateway(TaggedEC2Resource): + def __init__(self, ec2_backend, id, type): self.ec2_backend = ec2_backend self.id = id @@ -3268,6 +3410,7 @@ class VpnGateway(TaggedEC2Resource): class VpnGatewayAttachment(object): + def __init__(self, vpc_id, state): self.vpc_id = vpc_id self.state = state @@ -3275,6 +3418,7 @@ class VpnGatewayAttachment(object): class VpnGatewayBackend(object): + def __init__(self): self.vpn_gateways = {} super(VpnGatewayBackend, self).__init__() @@ -3318,6 +3462,7 @@ class VpnGatewayBackend(object): class CustomerGateway(TaggedEC2Resource): + def __init__(self, ec2_backend, id, type, ip_address, bgp_asn): self.ec2_backend = ec2_backend self.id = id @@ -3329,13 +3474,15 @@ class CustomerGateway(TaggedEC2Resource): class CustomerGatewayBackend(object): + def __init__(self): self.customer_gateways = {} super(CustomerGatewayBackend, self).__init__() def create_customer_gateway(self, type='ipsec.1', ip_address=None, bgp_asn=None): customer_gateway_id = random_customer_gateway_id() - customer_gateway = CustomerGateway(self, customer_gateway_id, type, ip_address, bgp_asn) + customer_gateway = CustomerGateway( + self, customer_gateway_id, type, ip_address, bgp_asn) self.customer_gateways[customer_gateway_id] = customer_gateway return customer_gateway @@ -3344,7 +3491,8 @@ class CustomerGatewayBackend(object): return 
generic_filter(filters, customer_gateways) def get_customer_gateway(self, customer_gateway_id): - customer_gateway = self.customer_gateways.get(customer_gateway_id, None) + customer_gateway = self.customer_gateways.get( + customer_gateway_id, None) if not customer_gateway: raise InvalidCustomerGatewayIdError(customer_gateway_id) return customer_gateway @@ -3370,10 +3518,12 @@ class NatGateway(object): self._created_at = datetime.utcnow() self._backend = backend # NOTE: this is the core of NAT Gateways creation - self._eni = self._backend.create_network_interface(backend.get_subnet(self.subnet_id), self.private_ip) + self._eni = self._backend.create_network_interface( + backend.get_subnet(self.subnet_id), self.private_ip) # associate allocation with ENI - self._backend.associate_address(eni=self._eni, allocation_id=self.allocation_id) + self._backend.associate_address( + eni=self._eni, allocation_id=self.allocation_id) @property def vpc_id(self): @@ -3427,7 +3577,7 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, VPCPeeringConnectionBackend, RouteTableBackend, RouteBackend, InternetGatewayBackend, VPCGatewayAttachmentBackend, SpotFleetBackend, - SpotRequestBackend,ElasticAddressBackend, KeyPairBackend, + SpotRequestBackend, ElasticAddressBackend, KeyPairBackend, DHCPOptionsSetBackend, NetworkAclBackend, VpnGatewayBackend, CustomerGatewayBackend, NatGatewayBackend): @@ -3463,7 +3613,8 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, self.__dict__ = {} self.__init__(region_name) - # Use this to generate a proper error template response when in a response handler. + # Use this to generate a proper error template response when in a response + # handler. 
def raise_error(self, code, message): raise EC2ClientError(code, message) @@ -3485,11 +3636,13 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, elif resource_prefix == EC2_RESOURCE_TO_PREFIX['instance']: self.get_instance_by_id(instance_id=resource_id) elif resource_prefix == EC2_RESOURCE_TO_PREFIX['internet-gateway']: - self.describe_internet_gateways(internet_gateway_ids=[resource_id]) + self.describe_internet_gateways( + internet_gateway_ids=[resource_id]) elif resource_prefix == EC2_RESOURCE_TO_PREFIX['network-acl']: self.get_all_network_acls() elif resource_prefix == EC2_RESOURCE_TO_PREFIX['network-interface']: - self.describe_network_interfaces(filters={'network-interface-id': resource_id}) + self.describe_network_interfaces( + filters={'network-interface-id': resource_id}) elif resource_prefix == EC2_RESOURCE_TO_PREFIX['reserved-instance']: self.raise_not_implemented_error('DescribeReservedInstances') elif resource_prefix == EC2_RESOURCE_TO_PREFIX['route-table']: @@ -3499,7 +3652,8 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, elif resource_prefix == EC2_RESOURCE_TO_PREFIX['snapshot']: self.get_snapshot(snapshot_id=resource_id) elif resource_prefix == EC2_RESOURCE_TO_PREFIX['spot-instance-request']: - self.describe_spot_instance_requests(filters={'spot-instance-request-id': resource_id}) + self.describe_spot_instance_requests( + filters={'spot-instance-request-id': resource_id}) elif resource_prefix == EC2_RESOURCE_TO_PREFIX['subnet']: self.get_subnet(subnet_id=resource_id) elif resource_prefix == EC2_RESOURCE_TO_PREFIX['volume']: @@ -3514,6 +3668,7 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, self.get_vpn_gateway(vpn_gateway_id=resource_id) return True + ec2_backends = {} for region in RegionsAndZonesBackend.regions: ec2_backends[region.name] = EC2Backend(region.name) diff --git a/moto/ec2/responses/__init__.py b/moto/ec2/responses/__init__.py index 2049998ad..449d25a45 100644 
--- a/moto/ec2/responses/__init__.py +++ b/moto/ec2/responses/__init__.py @@ -66,6 +66,7 @@ class EC2Response( Windows, NatGateways, ): + @property def ec2_backend(self): from moto.ec2.models import ec2_backends diff --git a/moto/ec2/responses/amazon_dev_pay.py b/moto/ec2/responses/amazon_dev_pay.py index af10a8d68..14df3f004 100644 --- a/moto/ec2/responses/amazon_dev_pay.py +++ b/moto/ec2/responses/amazon_dev_pay.py @@ -3,5 +3,7 @@ from moto.core.responses import BaseResponse class AmazonDevPay(BaseResponse): + def confirm_product_instance(self): - raise NotImplementedError('AmazonDevPay.confirm_product_instance is not yet implemented') + raise NotImplementedError( + 'AmazonDevPay.confirm_product_instance is not yet implemented') diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index b60452a3f..42bfba209 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -5,6 +5,7 @@ from moto.ec2.utils import instance_ids_from_querystring, image_ids_from_queryst class AmisResponse(BaseResponse): + def create_image(self): name = self.querystring.get('Name')[0] if "Description" in self.querystring: @@ -14,17 +15,21 @@ class AmisResponse(BaseResponse): instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] if self.is_not_dryrun('CreateImage'): - image = self.ec2_backend.create_image(instance_id, name, description) + image = self.ec2_backend.create_image( + instance_id, name, description) template = self.response_template(CREATE_IMAGE_RESPONSE) return template.render(image=image) def copy_image(self): source_image_id = self.querystring.get('SourceImageId')[0] source_region = self.querystring.get('SourceRegion')[0] - name = self.querystring.get('Name')[0] if self.querystring.get('Name') else None - description = self.querystring.get('Description')[0] if self.querystring.get('Description') else None + name = self.querystring.get( + 'Name')[0] if self.querystring.get('Name') else None + description = 
self.querystring.get( + 'Description')[0] if self.querystring.get('Description') else None if self.is_not_dryrun('CopyImage'): - image = self.ec2_backend.copy_image(source_image_id, source_region, name, description) + image = self.ec2_backend.copy_image( + source_image_id, source_region, name, description) template = self.response_template(COPY_IMAGE_RESPONSE) return template.render(image=image) @@ -38,7 +43,8 @@ class AmisResponse(BaseResponse): def describe_images(self): ami_ids = image_ids_from_querystring(self.querystring) filters = filters_from_querystring(self.querystring) - images = self.ec2_backend.describe_images(ami_ids=ami_ids, filters=filters) + images = self.ec2_backend.describe_images( + ami_ids=ami_ids, filters=filters) template = self.response_template(DESCRIBE_IMAGES_RESPONSE) return template.render(images=images) @@ -56,18 +62,22 @@ class AmisResponse(BaseResponse): user_ids = sequence_from_querystring('UserId', self.querystring) if self.is_not_dryrun('ModifyImageAttribute'): if (operation_type == 'add'): - self.ec2_backend.add_launch_permission(ami_id, user_ids=user_ids, group=group) + self.ec2_backend.add_launch_permission( + ami_id, user_ids=user_ids, group=group) elif (operation_type == 'remove'): - self.ec2_backend.remove_launch_permission(ami_id, user_ids=user_ids, group=group) + self.ec2_backend.remove_launch_permission( + ami_id, user_ids=user_ids, group=group) return MODIFY_IMAGE_ATTRIBUTE_RESPONSE def register_image(self): if self.is_not_dryrun('RegisterImage'): - raise NotImplementedError('AMIs.register_image is not yet implemented') + raise NotImplementedError( + 'AMIs.register_image is not yet implemented') def reset_image_attribute(self): if self.is_not_dryrun('ResetImageAttribute'): - raise NotImplementedError('AMIs.reset_image_attribute is not yet implemented') + raise NotImplementedError( + 'AMIs.reset_image_attribute is not yet implemented') CREATE_IMAGE_RESPONSE = """ @@ -80,7 +90,8 @@ COPY_IMAGE_RESPONSE = """ 
59dbff89-35bd-4eac-99ed-be587EXAMPLE diff --git a/moto/ec2/responses/availability_zones_and_regions.py b/moto/ec2/responses/availability_zones_and_regions.py index 50869e934..3d0a5ab05 100644 --- a/moto/ec2/responses/availability_zones_and_regions.py +++ b/moto/ec2/responses/availability_zones_and_regions.py @@ -3,6 +3,7 @@ from moto.core.responses import BaseResponse class AvailabilityZonesAndRegions(BaseResponse): + def describe_availability_zones(self): zones = self.ec2_backend.describe_availability_zones() template = self.response_template(DESCRIBE_ZONES_RESPONSE) @@ -13,6 +14,7 @@ class AvailabilityZonesAndRegions(BaseResponse): template = self.response_template(DESCRIBE_REGIONS_RESPONSE) return template.render(regions=regions) + DESCRIBE_REGIONS_RESPONSE = """ 59dbff89-35bd-4eac-99ed-be587EXAMPLE diff --git a/moto/ec2/responses/customer_gateways.py b/moto/ec2/responses/customer_gateways.py index 85f50fbcd..6da2ed2f8 100644 --- a/moto/ec2/responses/customer_gateways.py +++ b/moto/ec2/responses/customer_gateways.py @@ -10,13 +10,15 @@ class CustomerGateways(BaseResponse): type = self.querystring.get('Type', None)[0] ip_address = self.querystring.get('IpAddress', None)[0] bgp_asn = self.querystring.get('BgpAsn', None)[0] - customer_gateway = self.ec2_backend.create_customer_gateway(type, ip_address=ip_address, bgp_asn=bgp_asn) + customer_gateway = self.ec2_backend.create_customer_gateway( + type, ip_address=ip_address, bgp_asn=bgp_asn) template = self.response_template(CREATE_CUSTOMER_GATEWAY_RESPONSE) return template.render(customer_gateway=customer_gateway) def delete_customer_gateway(self): customer_gateway_id = self.querystring.get('CustomerGatewayId')[0] - delete_status = self.ec2_backend.delete_customer_gateway(customer_gateway_id) + delete_status = self.ec2_backend.delete_customer_gateway( + customer_gateway_id) template = self.response_template(DELETE_CUSTOMER_GATEWAY_RESPONSE) return template.render(customer_gateway=delete_status) diff --git 
a/moto/ec2/responses/dhcp_options.py b/moto/ec2/responses/dhcp_options.py index b9d1469b5..450ef1bf9 100644 --- a/moto/ec2/responses/dhcp_options.py +++ b/moto/ec2/responses/dhcp_options.py @@ -7,6 +7,7 @@ from moto.ec2.utils import ( class DHCPOptions(BaseResponse): + def associate_dhcp_options(self): dhcp_opt_id = self.querystring.get("DhcpOptionsId", [None])[0] vpc_id = self.querystring.get("VpcId", [None])[0] @@ -48,9 +49,11 @@ class DHCPOptions(BaseResponse): return template.render(delete_status=delete_status) def describe_dhcp_options(self): - dhcp_opt_ids = sequence_from_querystring("DhcpOptionsId", self.querystring) + dhcp_opt_ids = sequence_from_querystring( + "DhcpOptionsId", self.querystring) filters = filters_from_querystring(self.querystring) - dhcp_opts = self.ec2_backend.get_all_dhcp_options(dhcp_opt_ids, filters) + dhcp_opts = self.ec2_backend.get_all_dhcp_options( + dhcp_opt_ids, filters) template = self.response_template(DESCRIBE_DHCP_OPTIONS_RESPONSE) return template.render(dhcp_options=dhcp_opts) diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index ddbf30e68..0773ffbe2 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -10,13 +10,15 @@ class ElasticBlockStore(BaseResponse): instance_id = self.querystring.get('InstanceId')[0] device_path = self.querystring.get('Device')[0] if self.is_not_dryrun('AttachVolume'): - attachment = self.ec2_backend.attach_volume(volume_id, instance_id, device_path) + attachment = self.ec2_backend.attach_volume( + volume_id, instance_id, device_path) template = self.response_template(ATTACHED_VOLUME_RESPONSE) return template.render(attachment=attachment) def copy_snapshot(self): if self.is_not_dryrun('CopySnapshot'): - raise NotImplementedError('ElasticBlockStore.copy_snapshot is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.copy_snapshot is not yet implemented') def create_snapshot(self): 
description = self.querystring.get('Description', [None])[0] @@ -32,7 +34,8 @@ class ElasticBlockStore(BaseResponse): snapshot_id = self.querystring.get('SnapshotId', [None])[0] encrypted = self.querystring.get('Encrypted', ['false'])[0] if self.is_not_dryrun('CreateVolume'): - volume = self.ec2_backend.create_volume(size, zone, snapshot_id, encrypted) + volume = self.ec2_backend.create_volume( + size, zone, snapshot_id, encrypted) template = self.response_template(CREATE_VOLUME_RESPONSE) return template.render(volume=volume) @@ -50,51 +53,64 @@ class ElasticBlockStore(BaseResponse): def describe_snapshots(self): filters = filters_from_querystring(self.querystring) - # querystring for multiple snapshotids results in SnapshotId.1, SnapshotId.2 etc - snapshot_ids = ','.join([','.join(s[1]) for s in self.querystring.items() if 'SnapshotId' in s[0]]) + # querystring for multiple snapshotids results in SnapshotId.1, + # SnapshotId.2 etc + snapshot_ids = ','.join( + [','.join(s[1]) for s in self.querystring.items() if 'SnapshotId' in s[0]]) snapshots = self.ec2_backend.describe_snapshots(filters=filters) # Describe snapshots to handle filter on snapshot_ids - snapshots = [s for s in snapshots if s.id in snapshot_ids] if snapshot_ids else snapshots + snapshots = [ + s for s in snapshots if s.id in snapshot_ids] if snapshot_ids else snapshots template = self.response_template(DESCRIBE_SNAPSHOTS_RESPONSE) return template.render(snapshots=snapshots) def describe_volumes(self): filters = filters_from_querystring(self.querystring) - # querystring for multiple volumeids results in VolumeId.1, VolumeId.2 etc - volume_ids = ','.join([','.join(v[1]) for v in self.querystring.items() if 'VolumeId' in v[0]]) + # querystring for multiple volumeids results in VolumeId.1, VolumeId.2 + # etc + volume_ids = ','.join( + [','.join(v[1]) for v in self.querystring.items() if 'VolumeId' in v[0]]) volumes = self.ec2_backend.describe_volumes(filters=filters) # Describe volumes to handle filter 
on volume_ids - volumes = [v for v in volumes if v.id in volume_ids] if volume_ids else volumes + volumes = [ + v for v in volumes if v.id in volume_ids] if volume_ids else volumes template = self.response_template(DESCRIBE_VOLUMES_RESPONSE) return template.render(volumes=volumes) def describe_volume_attribute(self): - raise NotImplementedError('ElasticBlockStore.describe_volume_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.describe_volume_attribute is not yet implemented') def describe_volume_status(self): - raise NotImplementedError('ElasticBlockStore.describe_volume_status is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.describe_volume_status is not yet implemented') def detach_volume(self): volume_id = self.querystring.get('VolumeId')[0] instance_id = self.querystring.get('InstanceId')[0] device_path = self.querystring.get('Device')[0] if self.is_not_dryrun('DetachVolume'): - attachment = self.ec2_backend.detach_volume(volume_id, instance_id, device_path) + attachment = self.ec2_backend.detach_volume( + volume_id, instance_id, device_path) template = self.response_template(DETATCH_VOLUME_RESPONSE) return template.render(attachment=attachment) def enable_volume_io(self): if self.is_not_dryrun('EnableVolumeIO'): - raise NotImplementedError('ElasticBlockStore.enable_volume_io is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.enable_volume_io is not yet implemented') def import_volume(self): if self.is_not_dryrun('ImportVolume'): - raise NotImplementedError('ElasticBlockStore.import_volume is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.import_volume is not yet implemented') def describe_snapshot_attribute(self): snapshot_id = self.querystring.get('SnapshotId')[0] - groups = self.ec2_backend.get_create_volume_permission_groups(snapshot_id) - template = self.response_template(DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE) + groups = 
self.ec2_backend.get_create_volume_permission_groups( + snapshot_id) + template = self.response_template( + DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE) return template.render(snapshot_id=snapshot_id, groups=groups) def modify_snapshot_attribute(self): @@ -104,18 +120,22 @@ class ElasticBlockStore(BaseResponse): user_id = self.querystring.get('UserId.1', [None])[0] if self.is_not_dryrun('ModifySnapshotAttribute'): if (operation_type == 'add'): - self.ec2_backend.add_create_volume_permission(snapshot_id, user_id=user_id, group=group) + self.ec2_backend.add_create_volume_permission( + snapshot_id, user_id=user_id, group=group) elif (operation_type == 'remove'): - self.ec2_backend.remove_create_volume_permission(snapshot_id, user_id=user_id, group=group) + self.ec2_backend.remove_create_volume_permission( + snapshot_id, user_id=user_id, group=group) return MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE def modify_volume_attribute(self): if self.is_not_dryrun('ModifyVolumeAttribute'): - raise NotImplementedError('ElasticBlockStore.modify_volume_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.modify_volume_attribute is not yet implemented') def reset_snapshot_attribute(self): if self.is_not_dryrun('ResetSnapshotAttribute'): - raise NotImplementedError('ElasticBlockStore.reset_snapshot_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticBlockStore.reset_snapshot_attribute is not yet implemented') CREATE_VOLUME_RESPONSE = """ @@ -272,4 +292,4 @@ MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE = """ 666d2944-9276-4d6a-be12-1f4ada972fd8 true -""" \ No newline at end of file +""" diff --git a/moto/ec2/responses/elastic_ip_addresses.py b/moto/ec2/responses/elastic_ip_addresses.py index 3ae75671f..a64a33bb5 100644 --- a/moto/ec2/responses/elastic_ip_addresses.py +++ b/moto/ec2/responses/elastic_ip_addresses.py @@ -4,6 +4,7 @@ from moto.ec2.utils import sequence_from_querystring class ElasticIPAddresses(BaseResponse): + def allocate_address(self): if 
"Domain" in self.querystring: domain = self.querystring.get('Domain')[0] @@ -18,11 +19,14 @@ class ElasticIPAddresses(BaseResponse): instance = eni = None if "InstanceId" in self.querystring: - instance = self.ec2_backend.get_instance(self.querystring['InstanceId'][0]) + instance = self.ec2_backend.get_instance( + self.querystring['InstanceId'][0]) elif "NetworkInterfaceId" in self.querystring: - eni = self.ec2_backend.get_network_interface(self.querystring['NetworkInterfaceId'][0]) + eni = self.ec2_backend.get_network_interface( + self.querystring['NetworkInterfaceId'][0]) else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect InstanceId/NetworkId parameter.") + self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect InstanceId/NetworkId parameter.") reassociate = False if "AllowReassociation" in self.querystring: @@ -31,13 +35,17 @@ class ElasticIPAddresses(BaseResponse): if self.is_not_dryrun('AssociateAddress'): if instance or eni: if "PublicIp" in self.querystring: - eip = self.ec2_backend.associate_address(instance=instance, eni=eni, address=self.querystring['PublicIp'][0], reassociate=reassociate) + eip = self.ec2_backend.associate_address(instance=instance, eni=eni, address=self.querystring[ + 'PublicIp'][0], reassociate=reassociate) elif "AllocationId" in self.querystring: - eip = self.ec2_backend.associate_address(instance=instance, eni=eni, allocation_id=self.querystring['AllocationId'][0], reassociate=reassociate) + eip = self.ec2_backend.associate_address(instance=instance, eni=eni, allocation_id=self.querystring[ + 'AllocationId'][0], reassociate=reassociate) else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") + self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect either instance or ENI.") + 
self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect either instance or ENI.") template = self.response_template(ASSOCIATE_ADDRESS_RESPONSE) return template.render(address=eip) @@ -46,17 +54,23 @@ class ElasticIPAddresses(BaseResponse): template = self.response_template(DESCRIBE_ADDRESS_RESPONSE) if "Filter.1.Name" in self.querystring: - filter_by = sequence_from_querystring("Filter.1.Name", self.querystring)[0] - filter_value = sequence_from_querystring("Filter.1.Value", self.querystring) + filter_by = sequence_from_querystring( + "Filter.1.Name", self.querystring)[0] + filter_value = sequence_from_querystring( + "Filter.1.Value", self.querystring) if filter_by == 'instance-id': - addresses = filter(lambda x: x.instance.id == filter_value[0], self.ec2_backend.describe_addresses()) + addresses = filter(lambda x: x.instance.id == filter_value[ + 0], self.ec2_backend.describe_addresses()) else: - raise NotImplementedError("Filtering not supported in describe_address.") + raise NotImplementedError( + "Filtering not supported in describe_address.") elif "PublicIp.1" in self.querystring: - public_ips = sequence_from_querystring("PublicIp", self.querystring) + public_ips = sequence_from_querystring( + "PublicIp", self.querystring) addresses = self.ec2_backend.address_by_ip(public_ips) elif "AllocationId.1" in self.querystring: - allocation_ids = sequence_from_querystring("AllocationId", self.querystring) + allocation_ids = sequence_from_querystring( + "AllocationId", self.querystring) addresses = self.ec2_backend.address_by_allocation(allocation_ids) else: addresses = self.ec2_backend.describe_addresses() @@ -65,22 +79,28 @@ class ElasticIPAddresses(BaseResponse): def disassociate_address(self): if self.is_not_dryrun('DisAssociateAddress'): if "PublicIp" in self.querystring: - self.ec2_backend.disassociate_address(address=self.querystring['PublicIp'][0]) + self.ec2_backend.disassociate_address( + address=self.querystring['PublicIp'][0]) elif 
"AssociationId" in self.querystring: - self.ec2_backend.disassociate_address(association_id=self.querystring['AssociationId'][0]) + self.ec2_backend.disassociate_address( + association_id=self.querystring['AssociationId'][0]) else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect PublicIp/AssociationId parameter.") + self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect PublicIp/AssociationId parameter.") return self.response_template(DISASSOCIATE_ADDRESS_RESPONSE).render() def release_address(self): if self.is_not_dryrun('ReleaseAddress'): if "PublicIp" in self.querystring: - self.ec2_backend.release_address(address=self.querystring['PublicIp'][0]) + self.ec2_backend.release_address( + address=self.querystring['PublicIp'][0]) elif "AllocationId" in self.querystring: - self.ec2_backend.release_address(allocation_id=self.querystring['AllocationId'][0]) + self.ec2_backend.release_address( + allocation_id=self.querystring['AllocationId'][0]) else: - self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") + self.ec2_backend.raise_error( + "MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.") return self.response_template(RELEASE_ADDRESS_RESPONSE).render() diff --git a/moto/ec2/responses/elastic_network_interfaces.py b/moto/ec2/responses/elastic_network_interfaces.py index c1c7383cb..cbe76e306 100644 --- a/moto/ec2/responses/elastic_network_interfaces.py +++ b/moto/ec2/responses/elastic_network_interfaces.py @@ -4,28 +4,35 @@ from moto.ec2.utils import sequence_from_querystring, filters_from_querystring class ElasticNetworkInterfaces(BaseResponse): + def create_network_interface(self): subnet_id = self.querystring.get('SubnetId')[0] - private_ip_address = self.querystring.get('PrivateIpAddress', [None])[0] + private_ip_address = self.querystring.get( + 'PrivateIpAddress', [None])[0] groups = sequence_from_querystring('SecurityGroupId', 
self.querystring) subnet = self.ec2_backend.get_subnet(subnet_id) if self.is_not_dryrun('CreateNetworkInterface'): - eni = self.ec2_backend.create_network_interface(subnet, private_ip_address, groups) - template = self.response_template(CREATE_NETWORK_INTERFACE_RESPONSE) + eni = self.ec2_backend.create_network_interface( + subnet, private_ip_address, groups) + template = self.response_template( + CREATE_NETWORK_INTERFACE_RESPONSE) return template.render(eni=eni) def delete_network_interface(self): eni_id = self.querystring.get('NetworkInterfaceId')[0] if self.is_not_dryrun('DeleteNetworkInterface'): self.ec2_backend.delete_network_interface(eni_id) - template = self.response_template(DELETE_NETWORK_INTERFACE_RESPONSE) + template = self.response_template( + DELETE_NETWORK_INTERFACE_RESPONSE) return template.render() def describe_network_interface_attribute(self): - raise NotImplementedError('ElasticNetworkInterfaces(AmazonVPC).describe_network_interface_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticNetworkInterfaces(AmazonVPC).describe_network_interface_attribute is not yet implemented') def describe_network_interfaces(self): - eni_ids = sequence_from_querystring('NetworkInterfaceId', self.querystring) + eni_ids = sequence_from_querystring( + 'NetworkInterfaceId', self.querystring) filters = filters_from_querystring(self.querystring) enis = self.ec2_backend.get_all_network_interfaces(eni_ids, filters) template = self.response_template(DESCRIBE_NETWORK_INTERFACES_RESPONSE) @@ -36,15 +43,18 @@ class ElasticNetworkInterfaces(BaseResponse): instance_id = self.querystring.get('InstanceId')[0] device_index = self.querystring.get('DeviceIndex')[0] if self.is_not_dryrun('AttachNetworkInterface'): - attachment_id = self.ec2_backend.attach_network_interface(eni_id, instance_id, device_index) - template = self.response_template(ATTACH_NETWORK_INTERFACE_RESPONSE) + attachment_id = self.ec2_backend.attach_network_interface( + eni_id, instance_id, 
device_index) + template = self.response_template( + ATTACH_NETWORK_INTERFACE_RESPONSE) return template.render(attachment_id=attachment_id) def detach_network_interface(self): attachment_id = self.querystring.get('AttachmentId')[0] if self.is_not_dryrun('DetachNetworkInterface'): self.ec2_backend.detach_network_interface(attachment_id) - template = self.response_template(DETACH_NETWORK_INTERFACE_RESPONSE) + template = self.response_template( + DETACH_NETWORK_INTERFACE_RESPONSE) return template.render() def modify_network_interface_attribute(self): @@ -52,12 +62,15 @@ class ElasticNetworkInterfaces(BaseResponse): eni_id = self.querystring.get('NetworkInterfaceId')[0] group_id = self.querystring.get('SecurityGroupId.1')[0] if self.is_not_dryrun('ModifyNetworkInterface'): - self.ec2_backend.modify_network_interface_attribute(eni_id, group_id) + self.ec2_backend.modify_network_interface_attribute( + eni_id, group_id) return MODIFY_NETWORK_INTERFACE_ATTRIBUTE_RESPONSE def reset_network_interface_attribute(self): if self.is_not_dryrun('ResetNetworkInterface'): - raise NotImplementedError('ElasticNetworkInterfaces(AmazonVPC).reset_network_interface_attribute is not yet implemented') + raise NotImplementedError( + 'ElasticNetworkInterfaces(AmazonVPC).reset_network_interface_attribute is not yet implemented') + CREATE_NETWORK_INTERFACE_RESPONSE = """ diff --git a/moto/ec2/responses/general.py b/moto/ec2/responses/general.py index 9fce05ccf..bd95c1975 100644 --- a/moto/ec2/responses/general.py +++ b/moto/ec2/responses/general.py @@ -4,6 +4,7 @@ from moto.ec2.utils import instance_ids_from_querystring class General(BaseResponse): + def get_console_output(self): self.instance_ids = instance_ids_from_querystring(self.querystring) instance_id = self.instance_ids[0] diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 3c5a087d9..4da7b880f 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -5,14 +5,18 @@ from 
moto.core.utils import camelcase_to_underscores from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, \ dict_from_querystring, optional_from_querystring + class InstanceResponse(BaseResponse): + def describe_instances(self): filter_dict = filters_from_querystring(self.querystring) instance_ids = instance_ids_from_querystring(self.querystring) if instance_ids: - reservations = self.ec2_backend.get_reservations_by_instance_ids(instance_ids, filters=filter_dict) + reservations = self.ec2_backend.get_reservations_by_instance_ids( + instance_ids, filters=filter_dict) else: - reservations = self.ec2_backend.all_reservations(make_copy=True, filters=filter_dict) + reservations = self.ec2_backend.all_reservations( + make_copy=True, filters=filter_dict) template = self.response_template(EC2_DESCRIBE_INSTANCES) return template.render(reservations=reservations) @@ -25,10 +29,12 @@ class InstanceResponse(BaseResponse): security_group_ids = self._get_multi_param('SecurityGroupId') nics = dict_from_querystring("NetworkInterface", self.querystring) instance_type = self.querystring.get("InstanceType", ["m1.small"])[0] - placement = self.querystring.get("Placement.AvailabilityZone", [None])[0] + placement = self.querystring.get( + "Placement.AvailabilityZone", [None])[0] subnet_id = self.querystring.get("SubnetId", [None])[0] private_ip = self.querystring.get("PrivateIpAddress", [None])[0] - associate_public_ip = self.querystring.get("AssociatePublicIpAddress", [None])[0] + associate_public_ip = self.querystring.get( + "AssociatePublicIpAddress", [None])[0] key_name = self.querystring.get("KeyName", [None])[0] if self.is_not_dryrun('RunInstance'): @@ -72,10 +78,11 @@ class InstanceResponse(BaseResponse): def describe_instance_status(self): instance_ids = instance_ids_from_querystring(self.querystring) include_all_instances = optional_from_querystring('IncludeAllInstances', - self.querystring) == 'true' + self.querystring) == 'true' if instance_ids: - 
instances = self.ec2_backend.get_multi_instances_by_id(instance_ids) + instances = self.ec2_backend.get_multi_instances_by_id( + instance_ids) elif include_all_instances: instances = self.ec2_backend.all_instances() else: @@ -85,7 +92,8 @@ class InstanceResponse(BaseResponse): return template.render(instances=instances) def describe_instance_types(self): - instance_types = [InstanceType(name='t1.micro', cores=1, memory=644874240, disk=0)] + instance_types = [InstanceType( + name='t1.micro', cores=1, memory=644874240, disk=0)] template = self.response_template(EC2_DESCRIBE_INSTANCE_TYPES) return template.render(instance_types=instance_types) @@ -96,10 +104,12 @@ class InstanceResponse(BaseResponse): key = camelcase_to_underscores(attribute) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] - instance, value = self.ec2_backend.describe_instance_attribute(instance_id, key) + instance, value = self.ec2_backend.describe_instance_attribute( + instance_id, key) if key == "group_set": - template = self.response_template(EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE) + template = self.response_template( + EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE) else: template = self.response_template(EC2_DESCRIBE_INSTANCE_ATTRIBUTE) @@ -152,7 +162,8 @@ class InstanceResponse(BaseResponse): instance = self.ec2_backend.get_instance(instance_id) if self.is_not_dryrun('ModifyInstanceAttribute'): - block_device_type = instance.block_device_mapping[device_name_value] + block_device_type = instance.block_device_mapping[ + device_name_value] block_device_type.delete_on_termination = del_on_term_value # +1 for the next device @@ -171,24 +182,27 @@ class InstanceResponse(BaseResponse): if not attribute_key: return - if self.is_not_dryrun('Modify'+attribute_key.split(".")[0]): + if self.is_not_dryrun('Modify' + attribute_key.split(".")[0]): value = self.querystring.get(attribute_key)[0] - normalized_attribute = 
camelcase_to_underscores(attribute_key.split(".")[0]) + normalized_attribute = camelcase_to_underscores( + attribute_key.split(".")[0]) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] - self.ec2_backend.modify_instance_attribute(instance_id, normalized_attribute, value) + self.ec2_backend.modify_instance_attribute( + instance_id, normalized_attribute, value) return EC2_MODIFY_INSTANCE_ATTRIBUTE def _security_grp_instance_attribute_handler(self): new_security_grp_list = [] for key, value in self.querystring.items(): - if 'GroupId.' in key: + if 'GroupId.' in key: new_security_grp_list.append(self.querystring.get(key)[0]) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] if self.is_not_dryrun('ModifyInstanceSecurityGroups'): - self.ec2_backend.modify_instance_security_groups(instance_id, new_security_grp_list) + self.ec2_backend.modify_instance_security_groups( + instance_id, new_security_grp_list) return EC2_MODIFY_INSTANCE_ATTRIBUTE @@ -630,4 +644,4 @@ EC2_DESCRIBE_INSTANCE_TYPES = """ {% endfor %} -""" \ No newline at end of file +""" diff --git a/moto/ec2/responses/internet_gateways.py b/moto/ec2/responses/internet_gateways.py index 5b7a824f0..4a3da0b34 100644 --- a/moto/ec2/responses/internet_gateways.py +++ b/moto/ec2/responses/internet_gateways.py @@ -7,6 +7,7 @@ from moto.ec2.utils import ( class InternetGateways(BaseResponse): + def attach_internet_gateway(self): igw_id = self.querystring.get("InternetGatewayId", [None])[0] vpc_id = self.querystring.get("VpcId", [None])[0] @@ -33,9 +34,11 @@ class InternetGateways(BaseResponse): if "InternetGatewayId.1" in self.querystring: igw_ids = sequence_from_querystring( "InternetGatewayId", self.querystring) - igws = self.ec2_backend.describe_internet_gateways(igw_ids, filters=filter_dict) + igws = self.ec2_backend.describe_internet_gateways( + igw_ids, filters=filter_dict) else: - igws = 
self.ec2_backend.describe_internet_gateways(filters=filter_dict) + igws = self.ec2_backend.describe_internet_gateways( + filters=filter_dict) template = self.response_template(DESCRIBE_INTERNET_GATEWAYS_RESPONSE) return template.render(internet_gateways=igws) diff --git a/moto/ec2/responses/ip_addresses.py b/moto/ec2/responses/ip_addresses.py index 995719202..fab5cbddc 100644 --- a/moto/ec2/responses/ip_addresses.py +++ b/moto/ec2/responses/ip_addresses.py @@ -4,10 +4,13 @@ from moto.core.responses import BaseResponse class IPAddresses(BaseResponse): + def assign_private_ip_addresses(self): if self.is_not_dryrun('AssignPrivateIPAddress'): - raise NotImplementedError('IPAddresses.assign_private_ip_addresses is not yet implemented') + raise NotImplementedError( + 'IPAddresses.assign_private_ip_addresses is not yet implemented') def unassign_private_ip_addresses(self): if self.is_not_dryrun('UnAssignPrivateIPAddress'): - raise NotImplementedError('IPAddresses.unassign_private_ip_addresses is not yet implemented') + raise NotImplementedError( + 'IPAddresses.unassign_private_ip_addresses is not yet implemented') diff --git a/moto/ec2/responses/key_pairs.py b/moto/ec2/responses/key_pairs.py index 72f8715ec..936df2cd3 100644 --- a/moto/ec2/responses/key_pairs.py +++ b/moto/ec2/responses/key_pairs.py @@ -16,14 +16,16 @@ class KeyPairs(BaseResponse): def delete_key_pair(self): name = self.querystring.get('KeyName')[0] if self.is_not_dryrun('DeleteKeyPair'): - success = six.text_type(self.ec2_backend.delete_key_pair(name)).lower() + success = six.text_type( + self.ec2_backend.delete_key_pair(name)).lower() return self.response_template(DELETE_KEY_PAIR_RESPONSE).render(success=success) def describe_key_pairs(self): names = keypair_names_from_querystring(self.querystring) filters = filters_from_querystring(self.querystring) if len(filters) > 0: - raise NotImplementedError('Using filters in KeyPairs.describe_key_pairs is not yet implemented') + raise NotImplementedError( + 
'Using filters in KeyPairs.describe_key_pairs is not yet implemented') keypairs = self.ec2_backend.describe_key_pairs(names) template = self.response_template(DESCRIBE_KEY_PAIRS_RESPONSE) diff --git a/moto/ec2/responses/monitoring.py b/moto/ec2/responses/monitoring.py index 3d40a1479..2024abe7e 100644 --- a/moto/ec2/responses/monitoring.py +++ b/moto/ec2/responses/monitoring.py @@ -3,10 +3,13 @@ from moto.core.responses import BaseResponse class Monitoring(BaseResponse): + def monitor_instances(self): if self.is_not_dryrun('MonitorInstances'): - raise NotImplementedError('Monitoring.monitor_instances is not yet implemented') + raise NotImplementedError( + 'Monitoring.monitor_instances is not yet implemented') def unmonitor_instances(self): if self.is_not_dryrun('UnMonitorInstances'): - raise NotImplementedError('Monitoring.unmonitor_instances is not yet implemented') + raise NotImplementedError( + 'Monitoring.unmonitor_instances is not yet implemented') diff --git a/moto/ec2/responses/nat_gateways.py b/moto/ec2/responses/nat_gateways.py index 98d383d47..ce9479e82 100644 --- a/moto/ec2/responses/nat_gateways.py +++ b/moto/ec2/responses/nat_gateways.py @@ -8,7 +8,8 @@ class NatGateways(BaseResponse): def create_nat_gateway(self): subnet_id = self._get_param('SubnetId') allocation_id = self._get_param('AllocationId') - nat_gateway = self.ec2_backend.create_nat_gateway(subnet_id=subnet_id, allocation_id=allocation_id) + nat_gateway = self.ec2_backend.create_nat_gateway( + subnet_id=subnet_id, allocation_id=allocation_id) template = self.response_template(CREATE_NAT_GATEWAY) return template.render(nat_gateway=nat_gateway) diff --git a/moto/ec2/responses/network_acls.py b/moto/ec2/responses/network_acls.py index 8093e18c8..bf9833d13 100644 --- a/moto/ec2/responses/network_acls.py +++ b/moto/ec2/responses/network_acls.py @@ -45,7 +45,8 @@ class NetworkACLs(BaseResponse): def describe_network_acls(self): network_acl_ids = network_acl_ids_from_querystring(self.querystring) 
filters = filters_from_querystring(self.querystring) - network_acls = self.ec2_backend.get_all_network_acls(network_acl_ids, filters) + network_acls = self.ec2_backend.get_all_network_acls( + network_acl_ids, filters) template = self.response_template(DESCRIBE_NETWORK_ACL_RESPONSE) return template.render(network_acls=network_acls) diff --git a/moto/ec2/responses/placement_groups.py b/moto/ec2/responses/placement_groups.py index 88926490f..06930f700 100644 --- a/moto/ec2/responses/placement_groups.py +++ b/moto/ec2/responses/placement_groups.py @@ -3,13 +3,17 @@ from moto.core.responses import BaseResponse class PlacementGroups(BaseResponse): + def create_placement_group(self): if self.is_not_dryrun('CreatePlacementGroup'): - raise NotImplementedError('PlacementGroups.create_placement_group is not yet implemented') + raise NotImplementedError( + 'PlacementGroups.create_placement_group is not yet implemented') def delete_placement_group(self): if self.is_not_dryrun('DeletePlacementGroup'): - raise NotImplementedError('PlacementGroups.delete_placement_group is not yet implemented') + raise NotImplementedError( + 'PlacementGroups.delete_placement_group is not yet implemented') def describe_placement_groups(self): - raise NotImplementedError('PlacementGroups.describe_placement_groups is not yet implemented') + raise NotImplementedError( + 'PlacementGroups.describe_placement_groups is not yet implemented') diff --git a/moto/ec2/responses/reserved_instances.py b/moto/ec2/responses/reserved_instances.py index be27260c8..07bd6661e 100644 --- a/moto/ec2/responses/reserved_instances.py +++ b/moto/ec2/responses/reserved_instances.py @@ -3,23 +3,30 @@ from moto.core.responses import BaseResponse class ReservedInstances(BaseResponse): + def cancel_reserved_instances_listing(self): if self.is_not_dryrun('CancelReservedInstances'): - raise NotImplementedError('ReservedInstances.cancel_reserved_instances_listing is not yet implemented') + raise NotImplementedError( + 
'ReservedInstances.cancel_reserved_instances_listing is not yet implemented') def create_reserved_instances_listing(self): if self.is_not_dryrun('CreateReservedInstances'): - raise NotImplementedError('ReservedInstances.create_reserved_instances_listing is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.create_reserved_instances_listing is not yet implemented') def describe_reserved_instances(self): - raise NotImplementedError('ReservedInstances.describe_reserved_instances is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.describe_reserved_instances is not yet implemented') def describe_reserved_instances_listings(self): - raise NotImplementedError('ReservedInstances.describe_reserved_instances_listings is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.describe_reserved_instances_listings is not yet implemented') def describe_reserved_instances_offerings(self): - raise NotImplementedError('ReservedInstances.describe_reserved_instances_offerings is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.describe_reserved_instances_offerings is not yet implemented') def purchase_reserved_instances_offering(self): if self.is_not_dryrun('PurchaseReservedInstances'): - raise NotImplementedError('ReservedInstances.purchase_reserved_instances_offering is not yet implemented') + raise NotImplementedError( + 'ReservedInstances.purchase_reserved_instances_offering is not yet implemented') diff --git a/moto/ec2/responses/route_tables.py b/moto/ec2/responses/route_tables.py index 04fdf1d25..6f68a6553 100644 --- a/moto/ec2/responses/route_tables.py +++ b/moto/ec2/responses/route_tables.py @@ -8,24 +8,28 @@ class RouteTables(BaseResponse): def associate_route_table(self): route_table_id = self.querystring.get('RouteTableId')[0] subnet_id = self.querystring.get('SubnetId')[0] - association_id = self.ec2_backend.associate_route_table(route_table_id, subnet_id) + association_id = 
self.ec2_backend.associate_route_table( + route_table_id, subnet_id) template = self.response_template(ASSOCIATE_ROUTE_TABLE_RESPONSE) return template.render(association_id=association_id) def create_route(self): route_table_id = self.querystring.get('RouteTableId')[0] - destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0] + destination_cidr_block = self.querystring.get( + 'DestinationCidrBlock')[0] gateway_id = optional_from_querystring('GatewayId', self.querystring) instance_id = optional_from_querystring('InstanceId', self.querystring) - interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring) - pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring) + interface_id = optional_from_querystring( + 'NetworkInterfaceId', self.querystring) + pcx_id = optional_from_querystring( + 'VpcPeeringConnectionId', self.querystring) self.ec2_backend.create_route(route_table_id, destination_cidr_block, - gateway_id=gateway_id, - instance_id=instance_id, - interface_id=interface_id, - vpc_peering_connection_id=pcx_id) + gateway_id=gateway_id, + instance_id=instance_id, + interface_id=interface_id, + vpc_peering_connection_id=pcx_id) template = self.response_template(CREATE_ROUTE_RESPONSE) return template.render() @@ -38,7 +42,8 @@ class RouteTables(BaseResponse): def delete_route(self): route_table_id = self.querystring.get('RouteTableId')[0] - destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0] + destination_cidr_block = self.querystring.get( + 'DestinationCidrBlock')[0] self.ec2_backend.delete_route(route_table_id, destination_cidr_block) template = self.response_template(DELETE_ROUTE_RESPONSE) return template.render() @@ -52,7 +57,8 @@ class RouteTables(BaseResponse): def describe_route_tables(self): route_table_ids = route_table_ids_from_querystring(self.querystring) filters = filters_from_querystring(self.querystring) - route_tables = self.ec2_backend.get_all_route_tables(route_table_ids, 
filters) + route_tables = self.ec2_backend.get_all_route_tables( + route_table_ids, filters) template = self.response_template(DESCRIBE_ROUTE_TABLES_RESPONSE) return template.render(route_tables=route_tables) @@ -64,18 +70,21 @@ class RouteTables(BaseResponse): def replace_route(self): route_table_id = self.querystring.get('RouteTableId')[0] - destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0] + destination_cidr_block = self.querystring.get( + 'DestinationCidrBlock')[0] gateway_id = optional_from_querystring('GatewayId', self.querystring) instance_id = optional_from_querystring('InstanceId', self.querystring) - interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring) - pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring) + interface_id = optional_from_querystring( + 'NetworkInterfaceId', self.querystring) + pcx_id = optional_from_querystring( + 'VpcPeeringConnectionId', self.querystring) self.ec2_backend.replace_route(route_table_id, destination_cidr_block, - gateway_id=gateway_id, - instance_id=instance_id, - interface_id=interface_id, - vpc_peering_connection_id=pcx_id) + gateway_id=gateway_id, + instance_id=instance_id, + interface_id=interface_id, + vpc_peering_connection_id=pcx_id) template = self.response_template(REPLACE_ROUTE_RESPONSE) return template.render() @@ -83,8 +92,10 @@ class RouteTables(BaseResponse): def replace_route_table_association(self): route_table_id = self.querystring.get('RouteTableId')[0] association_id = self.querystring.get('AssociationId')[0] - new_association_id = self.ec2_backend.replace_route_table_association(association_id, route_table_id) - template = self.response_template(REPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE) + new_association_id = self.ec2_backend.replace_route_table_association( + association_id, route_table_id) + template = self.response_template( + REPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE) return template.render(association_id=new_association_id) 
diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index 3451dc1ef..6f485fa31 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -1,7 +1,5 @@ from __future__ import unicode_literals -import collections - from moto.core.responses import BaseResponse from moto.ec2.utils import filters_from_querystring @@ -55,10 +53,11 @@ def process_rules_from_querystring(querystring): source_groups.append(group_dict['GroupName'][0]) yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges, - source_groups, source_group_ids) + source_groups, source_group_ids) class SecurityGroups(BaseResponse): + def authorize_security_group_egress(self): if self.is_not_dryrun('GrantSecurityGroupEgress'): for args in process_rules_from_querystring(self.querystring): @@ -77,12 +76,15 @@ class SecurityGroups(BaseResponse): vpc_id = self.querystring.get("VpcId", [None])[0] if self.is_not_dryrun('CreateSecurityGroup'): - group = self.ec2_backend.create_security_group(name, description, vpc_id=vpc_id) + group = self.ec2_backend.create_security_group( + name, description, vpc_id=vpc_id) template = self.response_template(CREATE_SECURITY_GROUP_RESPONSE) return template.render(group=group) def delete_security_group(self): - # TODO this should raise an error if there are instances in the group. See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html + # TODO this should raise an error if there are instances in the group. 
+ # See + # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html name = self.querystring.get('GroupName') sg_id = self.querystring.get('GroupId') diff --git a/moto/ec2/responses/spot_fleets.py b/moto/ec2/responses/spot_fleets.py index 3004cc0bb..e39d9b178 100644 --- a/moto/ec2/responses/spot_fleets.py +++ b/moto/ec2/responses/spot_fleets.py @@ -7,21 +7,25 @@ class SpotFleets(BaseResponse): def cancel_spot_fleet_requests(self): spot_fleet_request_ids = self._get_multi_param("SpotFleetRequestId.") terminate_instances = self._get_param("TerminateInstances") - spot_fleets = self.ec2_backend.cancel_spot_fleet_requests(spot_fleet_request_ids, terminate_instances) + spot_fleets = self.ec2_backend.cancel_spot_fleet_requests( + spot_fleet_request_ids, terminate_instances) template = self.response_template(CANCEL_SPOT_FLEETS_TEMPLATE) return template.render(spot_fleets=spot_fleets) def describe_spot_fleet_instances(self): spot_fleet_request_id = self._get_param("SpotFleetRequestId") - spot_requests = self.ec2_backend.describe_spot_fleet_instances(spot_fleet_request_id) - template = self.response_template(DESCRIBE_SPOT_FLEET_INSTANCES_TEMPLATE) + spot_requests = self.ec2_backend.describe_spot_fleet_instances( + spot_fleet_request_id) + template = self.response_template( + DESCRIBE_SPOT_FLEET_INSTANCES_TEMPLATE) return template.render(spot_request_id=spot_fleet_request_id, spot_requests=spot_requests) def describe_spot_fleet_requests(self): spot_fleet_request_ids = self._get_multi_param("SpotFleetRequestId.") - requests = self.ec2_backend.describe_spot_fleet_requests(spot_fleet_request_ids) + requests = self.ec2_backend.describe_spot_fleet_requests( + spot_fleet_request_ids) template = self.response_template(DESCRIBE_SPOT_FLEET_TEMPLATE) return template.render(requests=requests) @@ -32,7 +36,8 @@ class SpotFleets(BaseResponse): iam_fleet_role = spot_config['iam_fleet_role'] allocation_strategy = spot_config['allocation_strategy'] - 
launch_specs = self._get_list_prefix("SpotFleetRequestConfig.LaunchSpecifications") + launch_specs = self._get_list_prefix( + "SpotFleetRequestConfig.LaunchSpecifications") request = self.ec2_backend.request_spot_fleet( spot_price=spot_price, @@ -45,6 +50,7 @@ class SpotFleets(BaseResponse): template = self.response_template(REQUEST_SPOT_FLEET_TEMPLATE) return template.render(request=request) + REQUEST_SPOT_FLEET_TEMPLATE = """ 60262cc5-2bd4-4c8d-98ed-example {{ request.id }} diff --git a/moto/ec2/responses/spot_instances.py b/moto/ec2/responses/spot_instances.py index 96e5a1ba4..b0e80a320 100644 --- a/moto/ec2/responses/spot_instances.py +++ b/moto/ec2/responses/spot_instances.py @@ -8,29 +8,35 @@ class SpotInstances(BaseResponse): def cancel_spot_instance_requests(self): request_ids = self._get_multi_param('SpotInstanceRequestId') if self.is_not_dryrun('CancelSpotInstance'): - requests = self.ec2_backend.cancel_spot_instance_requests(request_ids) + requests = self.ec2_backend.cancel_spot_instance_requests( + request_ids) template = self.response_template(CANCEL_SPOT_INSTANCES_TEMPLATE) return template.render(requests=requests) def create_spot_datafeed_subscription(self): if self.is_not_dryrun('CreateSpotDatafeedSubscription'): - raise NotImplementedError('SpotInstances.create_spot_datafeed_subscription is not yet implemented') + raise NotImplementedError( + 'SpotInstances.create_spot_datafeed_subscription is not yet implemented') def delete_spot_datafeed_subscription(self): if self.is_not_dryrun('DeleteSpotDatafeedSubscription'): - raise NotImplementedError('SpotInstances.delete_spot_datafeed_subscription is not yet implemented') + raise NotImplementedError( + 'SpotInstances.delete_spot_datafeed_subscription is not yet implemented') def describe_spot_datafeed_subscription(self): - raise NotImplementedError('SpotInstances.describe_spot_datafeed_subscription is not yet implemented') + raise NotImplementedError( + 'SpotInstances.describe_spot_datafeed_subscription 
is not yet implemented') def describe_spot_instance_requests(self): filters = filters_from_querystring(self.querystring) - requests = self.ec2_backend.describe_spot_instance_requests(filters=filters) + requests = self.ec2_backend.describe_spot_instance_requests( + filters=filters) template = self.response_template(DESCRIBE_SPOT_INSTANCES_TEMPLATE) return template.render(requests=requests) def describe_spot_price_history(self): - raise NotImplementedError('SpotInstances.describe_spot_price_history is not yet implemented') + raise NotImplementedError( + 'SpotInstances.describe_spot_price_history is not yet implemented') def request_spot_instances(self): price = self._get_param('SpotPrice') @@ -42,13 +48,17 @@ class SpotInstances(BaseResponse): launch_group = self._get_param('LaunchGroup') availability_zone_group = self._get_param('AvailabilityZoneGroup') key_name = self._get_param('LaunchSpecification.KeyName') - security_groups = self._get_multi_param('LaunchSpecification.SecurityGroup') + security_groups = self._get_multi_param( + 'LaunchSpecification.SecurityGroup') user_data = self._get_param('LaunchSpecification.UserData') - instance_type = self._get_param('LaunchSpecification.InstanceType', 'm1.small') - placement = self._get_param('LaunchSpecification.Placement.AvailabilityZone') + instance_type = self._get_param( + 'LaunchSpecification.InstanceType', 'm1.small') + placement = self._get_param( + 'LaunchSpecification.Placement.AvailabilityZone') kernel_id = self._get_param('LaunchSpecification.KernelId') ramdisk_id = self._get_param('LaunchSpecification.RamdiskId') - monitoring_enabled = self._get_param('LaunchSpecification.Monitoring.Enabled') + monitoring_enabled = self._get_param( + 'LaunchSpecification.Monitoring.Enabled') subnet_id = self._get_param('LaunchSpecification.SubnetId') if self.is_not_dryrun('RequestSpotInstance'): diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py index 9486a3ca1..67fd09a14 100644 --- 
a/moto/ec2/responses/subnets.py +++ b/moto/ec2/responses/subnets.py @@ -5,13 +5,15 @@ from moto.ec2.utils import filters_from_querystring class Subnets(BaseResponse): + def create_subnet(self): vpc_id = self.querystring.get('VpcId')[0] cidr_block = self.querystring.get('CidrBlock')[0] if 'AvailabilityZone' in self.querystring: availability_zone = self.querystring['AvailabilityZone'][0] else: - zone = random.choice(self.ec2_backend.describe_availability_zones()) + zone = random.choice( + self.ec2_backend.describe_availability_zones()) availability_zone = zone.name subnet = self.ec2_backend.create_subnet( vpc_id, diff --git a/moto/ec2/responses/tags.py b/moto/ec2/responses/tags.py index 8c2c43ba7..a747067fb 100644 --- a/moto/ec2/responses/tags.py +++ b/moto/ec2/responses/tags.py @@ -8,7 +8,8 @@ from moto.ec2.utils import sequence_from_querystring, tags_from_query_string, fi class TagResponse(BaseResponse): def create_tags(self): - resource_ids = sequence_from_querystring('ResourceId', self.querystring) + resource_ids = sequence_from_querystring( + 'ResourceId', self.querystring) validate_resource_ids(resource_ids) self.ec2_backend.do_resources_exist(resource_ids) tags = tags_from_query_string(self.querystring) @@ -17,7 +18,8 @@ class TagResponse(BaseResponse): return CREATE_RESPONSE def delete_tags(self): - resource_ids = sequence_from_querystring('ResourceId', self.querystring) + resource_ids = sequence_from_querystring( + 'ResourceId', self.querystring) validate_resource_ids(resource_ids) tags = tags_from_query_string(self.querystring) if self.is_not_dryrun('DeleteTags'): diff --git a/moto/ec2/responses/virtual_private_gateways.py b/moto/ec2/responses/virtual_private_gateways.py index e167437d5..2a677d36c 100644 --- a/moto/ec2/responses/virtual_private_gateways.py +++ b/moto/ec2/responses/virtual_private_gateways.py @@ -4,6 +4,7 @@ from moto.ec2.utils import filters_from_querystring class VirtualPrivateGateways(BaseResponse): + def attach_vpn_gateway(self): 
vpn_gateway_id = self.querystring.get('VpnGatewayId')[0] vpc_id = self.querystring.get('VpcId')[0] @@ -42,6 +43,7 @@ class VirtualPrivateGateways(BaseResponse): template = self.response_template(DETACH_VPN_GATEWAY_RESPONSE) return template.render(attachment=attachment) + CREATE_VPN_GATEWAY_RESPONSE = """ 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE diff --git a/moto/ec2/responses/vm_export.py b/moto/ec2/responses/vm_export.py index 98c3dd3ea..6fdf59ba3 100644 --- a/moto/ec2/responses/vm_export.py +++ b/moto/ec2/responses/vm_export.py @@ -3,11 +3,15 @@ from moto.core.responses import BaseResponse class VMExport(BaseResponse): + def cancel_export_task(self): - raise NotImplementedError('VMExport.cancel_export_task is not yet implemented') + raise NotImplementedError( + 'VMExport.cancel_export_task is not yet implemented') def create_instance_export_task(self): - raise NotImplementedError('VMExport.create_instance_export_task is not yet implemented') + raise NotImplementedError( + 'VMExport.create_instance_export_task is not yet implemented') def describe_export_tasks(self): - raise NotImplementedError('VMExport.describe_export_tasks is not yet implemented') + raise NotImplementedError( + 'VMExport.describe_export_tasks is not yet implemented') diff --git a/moto/ec2/responses/vm_import.py b/moto/ec2/responses/vm_import.py index ea88bdc98..8c2ba138c 100644 --- a/moto/ec2/responses/vm_import.py +++ b/moto/ec2/responses/vm_import.py @@ -3,14 +3,19 @@ from moto.core.responses import BaseResponse class VMImport(BaseResponse): + def cancel_conversion_task(self): - raise NotImplementedError('VMImport.cancel_conversion_task is not yet implemented') + raise NotImplementedError( + 'VMImport.cancel_conversion_task is not yet implemented') def describe_conversion_tasks(self): - raise NotImplementedError('VMImport.describe_conversion_tasks is not yet implemented') + raise NotImplementedError( + 'VMImport.describe_conversion_tasks is not yet implemented') def import_instance(self): - raise 
NotImplementedError('VMImport.import_instance is not yet implemented') + raise NotImplementedError( + 'VMImport.import_instance is not yet implemented') def import_volume(self): - raise NotImplementedError('VMImport.import_volume is not yet implemented') + raise NotImplementedError( + 'VMImport.import_volume is not yet implemented') diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index 704dd7a3e..f6bff4310 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -3,34 +3,41 @@ from moto.core.responses import BaseResponse class VPCPeeringConnections(BaseResponse): + def create_vpc_peering_connection(self): vpc = self.ec2_backend.get_vpc(self.querystring.get('VpcId')[0]) - peer_vpc = self.ec2_backend.get_vpc(self.querystring.get('PeerVpcId')[0]) + peer_vpc = self.ec2_backend.get_vpc( + self.querystring.get('PeerVpcId')[0]) vpc_pcx = self.ec2_backend.create_vpc_peering_connection(vpc, peer_vpc) - template = self.response_template(CREATE_VPC_PEERING_CONNECTION_RESPONSE) + template = self.response_template( + CREATE_VPC_PEERING_CONNECTION_RESPONSE) return template.render(vpc_pcx=vpc_pcx) def delete_vpc_peering_connection(self): vpc_pcx_id = self.querystring.get('VpcPeeringConnectionId')[0] vpc_pcx = self.ec2_backend.delete_vpc_peering_connection(vpc_pcx_id) - template = self.response_template(DELETE_VPC_PEERING_CONNECTION_RESPONSE) + template = self.response_template( + DELETE_VPC_PEERING_CONNECTION_RESPONSE) return template.render(vpc_pcx=vpc_pcx) def describe_vpc_peering_connections(self): vpc_pcxs = self.ec2_backend.get_all_vpc_peering_connections() - template = self.response_template(DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE) + template = self.response_template( + DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE) return template.render(vpc_pcxs=vpc_pcxs) def accept_vpc_peering_connection(self): vpc_pcx_id = self.querystring.get('VpcPeeringConnectionId')[0] 
vpc_pcx = self.ec2_backend.accept_vpc_peering_connection(vpc_pcx_id) - template = self.response_template(ACCEPT_VPC_PEERING_CONNECTION_RESPONSE) + template = self.response_template( + ACCEPT_VPC_PEERING_CONNECTION_RESPONSE) return template.render(vpc_pcx=vpc_pcx) def reject_vpc_peering_connection(self): vpc_pcx_id = self.querystring.get('VpcPeeringConnectionId')[0] self.ec2_backend.reject_vpc_peering_connection(vpc_pcx_id) - template = self.response_template(REJECT_VPC_PEERING_CONNECTION_RESPONSE) + template = self.response_template( + REJECT_VPC_PEERING_CONNECTION_RESPONSE) return template.render() diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 3d2a99894..129f91a3b 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -5,9 +5,11 @@ from moto.ec2.utils import filters_from_querystring, vpc_ids_from_querystring class VPCs(BaseResponse): + def create_vpc(self): cidr_block = self.querystring.get('CidrBlock')[0] - instance_tenancy = self.querystring.get('InstanceTenancy', ['default'])[0] + instance_tenancy = self.querystring.get( + 'InstanceTenancy', ['default'])[0] vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy) template = self.response_template(CREATE_VPC_RESPONSE) return template.render(vpc=vpc) @@ -40,7 +42,8 @@ class VPCs(BaseResponse): if self.querystring.get('%s.Value' % attribute): attr_name = camelcase_to_underscores(attribute) attr_value = self.querystring.get('%s.Value' % attribute)[0] - self.ec2_backend.modify_vpc_attribute(vpc_id, attr_name, attr_value) + self.ec2_backend.modify_vpc_attribute( + vpc_id, attr_name, attr_value) return MODIFY_VPC_ATTRIBUTE_RESPONSE diff --git a/moto/ec2/responses/vpn_connections.py b/moto/ec2/responses/vpn_connections.py index 7825e7ebb..2a4a7ef99 100644 --- a/moto/ec2/responses/vpn_connections.py +++ b/moto/ec2/responses/vpn_connections.py @@ -4,23 +4,27 @@ from moto.ec2.utils import filters_from_querystring, sequence_from_querystring class 
VPNConnections(BaseResponse): + def create_vpn_connection(self): type = self.querystring.get("Type", [None])[0] cgw_id = self.querystring.get("CustomerGatewayId", [None])[0] vgw_id = self.querystring.get("VPNGatewayId", [None])[0] static_routes = self.querystring.get("StaticRoutesOnly", [None])[0] - vpn_connection = self.ec2_backend.create_vpn_connection(type, cgw_id, vgw_id, static_routes_only=static_routes) + vpn_connection = self.ec2_backend.create_vpn_connection( + type, cgw_id, vgw_id, static_routes_only=static_routes) template = self.response_template(CREATE_VPN_CONNECTION_RESPONSE) return template.render(vpn_connection=vpn_connection) def delete_vpn_connection(self): vpn_connection_id = self.querystring.get('VpnConnectionId')[0] - vpn_connection = self.ec2_backend.delete_vpn_connection(vpn_connection_id) + vpn_connection = self.ec2_backend.delete_vpn_connection( + vpn_connection_id) template = self.response_template(DELETE_VPN_CONNECTION_RESPONSE) return template.render(vpn_connection=vpn_connection) def describe_vpn_connections(self): - vpn_connection_ids = sequence_from_querystring('VpnConnectionId', self.querystring) + vpn_connection_ids = sequence_from_querystring( + 'VpnConnectionId', self.querystring) filters = filters_from_querystring(self.querystring) vpn_connections = self.ec2_backend.get_all_vpn_connections( vpn_connection_ids=vpn_connection_ids, filters=filters) diff --git a/moto/ec2/responses/windows.py b/moto/ec2/responses/windows.py index 0a5e31a0e..13dfa9b67 100644 --- a/moto/ec2/responses/windows.py +++ b/moto/ec2/responses/windows.py @@ -3,14 +3,19 @@ from moto.core.responses import BaseResponse class Windows(BaseResponse): + def bundle_instance(self): - raise NotImplementedError('Windows.bundle_instance is not yet implemented') + raise NotImplementedError( + 'Windows.bundle_instance is not yet implemented') def cancel_bundle_task(self): - raise NotImplementedError('Windows.cancel_bundle_task is not yet implemented') + raise 
NotImplementedError( + 'Windows.cancel_bundle_task is not yet implemented') def describe_bundle_tasks(self): - raise NotImplementedError('Windows.describe_bundle_tasks is not yet implemented') + raise NotImplementedError( + 'Windows.describe_bundle_tasks is not yet implemented') def get_password_data(self): - raise NotImplementedError('Windows.get_password_data is not yet implemented') + raise NotImplementedError( + 'Windows.get_password_data is not yet implemented') diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 4d0f75254..8cba650a6 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -32,13 +32,15 @@ EC2_RESOURCE_TO_PREFIX = { 'vpn-gateway': 'vgw'} -EC2_PREFIX_TO_RESOURCE = dict((v, k) for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) +EC2_PREFIX_TO_RESOURCE = dict((v, k) + for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) def random_id(prefix='', size=8): chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f'] - resource_id = ''.join(six.text_type(random.choice(chars)) for x in range(size)) + resource_id = ''.join(six.text_type(random.choice(chars)) + for x in range(size)) return '{0}-{1}'.format(prefix, resource_id) @@ -228,7 +230,8 @@ def tags_from_query_string(querystring_dict): tag_key = querystring_dict.get("Tag.{0}.Key".format(tag_index))[0] tag_value_key = "Tag.{0}.Value".format(tag_index) if tag_value_key in querystring_dict: - response_values[tag_key] = querystring_dict.get(tag_value_key)[0] + response_values[tag_key] = querystring_dict.get(tag_value_key)[ + 0] else: response_values[tag_key] = None return response_values @@ -262,7 +265,8 @@ def dhcp_configuration_from_querystring(querystring, option=u'DhcpConfiguration' key_index = key.split(".")[1] value_index = 1 while True: - value_key = u'{0}.{1}.Value.{2}'.format(option, key_index, value_index) + value_key = u'{0}.{1}.Value.{2}'.format( + option, key_index, value_index) if value_key in querystring: values.extend(querystring[value_key]) else: @@ -337,16 +341,20 @@ def get_obj_tag(obj, 
filter_name): tags = dict((tag['key'], tag['value']) for tag in obj.get_tags()) return tags.get(tag_name) + def get_obj_tag_names(obj): tags = set((tag['key'] for tag in obj.get_tags())) return tags + def get_obj_tag_values(obj): tags = set((tag['value'] for tag in obj.get_tags())) return tags + def tag_filter_matches(obj, filter_name, filter_values): - regex_filters = [re.compile(simple_aws_filter_to_re(f)) for f in filter_values] + regex_filters = [re.compile(simple_aws_filter_to_re(f)) + for f in filter_values] if filter_name == 'tag-key': tag_values = get_obj_tag_names(obj) elif filter_name == 'tag-value': @@ -400,7 +408,7 @@ def instance_value_in_filter_values(instance_value, filter_values): if not set(filter_values).intersection(set(instance_value)): return False elif instance_value not in filter_values: - return False + return False return True @@ -464,7 +472,8 @@ def is_filter_matching(obj, filter, filter_value): def generic_filter(filters, objects): if filters: for (_filter, _filter_value) in filters.items(): - objects = [obj for obj in objects if is_filter_matching(obj, _filter, _filter_value)] + objects = [obj for obj in objects if is_filter_matching( + obj, _filter, _filter_value)] return objects @@ -480,8 +489,10 @@ def simple_aws_filter_to_re(filter_string): def random_key_pair(): def random_hex(): return chr(random.choice(list(range(48, 58)) + list(range(97, 102)))) + def random_fingerprint(): - return ':'.join([random_hex()+random_hex() for i in range(20)]) + return ':'.join([random_hex() + random_hex() for i in range(20)]) + def random_material(): return ''.join([ chr(random.choice(list(range(65, 91)) + list(range(48, 58)) + @@ -489,7 +500,7 @@ def random_key_pair(): for i in range(1000) ]) material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \ - "-----END RSA PRIVATE KEY-----" + "-----END RSA PRIVATE KEY-----" return { 'fingerprint': random_fingerprint(), 'material': material @@ -500,9 +511,11 @@ def get_prefix(resource_id): 
resource_id_prefix, separator, after = resource_id.partition('-') if resource_id_prefix == EC2_RESOURCE_TO_PREFIX['network-interface']: if after.startswith('attach'): - resource_id_prefix = EC2_RESOURCE_TO_PREFIX['network-interface-attachment'] + resource_id_prefix = EC2_RESOURCE_TO_PREFIX[ + 'network-interface-attachment'] if resource_id_prefix not in EC2_RESOURCE_TO_PREFIX.values(): - uuid4hex = re.compile('[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}\Z', re.I) + uuid4hex = re.compile( + '[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}\Z', re.I) if uuid4hex.match(resource_id) is not None: resource_id_prefix = EC2_RESOURCE_TO_PREFIX['reserved-instance'] else: @@ -539,20 +552,20 @@ def generate_instance_identity_document(instance): """ document = { - 'devPayProductCodes': None, - 'availabilityZone': instance.placement['AvailabilityZone'], - 'privateIp': instance.private_ip_address, - 'version': '2010-8-31', - 'region': instance.placement['AvailabilityZone'][:-1], - 'instanceId': instance.id, - 'billingProducts': None, - 'instanceType': instance.instance_type, - 'accountId': '012345678910', - 'pendingTime': '2015-11-19T16:32:11Z', - 'imageId': instance.image_id, - 'kernelId': instance.kernel_id, - 'ramdiskId': instance.ramdisk_id, - 'architecture': instance.architecture, - } + 'devPayProductCodes': None, + 'availabilityZone': instance.placement['AvailabilityZone'], + 'privateIp': instance.private_ip_address, + 'version': '2010-8-31', + 'region': instance.placement['AvailabilityZone'][:-1], + 'instanceId': instance.id, + 'billingProducts': None, + 'instanceType': instance.instance_type, + 'accountId': '012345678910', + 'pendingTime': '2015-11-19T16:32:11Z', + 'imageId': instance.image_id, + 'kernelId': instance.kernel_id, + 'ramdiskId': instance.ramdisk_id, + 'architecture': instance.architecture, + } return document diff --git a/moto/ecs/__init__.py b/moto/ecs/__init__.py index 6864355ad..8fb3dd41e 100644 --- a/moto/ecs/__init__.py +++ b/moto/ecs/__init__.py @@ -1,6 +1,6 
@@ from __future__ import unicode_literals from .models import ecs_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator ecs_backend = ecs_backends['us-east-1'] mock_ecs = base_decorator(ecs_backends) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 3ce7be8b5..5a046c376 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -8,6 +8,7 @@ from copy import copy class BaseObject(object): + def camelCase(self, key): words = [] for i, word in enumerate(key.split('_')): @@ -31,9 +32,11 @@ class BaseObject(object): class Cluster(BaseObject): + def __init__(self, cluster_name): self.active_services_count = 0 - self.arn = 'arn:aws:ecs:us-east-1:012345678910:cluster/{0}'.format(cluster_name) + self.arn = 'arn:aws:ecs:us-east-1:012345678910:cluster/{0}'.format( + cluster_name) self.name = cluster_name self.pending_tasks_count = 0 self.registered_container_instances_count = 0 @@ -58,9 +61,12 @@ class Cluster(BaseObject): ecs_backend = ecs_backends[region_name] return ecs_backend.create_cluster( - # ClusterName is optional in CloudFormation, thus create a random name if necessary - cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))), + # ClusterName is optional in CloudFormation, thus create a random + # name if necessary + cluster_name=properties.get( + 'ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))), ) + @classmethod def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -69,8 +75,10 @@ class Cluster(BaseObject): ecs_backend = ecs_backends[region_name] ecs_backend.delete_cluster(original_resource.arn) return ecs_backend.create_cluster( - # ClusterName is optional in CloudFormation, thus create a random name if necessary - cluster_name=properties.get('ClusterName', 
'ecscluster{0}'.format(int(random() * 10 ** 6))), + # ClusterName is optional in CloudFormation, thus create a + # random name if necessary + cluster_name=properties.get( + 'ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))), ) else: # no-op when nothing changed between old and new resources @@ -78,9 +86,11 @@ class Cluster(BaseObject): class TaskDefinition(BaseObject): + def __init__(self, family, revision, container_definitions, volumes=None): self.family = family - self.arn = 'arn:aws:ecs:us-east-1:012345678910:task-definition/{0}:{1}'.format(family, revision) + self.arn = 'arn:aws:ecs:us-east-1:012345678910:task-definition/{0}:{1}'.format( + family, revision) self.container_definitions = container_definitions if volumes is None: self.volumes = [] @@ -98,7 +108,8 @@ class TaskDefinition(BaseObject): def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] - family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6))) + family = properties.get( + 'Family', 'task-definition-{0}'.format(int(random() * 10 ** 6))) container_definitions = properties['ContainerDefinitions'] volumes = properties['Volumes'] @@ -110,14 +121,16 @@ class TaskDefinition(BaseObject): def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] - family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6))) + family = properties.get( + 'Family', 'task-definition-{0}'.format(int(random() * 10 ** 6))) container_definitions = properties['ContainerDefinitions'] volumes = properties['Volumes'] if (original_resource.family != family or - original_resource.container_definitions != container_definitions or - original_resource.volumes != volumes - # currently TaskRoleArn isn't stored at TaskDefinition instances - ): + original_resource.container_definitions 
!= container_definitions or + original_resource.volumes != volumes): + # currently TaskRoleArn isn't stored at TaskDefinition + # instances + ecs_backend = ecs_backends[region_name] ecs_backend.deregister_task_definition(original_resource.arn) return ecs_backend.register_task_definition( @@ -126,10 +139,13 @@ class TaskDefinition(BaseObject): # no-op when nothing changed between old and new resources return original_resource + class Task(BaseObject): + def __init__(self, cluster, task_definition, container_instance_arn, overrides={}, started_by=''): self.cluster_arn = cluster.arn - self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format(str(uuid.uuid1())) + self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format( + str(uuid.uuid1())) self.container_instance_arn = container_instance_arn self.last_status = 'RUNNING' self.desired_status = 'RUNNING' @@ -146,9 +162,11 @@ class Task(BaseObject): class Service(BaseObject): + def __init__(self, cluster, service_name, task_definition, desired_count): self.cluster_arn = cluster.arn - self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format(service_name) + self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format( + service_name) self.name = service_name self.status = 'ACTIVE' self.running_count = 0 @@ -209,7 +227,8 @@ class Service(BaseObject): # TODO: LoadBalancers # TODO: Role ecs_backend.delete_service(cluster_name, service_name) - new_service_name = '{0}Service{1}'.format(cluster_name, int(random() * 10 ** 6)) + new_service_name = '{0}Service{1}'.format( + cluster_name, int(random() * 10 ** 6)) return ecs_backend.create_service( cluster_name, new_service_name, task_definition, desired_count) else: @@ -217,20 +236,22 @@ class Service(BaseObject): class ContainerInstance(BaseObject): + def __init__(self, ec2_instance_id): self.ec2_instance_id = ec2_instance_id self.status = 'ACTIVE' self.registeredResources = [] self.agentConnected = True - self.containerInstanceArn = 
"arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(str(uuid.uuid1())) + self.containerInstanceArn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format( + str(uuid.uuid1())) self.pendingTaskCount = 0 self.remainingResources = [] self.runningTaskCount = 0 self.versionInfo = { - 'agentVersion': "1.0.0", - 'agentHash': '4023248', - 'dockerVersion': 'DockerVersion: 1.5.0' - } + 'agentVersion': "1.0.0", + 'agentHash': '4023248', + 'dockerVersion': 'DockerVersion: 1.5.0' + } @property def response_object(self): @@ -240,9 +261,11 @@ class ContainerInstance(BaseObject): class ContainerInstanceFailure(BaseObject): + def __init__(self, reason, container_instance_id): self.reason = reason - self.arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(container_instance_id) + self.arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format( + container_instance_id) @property def response_object(self): @@ -253,6 +276,7 @@ class ContainerInstanceFailure(BaseObject): class EC2ContainerServiceBackend(BaseBackend): + def __init__(self): self.clusters = {} self.task_definitions = {} @@ -261,19 +285,21 @@ class EC2ContainerServiceBackend(BaseBackend): self.container_instances = {} def describe_task_definition(self, task_definition_str): - task_definition_components = task_definition_str.split(':') - if len(task_definition_components) == 2: - family, revision = task_definition_components + task_definition_name = task_definition_str.split('/')[-1] + if ':' in task_definition_name: + family, revision = task_definition_name.split(':') revision = int(revision) else: - family = task_definition_components[0] - revision = -1 + family = task_definition_name + revision = len(self.task_definitions.get(family, [])) + if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]): return self.task_definitions[family][revision - 1] elif family in self.task_definitions and revision == -1: return 
self.task_definitions[family][revision] else: - raise Exception("{0} is not a task_definition".format(task_definition_str)) + raise Exception( + "{0} is not a task_definition".format(task_definition_name)) def create_cluster(self, cluster_name): cluster = Cluster(cluster_name) @@ -295,9 +321,11 @@ class EC2ContainerServiceBackend(BaseBackend): for cluster in list_clusters_name: cluster_name = cluster.split('/')[-1] if cluster_name in self.clusters: - list_clusters.append(self.clusters[cluster_name].response_object) + list_clusters.append( + self.clusters[cluster_name].response_object) else: - raise Exception("{0} is not a cluster".format(cluster_name)) + raise Exception( + "{0} is not a cluster".format(cluster_name)) return list_clusters def delete_cluster(self, cluster_str): @@ -313,7 +341,8 @@ class EC2ContainerServiceBackend(BaseBackend): else: self.task_definitions[family] = [] revision = 1 - task_definition = TaskDefinition(family, revision, container_definitions, volumes) + task_definition = TaskDefinition( + family, revision, container_definitions, volumes) self.task_definitions[family].append(task_definition) return task_definition @@ -324,23 +353,10 @@ class EC2ContainerServiceBackend(BaseBackend): """ task_arns = [] for task_definition_list in self.task_definitions.values(): - task_arns.extend([task_definition.arn for task_definition in task_definition_list]) + task_arns.extend( + [task_definition.arn for task_definition in task_definition_list]) return task_arns - def describe_task_definition(self, task_definition_str): - task_definition_name = task_definition_str.split('/')[-1] - if ':' in task_definition_name: - family, revision = task_definition_name.split(':') - revision = int(revision) - else: - family = task_definition_name - revision = len(self.task_definitions.get(family, [])) - - if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]): - return self.task_definitions[family][revision-1] - else: - raise 
Exception("{0} is not a task_definition".format(task_definition_name)) - def deregister_task_definition(self, task_definition_str): task_definition_name = task_definition_str.split('/')[-1] family, revision = task_definition_name.split(':') @@ -348,7 +364,8 @@ class EC2ContainerServiceBackend(BaseBackend): if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]): return self.task_definitions[family].pop(revision - 1) else: - raise Exception("{0} is not a task_definition".format(task_definition_name)) + raise Exception( + "{0} is not a task_definition".format(task_definition_name)) def run_task(self, cluster_str, task_definition_str, count, overrides, started_by): cluster_name = cluster_str.split('/')[-1] @@ -360,14 +377,17 @@ class EC2ContainerServiceBackend(BaseBackend): if cluster_name not in self.tasks: self.tasks[cluster_name] = {} tasks = [] - container_instances = list(self.container_instances.get(cluster_name, {}).keys()) + container_instances = list( + self.container_instances.get(cluster_name, {}).keys()) if not container_instances: - raise Exception("No instances found in cluster {}".format(cluster_name)) + raise Exception( + "No instances found in cluster {}".format(cluster_name)) for _ in range(count or 1): container_instance_arn = self.container_instances[cluster_name][ container_instances[randint(0, len(container_instances) - 1)] ].containerInstanceArn - task = Task(cluster, task_definition, container_instance_arn, overrides or {}, started_by or '') + task = Task(cluster, task_definition, container_instance_arn, + overrides or {}, started_by or '') tasks.append(task) self.tasks[cluster_name][task.task_arn] = task return tasks @@ -385,13 +405,15 @@ class EC2ContainerServiceBackend(BaseBackend): if not container_instances: raise Exception("No container instance list provided") - container_instance_ids = [x.split('/')[-1] for x in container_instances] + container_instance_ids = [x.split('/')[-1] + for x in 
container_instances] for container_instance_id in container_instance_ids: container_instance_arn = self.container_instances[cluster_name][ container_instance_id ].containerInstanceArn - task = Task(cluster, task_definition, container_instance_arn, overrides or {}, started_by or '') + task = Task(cluster, task_definition, container_instance_arn, + overrides or {}, started_by or '') tasks.append(task) self.tasks[cluster_name][task.task_arn] = task return tasks @@ -418,17 +440,18 @@ class EC2ContainerServiceBackend(BaseBackend): filtered_tasks.append(task) if cluster_str: cluster_name = cluster_str.split('/')[-1] - if cluster_name in self.clusters: - cluster = self.clusters[cluster_name] - else: + if cluster_name not in self.clusters: raise Exception("{0} is not a cluster".format(cluster_name)) - filtered_tasks = list(filter(lambda t: cluster_name in t.cluster_arn, filtered_tasks)) + filtered_tasks = list( + filter(lambda t: cluster_name in t.cluster_arn, filtered_tasks)) if container_instance: - filtered_tasks = list(filter(lambda t: container_instance in t.container_instance_arn, filtered_tasks)) + filtered_tasks = list(filter( + lambda t: container_instance in t.container_instance_arn, filtered_tasks)) if started_by: - filtered_tasks = list(filter(lambda t: started_by == t.started_by, filtered_tasks)) + filtered_tasks = list( + filter(lambda t: started_by == t.started_by, filtered_tasks)) return [t.task_arn for t in filtered_tasks] def stop_task(self, cluster_str, task_str, reason): @@ -441,14 +464,16 @@ class EC2ContainerServiceBackend(BaseBackend): task_id = task_str.split('/')[-1] tasks = self.tasks.get(cluster_name, None) if not tasks: - raise Exception("Cluster {} has no registered tasks".format(cluster_name)) + raise Exception( + "Cluster {} has no registered tasks".format(cluster_name)) for task in tasks.keys(): if task.endswith(task_id): tasks[task].last_status = 'STOPPED' tasks[task].desired_status = 'STOPPED' tasks[task].stopped_reason = reason return 
tasks[task] - raise Exception("Could not find task {} on cluster {}".format(task_str, cluster_name)) + raise Exception("Could not find task {} on cluster {}".format( + task_str, cluster_name)) def create_service(self, cluster_str, service_name, task_definition_str, desired_count): cluster_name = cluster_str.split('/')[-1] @@ -458,7 +483,8 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("{0} is not a cluster".format(cluster_name)) task_definition = self.describe_task_definition(task_definition_str) desired_count = desired_count if desired_count is not None else 0 - service = Service(cluster, service_name, task_definition, desired_count) + service = Service(cluster, service_name, + task_definition, desired_count) cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) self.services[cluster_service_pair] = service return service @@ -476,7 +502,8 @@ class EC2ContainerServiceBackend(BaseBackend): result = [] for existing_service_name, existing_service_obj in sorted(self.services.items()): for requested_name_or_arn in service_names_or_arns: - cluster_service_pair = '{0}:{1}'.format(cluster_name, requested_name_or_arn) + cluster_service_pair = '{0}:{1}'.format( + cluster_name, requested_name_or_arn) if cluster_service_pair == existing_service_name or existing_service_obj.arn == requested_name_or_arn: result.append(existing_service_obj) return result @@ -486,13 +513,16 @@ class EC2ContainerServiceBackend(BaseBackend): cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) if cluster_service_pair in self.services: if task_definition_str is not None: - task_definition = self.describe_task_definition(task_definition_str) - self.services[cluster_service_pair].task_definition = task_definition_str + self.describe_task_definition(task_definition_str) + self.services[ + cluster_service_pair].task_definition = task_definition_str if desired_count is not None: - self.services[cluster_service_pair].desired_count = desired_count + 
self.services[ + cluster_service_pair].desired_count = desired_count return self.services[cluster_service_pair] else: - raise Exception("cluster {0} or service {1} does not exist".format(cluster_name, service_name)) + raise Exception("cluster {0} or service {1} does not exist".format( + cluster_name, service_name)) def delete_service(self, cluster_name, service_name): cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) @@ -503,7 +533,8 @@ class EC2ContainerServiceBackend(BaseBackend): else: return self.services.pop(cluster_service_pair) else: - raise Exception("cluster {0} or service {1} does not exist".format(cluster_name, service_name)) + raise Exception("cluster {0} or service {1} does not exist".format( + cluster_name, service_name)) def register_container_instance(self, cluster_str, ec2_instance_id): cluster_name = cluster_str.split('/')[-1] @@ -512,14 +543,18 @@ class EC2ContainerServiceBackend(BaseBackend): container_instance = ContainerInstance(ec2_instance_id) if not self.container_instances.get(cluster_name): self.container_instances[cluster_name] = {} - container_instance_id = container_instance.containerInstanceArn.split('/')[-1] - self.container_instances[cluster_name][container_instance_id] = container_instance + container_instance_id = container_instance.containerInstanceArn.split( + '/')[-1] + self.container_instances[cluster_name][ + container_instance_id] = container_instance return container_instance def list_container_instances(self, cluster_str): cluster_name = cluster_str.split('/')[-1] - container_instances_values = self.container_instances.get(cluster_name, {}).values() - container_instances = [ci.containerInstanceArn for ci in container_instances_values] + container_instances_values = self.container_instances.get( + cluster_name, {}).values() + container_instances = [ + ci.containerInstanceArn for ci in container_instances_values] return sorted(container_instances) def describe_container_instances(self, cluster_str, 
list_container_instance_ids): @@ -529,11 +564,13 @@ class EC2ContainerServiceBackend(BaseBackend): failures = [] container_instance_objects = [] for container_instance_id in list_container_instance_ids: - container_instance = self.container_instances[cluster_name].get(container_instance_id, None) + container_instance = self.container_instances[ + cluster_name].get(container_instance_id, None) if container_instance is not None: container_instance_objects.append(container_instance) else: - failures.append(ContainerInstanceFailure('MISSING', container_instance_id)) + failures.append(ContainerInstanceFailure( + 'MISSING', container_instance_id)) return container_instance_objects, failures diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index a8c0dddac..b28ec6a4e 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -1,12 +1,12 @@ from __future__ import unicode_literals import json -import uuid from moto.core.responses import BaseResponse from .models import ecs_backends class EC2ContainerServiceResponse(BaseResponse): + @property def ecs_backend(self): return ecs_backends[self.region] @@ -34,8 +34,7 @@ class EC2ContainerServiceResponse(BaseResponse): cluster_arns = self.ecs_backend.list_clusters() return json.dumps({ 'clusterArns': cluster_arns - #, - #'nextToken': str(uuid.uuid1()) + # 'nextToken': str(uuid.uuid1()) }) def describe_clusters(self): @@ -57,7 +56,8 @@ class EC2ContainerServiceResponse(BaseResponse): family = self._get_param('family') container_definitions = self._get_param('containerDefinitions') volumes = self._get_param('volumes') - task_definition = self.ecs_backend.register_task_definition(family, container_definitions, volumes) + task_definition = self.ecs_backend.register_task_definition( + family, container_definitions, volumes) return json.dumps({ 'taskDefinition': task_definition.response_object }) @@ -66,43 +66,7 @@ class EC2ContainerServiceResponse(BaseResponse): task_definition_arns = 
self.ecs_backend.list_task_definitions() return json.dumps({ 'taskDefinitionArns': task_definition_arns - #, - #'nextToken': str(uuid.uuid1()) - }) - - def describe_task_definition(self): - task_definition_str = self._get_param('taskDefinition') - task_definition = self.ecs_backend.describe_task_definition(task_definition_str) - return json.dumps({ - 'taskDefinition': task_definition.response_object - }) - - def deregister_task_definition(self): - task_definition_str = self._get_param('taskDefinition') - task_definition = self.ecs_backend.deregister_task_definition(task_definition_str) - return json.dumps({ - 'taskDefinition': task_definition.response_object - }) - - def run_task(self): - cluster_str = self._get_param('cluster') - overrides = self._get_param('overrides') - task_definition_str = self._get_param('taskDefinition') - count = self._get_int_param('count') - started_by = self._get_param('startedBy') - tasks = self.ecs_backend.run_task(cluster_str, task_definition_str, count, overrides, started_by) - return json.dumps({ - 'tasks': [task.response_object for task in tasks], - 'failures': [] - }) - - def describe_tasks(self): - cluster = self._get_param('cluster') - tasks = self._get_param('tasks') - data = self.ecs_backend.describe_tasks(cluster, tasks) - return json.dumps({ - 'tasks': [task.response_object for task in data], - 'failures': [] + # 'nextToken': str(uuid.uuid1()) }) def describe_task_definition(self): @@ -113,17 +77,48 @@ class EC2ContainerServiceResponse(BaseResponse): 'failures': [] }) + def deregister_task_definition(self): + task_definition_str = self._get_param('taskDefinition') + task_definition = self.ecs_backend.deregister_task_definition( + task_definition_str) + return json.dumps({ + 'taskDefinition': task_definition.response_object + }) + + def run_task(self): + cluster_str = self._get_param('cluster') + overrides = self._get_param('overrides') + task_definition_str = self._get_param('taskDefinition') + count = 
self._get_int_param('count') + started_by = self._get_param('startedBy') + tasks = self.ecs_backend.run_task( + cluster_str, task_definition_str, count, overrides, started_by) + return json.dumps({ + 'tasks': [task.response_object for task in tasks], + 'failures': [] + }) + + def describe_tasks(self): + cluster = self._get_param('cluster') + tasks = self._get_param('tasks') + data = self.ecs_backend.describe_tasks(cluster, tasks) + return json.dumps({ + 'tasks': [task.response_object for task in data], + 'failures': [] + }) + def start_task(self): cluster_str = self._get_param('cluster') overrides = self._get_param('overrides') task_definition_str = self._get_param('taskDefinition') container_instances = self._get_param('containerInstances') started_by = self._get_param('startedBy') - tasks = self.ecs_backend.start_task(cluster_str, task_definition_str, container_instances, overrides, started_by) + tasks = self.ecs_backend.start_task( + cluster_str, task_definition_str, container_instances, overrides, started_by) return json.dumps({ 'tasks': [task.response_object for task in tasks], 'failures': [] - }) + }) def list_tasks(self): cluster_str = self._get_param('cluster') @@ -132,11 +127,11 @@ class EC2ContainerServiceResponse(BaseResponse): started_by = self._get_param('startedBy') service_name = self._get_param('serviceName') desiredStatus = self._get_param('desiredStatus') - task_arns = self.ecs_backend.list_tasks(cluster_str, container_instance, family, started_by, service_name, desiredStatus) + task_arns = self.ecs_backend.list_tasks( + cluster_str, container_instance, family, started_by, service_name, desiredStatus) return json.dumps({ 'taskArns': task_arns - }) - + }) def stop_task(self): cluster_str = self._get_param('cluster') @@ -145,15 +140,15 @@ class EC2ContainerServiceResponse(BaseResponse): task = self.ecs_backend.stop_task(cluster_str, task, reason) return json.dumps({ 'task': task.response_object - }) - + }) def create_service(self): cluster_str = 
self._get_param('cluster') service_name = self._get_param('serviceName') task_definition_str = self._get_param('taskDefinition') desired_count = self._get_int_param('desiredCount') - service = self.ecs_backend.create_service(cluster_str, service_name, task_definition_str, desired_count) + service = self.ecs_backend.create_service( + cluster_str, service_name, task_definition_str, desired_count) return json.dumps({ 'service': service.response_object }) @@ -170,7 +165,8 @@ class EC2ContainerServiceResponse(BaseResponse): def describe_services(self): cluster_str = self._get_param('cluster') service_names = self._get_param('services') - services = self.ecs_backend.describe_services(cluster_str, service_names) + services = self.ecs_backend.describe_services( + cluster_str, service_names) return json.dumps({ 'services': [service.response_object for service in services], 'failures': [] @@ -181,7 +177,8 @@ class EC2ContainerServiceResponse(BaseResponse): service_name = self._get_param('service') task_definition = self._get_param('taskDefinition') desired_count = self._get_int_param('desiredCount') - service = self.ecs_backend.update_service(cluster_str, service_name, task_definition, desired_count) + service = self.ecs_backend.update_service( + cluster_str, service_name, task_definition, desired_count) return json.dumps({ 'service': service.response_object }) @@ -196,17 +193,20 @@ class EC2ContainerServiceResponse(BaseResponse): def register_container_instance(self): cluster_str = self._get_param('cluster') - instance_identity_document_str = self._get_param('instanceIdentityDocument') + instance_identity_document_str = self._get_param( + 'instanceIdentityDocument') instance_identity_document = json.loads(instance_identity_document_str) ec2_instance_id = instance_identity_document["instanceId"] - container_instance = self.ecs_backend.register_container_instance(cluster_str, ec2_instance_id) + container_instance = self.ecs_backend.register_container_instance( + cluster_str, 
ec2_instance_id) return json.dumps({ - 'containerInstance' : container_instance.response_object + 'containerInstance': container_instance.response_object }) def list_container_instances(self): cluster_str = self._get_param('cluster') - container_instance_arns = self.ecs_backend.list_container_instances(cluster_str) + container_instance_arns = self.ecs_backend.list_container_instances( + cluster_str) return json.dumps({ 'containerInstanceArns': container_instance_arns }) @@ -214,8 +214,9 @@ class EC2ContainerServiceResponse(BaseResponse): def describe_container_instances(self): cluster_str = self._get_param('cluster') list_container_instance_arns = self._get_param('containerInstances') - container_instances, failures = self.ecs_backend.describe_container_instances(cluster_str, list_container_instance_arns) + container_instances, failures = self.ecs_backend.describe_container_instances( + cluster_str, list_container_instance_arns) return json.dumps({ - 'failures': [ci.response_object for ci in failures], - 'containerInstances': [ci.response_object for ci in container_instances] + 'failures': [ci.response_object for ci in failures], + 'containerInstances': [ci.response_object for ci in container_instances] }) diff --git a/moto/elb/__init__.py b/moto/elb/__init__.py index a8e8dab8d..e25f2d486 100644 --- a/moto/elb/__init__.py +++ b/moto/elb/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import elb_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator elb_backend = elb_backends['us-east-1'] mock_elb = base_decorator(elb_backends) diff --git a/moto/elb/exceptions.py b/moto/elb/exceptions.py index 338f3c95b..897bd6dd1 100644 --- a/moto/elb/exceptions.py +++ b/moto/elb/exceptions.py @@ -7,6 +7,7 @@ class ELBClientError(RESTError): class DuplicateTagKeysError(ELBClientError): + def __init__(self, cidr): 
super(DuplicateTagKeysError, self).__init__( "DuplicateTagKeys", @@ -15,6 +16,7 @@ class DuplicateTagKeysError(ELBClientError): class LoadBalancerNotFoundError(ELBClientError): + def __init__(self, cidr): super(LoadBalancerNotFoundError, self).__init__( "LoadBalancerNotFound", @@ -23,6 +25,7 @@ class LoadBalancerNotFoundError(ELBClientError): class TooManyTagsError(ELBClientError): + def __init__(self): super(TooManyTagsError, self).__init__( "LoadBalancerNotFound", @@ -30,6 +33,7 @@ class TooManyTagsError(ELBClientError): class BadHealthCheckDefinition(ELBClientError): + def __init__(self): super(BadHealthCheckDefinition, self).__init__( "ValidationError", @@ -37,9 +41,9 @@ class BadHealthCheckDefinition(ELBClientError): class DuplicateLoadBalancerName(ELBClientError): + def __init__(self, name): super(DuplicateLoadBalancerName, self).__init__( "DuplicateLoadBalancerName", "The specified load balancer name already exists for this account: {0}" .format(name)) - diff --git a/moto/elb/models.py b/moto/elb/models.py index 055b08e4d..11559c2e7 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals -import boto.ec2.elb from boto.ec2.elb.attributes import ( LbAttributes, ConnectionSettingAttribute, @@ -22,8 +21,8 @@ from .exceptions import ( ) - class FakeHealthCheck(object): + def __init__(self, timeout, healthy_threshold, unhealthy_threshold, interval, target): self.timeout = timeout @@ -36,6 +35,7 @@ class FakeHealthCheck(object): class FakeListener(object): + def __init__(self, load_balancer_port, instance_port, protocol, ssl_certificate_id): self.load_balancer_port = load_balancer_port self.instance_port = instance_port @@ -48,6 +48,7 @@ class FakeListener(object): class FakeBackend(object): + def __init__(self, instance_port): self.instance_port = instance_port self.policy_names = [] @@ -57,6 +58,7 @@ class FakeBackend(object): class FakeLoadBalancer(object): + def __init__(self, name, zones, ports, 
scheme='internet-facing', vpc_id=None, subnets=None): self.name = name self.health_check = None @@ -78,16 +80,20 @@ class FakeLoadBalancer(object): for port in ports: listener = FakeListener( protocol=(port.get('protocol') or port['Protocol']), - load_balancer_port=(port.get('load_balancer_port') or port['LoadBalancerPort']), - instance_port=(port.get('instance_port') or port['InstancePort']), - ssl_certificate_id=port.get('sslcertificate_id', port.get('SSLCertificateId')), + load_balancer_port=( + port.get('load_balancer_port') or port['LoadBalancerPort']), + instance_port=( + port.get('instance_port') or port['InstancePort']), + ssl_certificate_id=port.get( + 'sslcertificate_id', port.get('SSLCertificateId')), ) self.listeners.append(listener) # it is unclear per the AWS documentation as to when or how backend # information gets set, so let's guess and set it here *shrug* backend = FakeBackend( - instance_port=(port.get('instance_port') or port['InstancePort']), + instance_port=( + port.get('instance_port') or port['InstancePort']), ) self.backends.append(backend) @@ -120,7 +126,8 @@ class FakeLoadBalancer(object): port_policies[port] = policies_for_port for port, policies in port_policies.items(): - elb_backend.set_load_balancer_policies_of_backend_server(new_elb.name, port, list(policies)) + elb_backend.set_load_balancer_policies_of_backend_server( + new_elb.name, port, list(policies)) health_check = properties.get('HealthCheck') if health_check: @@ -137,7 +144,8 @@ class FakeLoadBalancer(object): @classmethod def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): - cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name) + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name) return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name) @classmethod @@ -155,15 +163,19 @@ class 
FakeLoadBalancer(object): def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == 'CanonicalHostedZoneName': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneName" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneName" ]"') elif attribute_name == 'CanonicalHostedZoneNameID': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneNameID" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneNameID" ]"') elif attribute_name == 'DNSName': return self.dns_name elif attribute_name == 'SourceSecurityGroup.GroupName': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.GroupName" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.GroupName" ]"') elif attribute_name == 'SourceSecurityGroup.OwnerAlias': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.OwnerAlias" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.OwnerAlias" ]"') raise UnformattedGetAttTemplateException() @classmethod @@ -224,7 +236,8 @@ class ELBBackend(BaseBackend): vpc_id = subnet.vpc_id if name in self.load_balancers: raise DuplicateLoadBalancerName(name) - new_load_balancer = FakeLoadBalancer(name=name, zones=zones, ports=ports, scheme=scheme, subnets=subnets, vpc_id=vpc_id) + new_load_balancer = FakeLoadBalancer( + name=name, zones=zones, ports=ports, scheme=scheme, subnets=subnets, vpc_id=vpc_id) self.load_balancers[name] = new_load_balancer return new_load_balancer @@ -240,14 +253,16 @@ class ELBBackend(BaseBackend): if lb_port == listener.load_balancer_port: break else: - balancer.listeners.append(FakeListener(lb_port, instance_port, protocol, ssl_certificate_id)) + balancer.listeners.append(FakeListener( + lb_port, instance_port, protocol, ssl_certificate_id)) return balancer def 
describe_load_balancers(self, names): balancers = self.load_balancers.values() if names: - matched_balancers = [balancer for balancer in balancers if balancer.name in names] + matched_balancers = [ + balancer for balancer in balancers if balancer.name in names] if len(names) != len(matched_balancers): missing_elb = list(set(names) - set(matched_balancers))[0] raise LoadBalancerNotFoundError(missing_elb) @@ -288,7 +303,8 @@ class ELBBackend(BaseBackend): if balancer: for idx, listener in enumerate(balancer.listeners): if lb_port == listener.load_balancer_port: - balancer.listeners[idx].ssl_certificate_id = ssl_certificate_id + balancer.listeners[ + idx].ssl_certificate_id = ssl_certificate_id return balancer @@ -299,7 +315,8 @@ class ELBBackend(BaseBackend): def deregister_instances(self, load_balancer_name, instance_ids): load_balancer = self.get_load_balancer(load_balancer_name) - new_instance_ids = [instance_id for instance_id in load_balancer.instance_ids if instance_id not in instance_ids] + new_instance_ids = [ + instance_id for instance_id in load_balancer.instance_ids if instance_id not in instance_ids] load_balancer.instance_ids = new_instance_ids return load_balancer @@ -342,7 +359,8 @@ class ELBBackend(BaseBackend): def set_load_balancer_policies_of_backend_server(self, load_balancer_name, instance_port, policies): load_balancer = self.get_load_balancer(load_balancer_name) - backend = [b for b in load_balancer.backends if int(b.instance_port) == instance_port][0] + backend = [b for b in load_balancer.backends if int( + b.instance_port) == instance_port][0] backend_idx = load_balancer.backends.index(backend) backend.policy_names = policies load_balancer.backends[backend_idx] = backend @@ -350,7 +368,8 @@ class ELBBackend(BaseBackend): def set_load_balancer_policies_of_listener(self, load_balancer_name, load_balancer_port, policies): load_balancer = self.get_load_balancer(load_balancer_name) - listener = [l for l in load_balancer.listeners if 
int(l.load_balancer_port) == load_balancer_port][0] + listener = [l for l in load_balancer.listeners if int( + l.load_balancer_port) == load_balancer_port][0] listener_idx = load_balancer.listeners.index(listener) listener.policy_names = policies load_balancer.listeners[listener_idx] = listener diff --git a/moto/elb/responses.py b/moto/elb/responses.py index cba98e4e0..e90de260e 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -43,9 +43,11 @@ class ELBResponse(BaseResponse): load_balancer_name = self._get_param('LoadBalancerName') ports = self._get_list_prefix("Listeners.member") - self.elb_backend.create_load_balancer_listeners(name=load_balancer_name, ports=ports) + self.elb_backend.create_load_balancer_listeners( + name=load_balancer_name, ports=ports) - template = self.response_template(CREATE_LOAD_BALANCER_LISTENERS_TEMPLATE) + template = self.response_template( + CREATE_LOAD_BALANCER_LISTENERS_TEMPLATE) return template.render() def describe_load_balancers(self): @@ -59,7 +61,8 @@ class ELBResponse(BaseResponse): ports = self._get_multi_param("LoadBalancerPorts.member") ports = [int(port) for port in ports] - self.elb_backend.delete_load_balancer_listeners(load_balancer_name, ports) + self.elb_backend.delete_load_balancer_listeners( + load_balancer_name, ports) template = self.response_template(DELETE_LOAD_BALANCER_LISTENERS) return template.render() @@ -74,7 +77,8 @@ class ELBResponse(BaseResponse): load_balancer_name=self._get_param('LoadBalancerName'), timeout=self._get_param('HealthCheck.Timeout'), healthy_threshold=self._get_param('HealthCheck.HealthyThreshold'), - unhealthy_threshold=self._get_param('HealthCheck.UnhealthyThreshold'), + unhealthy_threshold=self._get_param( + 'HealthCheck.UnhealthyThreshold'), interval=self._get_param('HealthCheck.Interval'), target=self._get_param('HealthCheck.Target'), ) @@ -83,9 +87,11 @@ class ELBResponse(BaseResponse): def register_instances_with_load_balancer(self): load_balancer_name = 
self._get_param('LoadBalancerName') - instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key] + instance_ids = [value[0] for key, value in self.querystring.items( + ) if "Instances.member" in key] template = self.response_template(REGISTER_INSTANCES_TEMPLATE) - load_balancer = self.elb_backend.register_instances(load_balancer_name, instance_ids) + load_balancer = self.elb_backend.register_instances( + load_balancer_name, instance_ids) return template.render(load_balancer=load_balancer) def set_load_balancer_listener_sslcertificate(self): @@ -93,16 +99,19 @@ class ELBResponse(BaseResponse): ssl_certificate_id = self.querystring['SSLCertificateId'][0] lb_port = self.querystring['LoadBalancerPort'][0] - self.elb_backend.set_load_balancer_listener_sslcertificate(load_balancer_name, lb_port, ssl_certificate_id) + self.elb_backend.set_load_balancer_listener_sslcertificate( + load_balancer_name, lb_port, ssl_certificate_id) template = self.response_template(SET_LOAD_BALANCER_SSL_CERTIFICATE) return template.render() def deregister_instances_from_load_balancer(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key] + instance_ids = [value[0] for key, value in self.querystring.items( + ) if "Instances.member" in key] template = self.response_template(DEREGISTER_INSTANCES_TEMPLATE) - load_balancer = self.elb_backend.deregister_instances(load_balancer_name, instance_ids) + load_balancer = self.elb_backend.deregister_instances( + load_balancer_name, instance_ids) return template.render(load_balancer=load_balancer) def describe_load_balancer_attributes(self): @@ -115,11 +124,13 @@ class ELBResponse(BaseResponse): load_balancer_name = self._get_param('LoadBalancerName') load_balancer = self.elb_backend.get_load_balancer(load_balancer_name) - cross_zone = self._get_dict_param("LoadBalancerAttributes.CrossZoneLoadBalancing.") + 
cross_zone = self._get_dict_param( + "LoadBalancerAttributes.CrossZoneLoadBalancing.") if cross_zone: attribute = CrossZoneLoadBalancingAttribute() attribute.enabled = cross_zone["enabled"] == "true" - self.elb_backend.set_cross_zone_load_balancing_attribute(load_balancer_name, attribute) + self.elb_backend.set_cross_zone_load_balancing_attribute( + load_balancer_name, attribute) access_log = self._get_dict_param("LoadBalancerAttributes.AccessLog.") if access_log: @@ -128,20 +139,25 @@ class ELBResponse(BaseResponse): attribute.s3_bucket_name = access_log['s3_bucket_name'] attribute.s3_bucket_prefix = access_log['s3_bucket_prefix'] attribute.emit_interval = access_log["emit_interval"] - self.elb_backend.set_access_log_attribute(load_balancer_name, attribute) + self.elb_backend.set_access_log_attribute( + load_balancer_name, attribute) - connection_draining = self._get_dict_param("LoadBalancerAttributes.ConnectionDraining.") + connection_draining = self._get_dict_param( + "LoadBalancerAttributes.ConnectionDraining.") if connection_draining: attribute = ConnectionDrainingAttribute() attribute.enabled = connection_draining["enabled"] == "true" attribute.timeout = connection_draining["timeout"] - self.elb_backend.set_connection_draining_attribute(load_balancer_name, attribute) + self.elb_backend.set_connection_draining_attribute( + load_balancer_name, attribute) - connection_settings = self._get_dict_param("LoadBalancerAttributes.ConnectionSettings.") + connection_settings = self._get_dict_param( + "LoadBalancerAttributes.ConnectionSettings.") if connection_settings: attribute = ConnectionSettingAttribute() attribute.idle_timeout = connection_settings["idle_timeout"] - self.elb_backend.set_connection_settings_attribute(load_balancer_name, attribute) + self.elb_backend.set_connection_settings_attribute( + load_balancer_name, attribute) template = self.response_template(MODIFY_ATTRIBUTES_TEMPLATE) return template.render(attributes=load_balancer.attributes) @@ -153,7 
+169,8 @@ class ELBResponse(BaseResponse): policy_name = self._get_param("PolicyName") other_policy.policy_name = policy_name - self.elb_backend.create_lb_other_policy(load_balancer_name, other_policy) + self.elb_backend.create_lb_other_policy( + load_balancer_name, other_policy) template = self.response_template(CREATE_LOAD_BALANCER_POLICY_TEMPLATE) return template.render() @@ -165,7 +182,8 @@ class ELBResponse(BaseResponse): policy.policy_name = self._get_param("PolicyName") policy.cookie_name = self._get_param("CookieName") - self.elb_backend.create_app_cookie_stickiness_policy(load_balancer_name, policy) + self.elb_backend.create_app_cookie_stickiness_policy( + load_balancer_name, policy) template = self.response_template(CREATE_LOAD_BALANCER_POLICY_TEMPLATE) return template.render() @@ -181,7 +199,8 @@ class ELBResponse(BaseResponse): else: policy.cookie_expiration_period = None - self.elb_backend.create_lb_cookie_stickiness_policy(load_balancer_name, policy) + self.elb_backend.create_lb_cookie_stickiness_policy( + load_balancer_name, policy) template = self.response_template(CREATE_LOAD_BALANCER_POLICY_TEMPLATE) return template.render() @@ -191,13 +210,16 @@ class ELBResponse(BaseResponse): load_balancer = self.elb_backend.get_load_balancer(load_balancer_name) load_balancer_port = int(self._get_param('LoadBalancerPort')) - mb_listener = [l for l in load_balancer.listeners if int(l.load_balancer_port) == load_balancer_port] + mb_listener = [l for l in load_balancer.listeners if int( + l.load_balancer_port) == load_balancer_port] if mb_listener: policies = self._get_multi_param("PolicyNames.member") - self.elb_backend.set_load_balancer_policies_of_listener(load_balancer_name, load_balancer_port, policies) + self.elb_backend.set_load_balancer_policies_of_listener( + load_balancer_name, load_balancer_port, policies) # else: explode? 
- template = self.response_template(SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE) + template = self.response_template( + SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE) return template.render() def set_load_balancer_policies_for_backend_server(self): @@ -205,20 +227,25 @@ class ELBResponse(BaseResponse): load_balancer = self.elb_backend.get_load_balancer(load_balancer_name) instance_port = int(self.querystring.get('InstancePort')[0]) - mb_backend = [b for b in load_balancer.backends if int(b.instance_port) == instance_port] + mb_backend = [b for b in load_balancer.backends if int( + b.instance_port) == instance_port] if mb_backend: policies = self._get_multi_param('PolicyNames.member') - self.elb_backend.set_load_balancer_policies_of_backend_server(load_balancer_name, instance_port, policies) + self.elb_backend.set_load_balancer_policies_of_backend_server( + load_balancer_name, instance_port, policies) # else: explode? - template = self.response_template(SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE) + template = self.response_template( + SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE) return template.render() def describe_instance_health(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key] + instance_ids = [value[0] for key, value in self.querystring.items( + ) if "Instances.member" in key] if len(instance_ids) == 0: - instance_ids = self.elb_backend.get_load_balancer(load_balancer_name).instance_ids + instance_ids = self.elb_backend.get_load_balancer( + load_balancer_name).instance_ids template = self.response_template(DESCRIBE_INSTANCE_HEALTH_TEMPLATE) return template.render(instance_ids=instance_ids) @@ -226,7 +253,6 @@ class ELBResponse(BaseResponse): for key, value in self.querystring.items(): if "LoadBalancerNames.member" in key: - number = key.split('.')[2] load_balancer_name = value[0] elb = 
self.elb_backend.get_load_balancer(load_balancer_name) if not elb: @@ -241,7 +267,8 @@ class ELBResponse(BaseResponse): for key, value in self.querystring.items(): if "LoadBalancerNames.member" in key: number = key.split('.')[2] - load_balancer_name = self._get_param('LoadBalancerNames.member.{0}'.format(number)) + load_balancer_name = self._get_param( + 'LoadBalancerNames.member.{0}'.format(number)) elb = self.elb_backend.get_load_balancer(load_balancer_name) if not elb: raise LoadBalancerNotFoundError(load_balancer_name) @@ -260,7 +287,8 @@ class ELBResponse(BaseResponse): for key, value in self.querystring.items(): if "LoadBalancerNames.member" in key: number = key.split('.')[2] - load_balancer_name = self._get_param('LoadBalancerNames.member.{0}'.format(number)) + load_balancer_name = self._get_param( + 'LoadBalancerNames.member.{0}'.format(number)) elb = self.elb_backend.get_load_balancer(load_balancer_name) if not elb: raise LoadBalancerNotFoundError(load_balancer_name) @@ -284,7 +312,7 @@ class ELBResponse(BaseResponse): for i in tag_keys: counts[i] = tag_keys.count(i) - counts = sorted(counts.items(), key=lambda i:i[1], reverse=True) + counts = sorted(counts.items(), key=lambda i: i[1], reverse=True) if counts and counts[0][1] > 1: # We have dupes... 
diff --git a/moto/emr/__init__.py b/moto/emr/__init__.py index fc6b4d4ab..b4223f2cb 100644 --- a/moto/emr/__init__.py +++ b/moto/emr/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import emr_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator emr_backend = emr_backends['us-east-1'] mock_emr = base_decorator(emr_backends) diff --git a/moto/emr/models.py b/moto/emr/models.py index 155e4a898..94bc45ecc 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -11,6 +11,7 @@ from .utils import random_instance_group_id, random_cluster_id, random_step_id class FakeApplication(object): + def __init__(self, name, version, args=None, additional_info=None): self.additional_info = additional_info or {} self.args = args or [] @@ -19,6 +20,7 @@ class FakeApplication(object): class FakeBootstrapAction(object): + def __init__(self, args, name, script_path): self.args = args or [] self.name = name @@ -26,6 +28,7 @@ class FakeBootstrapAction(object): class FakeInstanceGroup(object): + def __init__(self, instance_count, instance_role, instance_type, market='ON_DEMAND', name=None, id=None, bid_price=None): self.id = id or random_instance_group_id() @@ -55,6 +58,7 @@ class FakeInstanceGroup(object): class FakeStep(object): + def __init__(self, state, name='', @@ -78,6 +82,7 @@ class FakeStep(object): class FakeCluster(object): + def __init__(self, emr_backend, name, @@ -135,17 +140,24 @@ class FakeCluster(object): 'instance_type': instance_attrs['slave_instance_type'], 'market': 'ON_DEMAND', 'name': 'slave'}]) - self.additional_master_security_groups = instance_attrs.get('additional_master_security_groups') - self.additional_slave_security_groups = instance_attrs.get('additional_slave_security_groups') + self.additional_master_security_groups = instance_attrs.get( + 'additional_master_security_groups') + 
self.additional_slave_security_groups = instance_attrs.get( + 'additional_slave_security_groups') self.availability_zone = instance_attrs.get('availability_zone') self.ec2_key_name = instance_attrs.get('ec2_key_name') self.ec2_subnet_id = instance_attrs.get('ec2_subnet_id') self.hadoop_version = instance_attrs.get('hadoop_version') - self.keep_job_flow_alive_when_no_steps = instance_attrs.get('keep_job_flow_alive_when_no_steps') - self.master_security_group = instance_attrs.get('emr_managed_master_security_group') - self.service_access_security_group = instance_attrs.get('service_access_security_group') - self.slave_security_group = instance_attrs.get('emr_managed_slave_security_group') - self.termination_protected = instance_attrs.get('termination_protected') + self.keep_job_flow_alive_when_no_steps = instance_attrs.get( + 'keep_job_flow_alive_when_no_steps') + self.master_security_group = instance_attrs.get( + 'emr_managed_master_security_group') + self.service_access_security_group = instance_attrs.get( + 'service_access_security_group') + self.slave_security_group = instance_attrs.get( + 'emr_managed_slave_security_group') + self.termination_protected = instance_attrs.get( + 'termination_protected') self.release_label = release_label self.requested_ami_version = requested_ami_version @@ -286,7 +298,8 @@ class ElasticMapReduceBackend(BaseBackend): clusters = self.clusters.values() within_two_month = datetime.now(pytz.utc) - timedelta(days=60) - clusters = [c for c in clusters if c.creation_datetime >= within_two_month] + clusters = [ + c for c in clusters if c.creation_datetime >= within_two_month] if job_flow_ids: clusters = [c for c in clusters if c.id in job_flow_ids] @@ -294,10 +307,12 @@ class ElasticMapReduceBackend(BaseBackend): clusters = [c for c in clusters if c.state in job_flow_states] if created_after: created_after = dtparse(created_after) - clusters = [c for c in clusters if c.creation_datetime > created_after] + clusters = [ + c for c in clusters 
if c.creation_datetime > created_after] if created_before: created_before = dtparse(created_before) - clusters = [c for c in clusters if c.creation_datetime < created_before] + clusters = [ + c for c in clusters if c.creation_datetime < created_before] # Amazon EMR can return a maximum of 512 job flow descriptions return sorted(clusters, key=lambda x: x.id)[:512] @@ -322,7 +337,8 @@ class ElasticMapReduceBackend(BaseBackend): max_items = 50 actions = self.clusters[cluster_id].bootstrap_actions start_idx = 0 if marker is None else int(marker) - marker = None if len(actions) <= start_idx + max_items else str(start_idx + max_items) + marker = None if len(actions) <= start_idx + \ + max_items else str(start_idx + max_items) return actions[start_idx:start_idx + max_items], marker def list_clusters(self, cluster_states=None, created_after=None, @@ -333,13 +349,16 @@ class ElasticMapReduceBackend(BaseBackend): clusters = [c for c in clusters if c.state in cluster_states] if created_after: created_after = dtparse(created_after) - clusters = [c for c in clusters if c.creation_datetime > created_after] + clusters = [ + c for c in clusters if c.creation_datetime > created_after] if created_before: created_before = dtparse(created_before) - clusters = [c for c in clusters if c.creation_datetime < created_before] + clusters = [ + c for c in clusters if c.creation_datetime < created_before] clusters = sorted(clusters, key=lambda x: x.id) start_idx = 0 if marker is None else int(marker) - marker = None if len(clusters) <= start_idx + max_items else str(start_idx + max_items) + marker = None if len(clusters) <= start_idx + \ + max_items else str(start_idx + max_items) return clusters[start_idx:start_idx + max_items], marker def list_instance_groups(self, cluster_id, marker=None): @@ -347,7 +366,8 @@ class ElasticMapReduceBackend(BaseBackend): groups = sorted(self.clusters[cluster_id].instance_groups, key=lambda x: x.id) start_idx = 0 if marker is None else int(marker) - marker = 
None if len(groups) <= start_idx + max_items else str(start_idx + max_items) + marker = None if len(groups) <= start_idx + \ + max_items else str(start_idx + max_items) return groups[start_idx:start_idx + max_items], marker def list_steps(self, cluster_id, marker=None, step_ids=None, step_states=None): @@ -358,7 +378,8 @@ class ElasticMapReduceBackend(BaseBackend): if step_states: steps = [s for s in steps if s.state in step_states] start_idx = 0 if marker is None else int(marker) - marker = None if len(steps) <= start_idx + max_items else str(start_idx + max_items) + marker = None if len(steps) <= start_idx + \ + max_items else str(start_idx + max_items) return steps[start_idx:start_idx + max_items], marker def modify_instance_groups(self, instance_groups): diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 3869c33ff..91dc8cc11 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -29,7 +29,8 @@ def generate_boto3_response(operation): {'x-amzn-requestid': '2690d7eb-ed86-11dd-9877-6fad448a8419', 'date': datetime.now(pytz.utc).strftime('%a, %d %b %Y %H:%M:%S %Z'), 'content-type': 'application/x-amz-json-1.1'}) - resp = xml_to_json_response(self.aws_service_spec, operation, rendered) + resp = xml_to_json_response( + self.aws_service_spec, operation, rendered) return '' if resp is None else json.dumps(resp) return rendered return f @@ -63,14 +64,16 @@ class ElasticMapReduceResponse(BaseResponse): instance_groups = self._get_list_prefix('InstanceGroups.member') for item in instance_groups: item['instance_count'] = int(item['instance_count']) - instance_groups = self.backend.add_instance_groups(jobflow_id, instance_groups) + instance_groups = self.backend.add_instance_groups( + jobflow_id, instance_groups) template = self.response_template(ADD_INSTANCE_GROUPS_TEMPLATE) return template.render(instance_groups=instance_groups) @generate_boto3_response('AddJobFlowSteps') def add_job_flow_steps(self): job_flow_id = self._get_param('JobFlowId') - 
steps = self.backend.add_job_flow_steps(job_flow_id, steps_from_query_string(self._get_list_prefix('Steps.member'))) + steps = self.backend.add_job_flow_steps( + job_flow_id, steps_from_query_string(self._get_list_prefix('Steps.member'))) template = self.response_template(ADD_JOB_FLOW_STEPS_TEMPLATE) return template.render(steps=steps) @@ -104,7 +107,8 @@ class ElasticMapReduceResponse(BaseResponse): created_before = self._get_param('CreatedBefore') job_flow_ids = self._get_multi_param("JobFlowIds.member") job_flow_states = self._get_multi_param('JobFlowStates.member') - clusters = self.backend.describe_job_flows(job_flow_ids, job_flow_states, created_after, created_before) + clusters = self.backend.describe_job_flows( + job_flow_ids, job_flow_states, created_after, created_before) template = self.response_template(DESCRIBE_JOB_FLOWS_TEMPLATE) return template.render(clusters=clusters) @@ -123,7 +127,8 @@ class ElasticMapReduceResponse(BaseResponse): def list_bootstrap_actions(self): cluster_id = self._get_param('ClusterId') marker = self._get_param('Marker') - bootstrap_actions, marker = self.backend.list_bootstrap_actions(cluster_id, marker) + bootstrap_actions, marker = self.backend.list_bootstrap_actions( + cluster_id, marker) template = self.response_template(LIST_BOOTSTRAP_ACTIONS_TEMPLATE) return template.render(bootstrap_actions=bootstrap_actions, marker=marker) @@ -133,7 +138,8 @@ class ElasticMapReduceResponse(BaseResponse): created_after = self._get_param('CreatedAfter') created_before = self._get_param('CreatedBefore') marker = self._get_param('Marker') - clusters, marker = self.backend.list_clusters(cluster_states, created_after, created_before, marker) + clusters, marker = self.backend.list_clusters( + cluster_states, created_after, created_before, marker) template = self.response_template(LIST_CLUSTERS_TEMPLATE) return template.render(clusters=clusters, marker=marker) @@ -141,7 +147,8 @@ class ElasticMapReduceResponse(BaseResponse): def 
list_instance_groups(self): cluster_id = self._get_param('ClusterId') marker = self._get_param('Marker') - instance_groups, marker = self.backend.list_instance_groups(cluster_id, marker=marker) + instance_groups, marker = self.backend.list_instance_groups( + cluster_id, marker=marker) template = self.response_template(LIST_INSTANCE_GROUPS_TEMPLATE) return template.render(instance_groups=instance_groups, marker=marker) @@ -154,7 +161,8 @@ class ElasticMapReduceResponse(BaseResponse): marker = self._get_param('Marker') step_ids = self._get_multi_param('StepIds.member') step_states = self._get_multi_param('StepStates.member') - steps, marker = self.backend.list_steps(cluster_id, marker=marker, step_ids=step_ids, step_states=step_states) + steps, marker = self.backend.list_steps( + cluster_id, marker=marker, step_ids=step_ids, step_states=step_states) template = self.response_template(LIST_STEPS_TEMPLATE) return template.render(steps=steps, marker=marker) @@ -178,19 +186,27 @@ class ElasticMapReduceResponse(BaseResponse): @generate_boto3_response('RunJobFlow') def run_job_flow(self): instance_attrs = dict( - master_instance_type=self._get_param('Instances.MasterInstanceType'), + master_instance_type=self._get_param( + 'Instances.MasterInstanceType'), slave_instance_type=self._get_param('Instances.SlaveInstanceType'), instance_count=self._get_int_param('Instances.InstanceCount', 1), ec2_key_name=self._get_param('Instances.Ec2KeyName'), ec2_subnet_id=self._get_param('Instances.Ec2SubnetId'), hadoop_version=self._get_param('Instances.HadoopVersion'), - availability_zone=self._get_param('Instances.Placement.AvailabilityZone', self.backend.region_name + 'a'), - keep_job_flow_alive_when_no_steps=self._get_bool_param('Instances.KeepJobFlowAliveWhenNoSteps', False), - termination_protected=self._get_bool_param('Instances.TerminationProtected', False), - emr_managed_master_security_group=self._get_param('Instances.EmrManagedMasterSecurityGroup'), - 
emr_managed_slave_security_group=self._get_param('Instances.EmrManagedSlaveSecurityGroup'), - service_access_security_group=self._get_param('Instances.ServiceAccessSecurityGroup'), - additional_master_security_groups=self._get_multi_param('Instances.AdditionalMasterSecurityGroups.member.'), + availability_zone=self._get_param( + 'Instances.Placement.AvailabilityZone', self.backend.region_name + 'a'), + keep_job_flow_alive_when_no_steps=self._get_bool_param( + 'Instances.KeepJobFlowAliveWhenNoSteps', False), + termination_protected=self._get_bool_param( + 'Instances.TerminationProtected', False), + emr_managed_master_security_group=self._get_param( + 'Instances.EmrManagedMasterSecurityGroup'), + emr_managed_slave_security_group=self._get_param( + 'Instances.EmrManagedSlaveSecurityGroup'), + service_access_security_group=self._get_param( + 'Instances.ServiceAccessSecurityGroup'), + additional_master_security_groups=self._get_multi_param( + 'Instances.AdditionalMasterSecurityGroups.member.'), additional_slave_security_groups=self._get_multi_param('Instances.AdditionalSlaveSecurityGroups.member.')) kwargs = dict( @@ -198,8 +214,10 @@ class ElasticMapReduceResponse(BaseResponse): log_uri=self._get_param('LogUri'), job_flow_role=self._get_param('JobFlowRole'), service_role=self._get_param('ServiceRole'), - steps=steps_from_query_string(self._get_list_prefix('Steps.member')), - visible_to_all_users=self._get_bool_param('VisibleToAllUsers', False), + steps=steps_from_query_string( + self._get_list_prefix('Steps.member')), + visible_to_all_users=self._get_bool_param( + 'VisibleToAllUsers', False), instance_attrs=instance_attrs, ) @@ -225,7 +243,8 @@ class ElasticMapReduceResponse(BaseResponse): if key.startswith('properties.'): config.pop(key) config['properties'] = {} - map_items = self._get_map_prefix('Configurations.member.{0}.Properties.entry'.format(idx)) + map_items = self._get_map_prefix( + 'Configurations.member.{0}.Properties.entry'.format(idx)) 
config['properties'] = map_items kwargs['configurations'] = configurations @@ -239,7 +258,8 @@ class ElasticMapReduceResponse(BaseResponse): 'Only one AMI version and release label may be specified. ' 'Provided AMI: {0}, release label: {1}.').format( ami_version, release_label) - raise EmrError(error_type="ValidationException", message=message, template='single_error') + raise EmrError(error_type="ValidationException", + message=message, template='single_error') else: if ami_version: kwargs['requested_ami_version'] = ami_version @@ -256,7 +276,8 @@ class ElasticMapReduceResponse(BaseResponse): self.backend.add_applications( cluster.id, [{'Name': 'Hadoop', 'Version': '0.18'}]) - instance_groups = self._get_list_prefix('Instances.InstanceGroups.member') + instance_groups = self._get_list_prefix( + 'Instances.InstanceGroups.member') if instance_groups: for ig in instance_groups: ig['instance_count'] = int(ig['instance_count']) @@ -274,7 +295,8 @@ class ElasticMapReduceResponse(BaseResponse): def set_termination_protection(self): termination_protection = self._get_param('TerminationProtected') job_ids = self._get_multi_param('JobFlowIds.member') - self.backend.set_termination_protection(job_ids, termination_protection) + self.backend.set_termination_protection( + job_ids, termination_protection) template = self.response_template(SET_TERMINATION_PROTECTION_TEMPLATE) return template.render() diff --git a/moto/emr/utils.py b/moto/emr/utils.py index 328fdd783..4f12522cf 100644 --- a/moto/emr/utils.py +++ b/moto/emr/utils.py @@ -32,7 +32,8 @@ def tags_from_query_string(querystring_dict): tag_key = querystring_dict.get("Tags.{0}.Key".format(tag_index))[0] tag_value_key = "Tags.{0}.Value".format(tag_index) if tag_value_key in querystring_dict: - response_values[tag_key] = querystring_dict.get(tag_value_key)[0] + response_values[tag_key] = querystring_dict.get(tag_value_key)[ + 0] else: response_values[tag_key] = None return response_values @@ -42,7 +43,8 @@ def 
steps_from_query_string(querystring_dict): steps = [] for step in querystring_dict: step['jar'] = step.pop('hadoop_jar_step._jar') - step['properties'] = dict((o['Key'], o['Value']) for o in step.get('properties', [])) + step['properties'] = dict((o['Key'], o['Value']) + for o in step.get('properties', [])) step['args'] = [] idx = 1 keyfmt = 'hadoop_jar_step._args.member.{0}' diff --git a/moto/events/models.py b/moto/events/models.py index 94cca5ee7..3cf2c3d7a 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -53,7 +53,8 @@ class EventsBackend(BaseBackend): def __init__(self): self.rules = {} - # This array tracks the order in which the rules have been added, since 2.6 doesn't have OrderedDicts. + # This array tracks the order in which the rules have been added, since + # 2.6 doesn't have OrderedDicts. self.rules_order = [] self.next_tokens = {} @@ -106,7 +107,8 @@ class EventsBackend(BaseBackend): matching_rules = [] return_obj = {} - start_index, end_index, new_next_token = self._process_token_and_limits(len(self.rules), next_token, limit) + start_index, end_index, new_next_token = self._process_token_and_limits( + len(self.rules), next_token, limit) for i in range(start_index, end_index): rule = self._get_rule_by_index(i) @@ -130,7 +132,8 @@ class EventsBackend(BaseBackend): matching_rules = [] return_obj = {} - start_index, end_index, new_next_token = self._process_token_and_limits(len(self.rules), next_token, limit) + start_index, end_index, new_next_token = self._process_token_and_limits( + len(self.rules), next_token, limit) for i in range(start_index, end_index): rule = self._get_rule_by_index(i) @@ -144,10 +147,12 @@ class EventsBackend(BaseBackend): return return_obj def list_targets_by_rule(self, rule, next_token=None, limit=None): - # We'll let a KeyError exception be thrown for response to handle if rule doesn't exist. + # We'll let a KeyError exception be thrown for response to handle if + # rule doesn't exist. 
rule = self.rules[rule] - start_index, end_index, new_next_token = self._process_token_and_limits(len(rule.targets), next_token, limit) + start_index, end_index, new_next_token = self._process_token_and_limits( + len(rule.targets), next_token, limit) returned_targets = [] return_obj = {} @@ -188,4 +193,5 @@ class EventsBackend(BaseBackend): def test_event_pattern(self): raise NotImplementedError() + events_backend = EventsBackend() diff --git a/moto/events/responses.py b/moto/events/responses.py index 75e703706..d03befe12 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -87,7 +87,8 @@ class EventsHandler(BaseResponse): if not target_arn: return self.error('ValidationException', 'Parameter TargetArn is required.') - rule_names = events_backend.list_rule_names_by_target(target_arn, next_token, limit) + rule_names = events_backend.list_rule_names_by_target( + target_arn, next_token, limit) return json.dumps(rule_names), self.response_headers @@ -118,7 +119,8 @@ class EventsHandler(BaseResponse): return self.error('ValidationException', 'Parameter Rule is required.') try: - targets = events_backend.list_targets_by_rule(rule_name, next_token, limit) + targets = events_backend.list_targets_by_rule( + rule_name, next_token, limit) except KeyError: return self.error('ResourceNotFoundException', 'Rule ' + rule_name + ' does not exist.') @@ -140,7 +142,8 @@ class EventsHandler(BaseResponse): try: json.loads(event_pattern) except ValueError: - # Not quite as informative as the real error, but it'll work for now. + # Not quite as informative as the real error, but it'll work + # for now. 
return self.error('InvalidEventPatternException', 'Event pattern is not valid.') if sched_exp: diff --git a/moto/glacier/__init__.py b/moto/glacier/__init__.py index 49b3375e1..1570fa7d4 100644 --- a/moto/glacier/__init__.py +++ b/moto/glacier/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import glacier_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator glacier_backend = glacier_backends['us-east-1'] mock_glacier = base_decorator(glacier_backends) diff --git a/moto/glacier/models.py b/moto/glacier/models.py index 836e84d37..8e3286887 100644 --- a/moto/glacier/models.py +++ b/moto/glacier/models.py @@ -36,6 +36,7 @@ class ArchiveJob(object): class Vault(object): + def __init__(self, vault_name, region): self.vault_name = vault_name self.region = region diff --git a/moto/glacier/responses.py b/moto/glacier/responses.py index eac9b94c6..cda859b29 100644 --- a/moto/glacier/responses.py +++ b/moto/glacier/responses.py @@ -128,7 +128,8 @@ class GlacierResponse(_TemplateEnvironmentMixin): archive_id = json_body['ArchiveId'] job_id = self.backend.initiate_job(vault_name, archive_id) headers['x-amz-job-id'] = job_id - headers['Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id) + headers[ + 'Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id) return 202, headers, "" @classmethod diff --git a/moto/iam/__init__.py b/moto/iam/__init__.py index c5110b35d..1dda654ce 100644 --- a/moto/iam/__init__.py +++ b/moto/iam/__init__.py @@ -3,4 +3,4 @@ from .models import iam_backend iam_backends = {"global": iam_backend} mock_iam = iam_backend.decorator -mock_iam_deprecated = iam_backend.deprecated_decorator \ No newline at end of file +mock_iam_deprecated = iam_backend.deprecated_decorator diff --git a/moto/iam/models.py b/moto/iam/models.py index d27722f33..91c4a14d7 100644 --- 
a/moto/iam/models.py +++ b/moto/iam/models.py @@ -97,6 +97,7 @@ class Role(object): class InstanceProfile(object): + def __init__(self, instance_profile_id, name, path, roles): self.id = instance_profile_id self.name = name @@ -126,6 +127,7 @@ class InstanceProfile(object): class Certificate(object): + def __init__(self, cert_name, cert_body, private_key, cert_chain=None, path=None): self.cert_name = cert_name self.cert_body = cert_body @@ -139,6 +141,7 @@ class Certificate(object): class AccessKey(object): + def __init__(self, user_name): self.user_name = user_name self.access_key_id = random_access_key() @@ -157,6 +160,7 @@ class AccessKey(object): class Group(object): + def __init__(self, name, path='/'): self.name = name self.id = random_resource_id() @@ -176,6 +180,7 @@ class Group(object): class User(object): + def __init__(self, name, path=None): self.name = name self.id = random_resource_id() @@ -184,7 +189,8 @@ class User(object): datetime.utcnow(), "%Y-%m-%d-%H-%M-%S" ) - self.arn = 'arn:aws:iam::123456789012:user{0}{1}'.format(self.path, name) + self.arn = 'arn:aws:iam::123456789012:user{0}{1}'.format( + self.path, name) self.policies = {} self.access_keys = [] self.password = None @@ -194,7 +200,8 @@ class User(object): try: policy_json = self.policies[policy_name] except KeyError: - raise IAMNotFoundException("Policy {0} not found".format(policy_name)) + raise IAMNotFoundException( + "Policy {0} not found".format(policy_name)) return { 'policy_name': policy_name, @@ -207,7 +214,8 @@ class User(object): def delete_policy(self, policy_name): if policy_name not in self.policies: - raise IAMNotFoundException("Policy {0} not found".format(policy_name)) + raise IAMNotFoundException( + "Policy {0} not found".format(policy_name)) del self.policies[policy_name] @@ -225,7 +233,8 @@ class User(object): self.access_keys.remove(key) break else: - raise IAMNotFoundException("Key {0} not found".format(access_key_id)) + raise IAMNotFoundException( + "Key {0} not 
found".format(access_key_id)) def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException @@ -261,16 +270,18 @@ class User(object): access_key_2_last_rotated = date_created.strftime(date_format) return '{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A'.format(self.name, - self.arn, - date_created.strftime(date_format), - password_enabled, - password_last_used, - date_created.strftime(date_format), - access_key_1_active, - access_key_1_last_rotated, - access_key_2_active, - access_key_2_last_rotated - ) + self.arn, + date_created.strftime( + date_format), + password_enabled, + password_last_used, + date_created.strftime( + date_format), + access_key_1_active, + access_key_1_last_rotated, + access_key_2_active, + access_key_2_last_rotated + ) # predefine AWS managed policies @@ -439,7 +450,8 @@ class IAMBackend(BaseBackend): if scope == 'AWS': policies = [p for p in policies if isinstance(p, AWSManagedPolicy)] elif scope == 'Local': - policies = [p for p in policies if not isinstance(p, AWSManagedPolicy)] + policies = [p for p in policies if not isinstance( + p, AWSManagedPolicy)] if path_prefix: policies = [p for p in policies if p.path.startswith(path_prefix)] @@ -492,7 +504,8 @@ class IAMBackend(BaseBackend): instance_profile_id = random_resource_id() roles = [iam_backend.get_role_by_id(role_id) for role_id in role_ids] - instance_profile = InstanceProfile(instance_profile_id, name, path, roles) + instance_profile = InstanceProfile( + instance_profile_id, name, path, roles) self.instance_profiles[instance_profile_id] = instance_profile return instance_profile @@ -501,7 +514,8 @@ class IAMBackend(BaseBackend): if profile.name == profile_name: return profile - raise IAMNotFoundException("Instance profile {0} not found".format(profile_name)) + raise IAMNotFoundException( + "Instance profile {0} not found".format(profile_name)) def get_instance_profiles(self): return 
self.instance_profiles.values() @@ -546,7 +560,8 @@ class IAMBackend(BaseBackend): def create_group(self, group_name, path='/'): if group_name in self.groups: - raise IAMConflictException("Group {0} already exists".format(group_name)) + raise IAMConflictException( + "Group {0} already exists".format(group_name)) group = Group(group_name, path) self.groups[group_name] = group @@ -557,7 +572,8 @@ class IAMBackend(BaseBackend): try: group = self.groups[group_name] except KeyError: - raise IAMNotFoundException("Group {0} not found".format(group_name)) + raise IAMNotFoundException( + "Group {0} not found".format(group_name)) return group @@ -575,7 +591,8 @@ class IAMBackend(BaseBackend): def create_user(self, user_name, path='/'): if user_name in self.users: - raise IAMConflictException("EntityAlreadyExists", "User {0} already exists".format(user_name)) + raise IAMConflictException( + "EntityAlreadyExists", "User {0} already exists".format(user_name)) user = User(user_name, path) self.users[user_name] = user @@ -595,7 +612,8 @@ class IAMBackend(BaseBackend): try: users = self.users.values() except KeyError: - raise IAMNotFoundException("Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items)) + raise IAMNotFoundException( + "Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items)) return users @@ -603,13 +621,15 @@ class IAMBackend(BaseBackend): # This does not currently deal with PasswordPolicyViolation. 
user = self.get_user(user_name) if user.password: - raise IAMConflictException("User {0} already has password".format(user_name)) + raise IAMConflictException( + "User {0} already has password".format(user_name)) user.password = password def delete_login_profile(self, user_name): user = self.get_user(user_name) if not user.password: - raise IAMNotFoundException("Login profile for {0} not found".format(user_name)) + raise IAMNotFoundException( + "Login profile for {0} not found".format(user_name)) user.password = None def add_user_to_group(self, group_name, user_name): @@ -623,7 +643,8 @@ class IAMBackend(BaseBackend): try: group.users.remove(user) except ValueError: - raise IAMNotFoundException("User {0} not in group {1}".format(user_name, group_name)) + raise IAMNotFoundException( + "User {0} not in group {1}".format(user_name, group_name)) def get_user_policy(self, user_name, policy_name): user = self.get_user(user_name) @@ -672,4 +693,5 @@ class IAMBackend(BaseBackend): report += self.users[user].to_csv() return base64.b64encode(report.encode('ascii')).decode('ascii') + iam_backend = IAMBackend() diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 223691e1e..9bddd21df 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -18,7 +18,8 @@ class IamResponse(BaseResponse): path = self._get_param('Path') policy_document = self._get_param('PolicyDocument') policy_name = self._get_param('PolicyName') - policy = iam_backend.create_policy(description, path, policy_document, policy_name) + policy = iam_backend.create_policy( + description, path, policy_document, policy_name) template = self.response_template(CREATE_POLICY_TEMPLATE) return template.render(policy=policy) @@ -27,7 +28,8 @@ class IamResponse(BaseResponse): max_items = self._get_int_param('MaxItems', 100) path_prefix = self._get_param('PathPrefix', '/') role_name = self._get_param('RoleName') - policies, marker = iam_backend.list_attached_role_policies(role_name, marker=marker, 
max_items=max_items, path_prefix=path_prefix) + policies, marker = iam_backend.list_attached_role_policies( + role_name, marker=marker, max_items=max_items, path_prefix=path_prefix) template = self.response_template(LIST_ATTACHED_ROLE_POLICIES_TEMPLATE) return template.render(policies=policies, marker=marker) @@ -37,16 +39,19 @@ class IamResponse(BaseResponse): only_attached = self._get_bool_param('OnlyAttached', False) path_prefix = self._get_param('PathPrefix', '/') scope = self._get_param('Scope', 'All') - policies, marker = iam_backend.list_policies(marker, max_items, only_attached, path_prefix, scope) + policies, marker = iam_backend.list_policies( + marker, max_items, only_attached, path_prefix, scope) template = self.response_template(LIST_POLICIES_TEMPLATE) return template.render(policies=policies, marker=marker) def create_role(self): role_name = self._get_param('RoleName') path = self._get_param('Path') - assume_role_policy_document = self._get_param('AssumeRolePolicyDocument') + assume_role_policy_document = self._get_param( + 'AssumeRolePolicyDocument') - role = iam_backend.create_role(role_name, assume_role_policy_document, path) + role = iam_backend.create_role( + role_name, assume_role_policy_document, path) template = self.response_template(CREATE_ROLE_TEMPLATE) return template.render(role=role) @@ -74,7 +79,8 @@ class IamResponse(BaseResponse): def get_role_policy(self): role_name = self._get_param('RoleName') policy_name = self._get_param('PolicyName') - policy_name, policy_document = iam_backend.get_role_policy(role_name, policy_name) + policy_name, policy_document = iam_backend.get_role_policy( + role_name, policy_name) template = self.response_template(GET_ROLE_POLICY_TEMPLATE) return template.render(role_name=role_name, policy_name=policy_name, @@ -91,7 +97,8 @@ class IamResponse(BaseResponse): profile_name = self._get_param('InstanceProfileName') path = self._get_param('Path') - profile = iam_backend.create_instance_profile(profile_name, 
path, role_ids=[]) + profile = iam_backend.create_instance_profile( + profile_name, path, role_ids=[]) template = self.response_template(CREATE_INSTANCE_PROFILE_TEMPLATE) return template.render(profile=profile) @@ -107,7 +114,8 @@ class IamResponse(BaseResponse): role_name = self._get_param('RoleName') iam_backend.add_role_to_instance_profile(profile_name, role_name) - template = self.response_template(ADD_ROLE_TO_INSTANCE_PROFILE_TEMPLATE) + template = self.response_template( + ADD_ROLE_TO_INSTANCE_PROFILE_TEMPLATE) return template.render() def remove_role_from_instance_profile(self): @@ -115,7 +123,8 @@ class IamResponse(BaseResponse): role_name = self._get_param('RoleName') iam_backend.remove_role_from_instance_profile(profile_name, role_name) - template = self.response_template(REMOVE_ROLE_FROM_INSTANCE_PROFILE_TEMPLATE) + template = self.response_template( + REMOVE_ROLE_FROM_INSTANCE_PROFILE_TEMPLATE) return template.render() def list_roles(self): @@ -132,9 +141,11 @@ class IamResponse(BaseResponse): def list_instance_profiles_for_role(self): role_name = self._get_param('RoleName') - profiles = iam_backend.get_instance_profiles_for_role(role_name=role_name) + profiles = iam_backend.get_instance_profiles_for_role( + role_name=role_name) - template = self.response_template(LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE) + template = self.response_template( + LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE) return template.render(instance_profiles=profiles) def upload_server_certificate(self): @@ -144,7 +155,8 @@ class IamResponse(BaseResponse): private_key = self._get_param('PrivateKey') cert_chain = self._get_param('CertificateName') - cert = iam_backend.upload_server_cert(cert_name, cert_body, private_key, cert_chain=cert_chain, path=path) + cert = iam_backend.upload_server_cert( + cert_name, cert_body, private_key, cert_chain=cert_chain, path=path) template = self.response_template(UPLOAD_CERT_TEMPLATE) return template.render(certificate=cert) diff --git 
a/moto/instance_metadata/__init__.py b/moto/instance_metadata/__init__.py index 9197bcf7c..d1a674982 100644 --- a/moto/instance_metadata/__init__.py +++ b/moto/instance_metadata/__init__.py @@ -1,4 +1,4 @@ from __future__ import unicode_literals from .models import instance_metadata_backend -instance_metadata_backends = {"global": instance_metadata_backend} \ No newline at end of file +instance_metadata_backends = {"global": instance_metadata_backend} diff --git a/moto/instance_metadata/models.py b/moto/instance_metadata/models.py index b86f86376..8f8d84154 100644 --- a/moto/instance_metadata/models.py +++ b/moto/instance_metadata/models.py @@ -4,4 +4,5 @@ from moto.core.models import BaseBackend class InstanceMetadataBackend(BaseBackend): pass + instance_metadata_backend = InstanceMetadataBackend() diff --git a/moto/instance_metadata/responses.py b/moto/instance_metadata/responses.py index b2de66e7b..2ea9aa9a8 100644 --- a/moto/instance_metadata/responses.py +++ b/moto/instance_metadata/responses.py @@ -7,6 +7,7 @@ from moto.core.responses import BaseResponse class InstanceMetadataResponse(BaseResponse): + def metadata_response(self, request, full_url, headers): """ Mock response for localhost metadata @@ -43,5 +44,6 @@ class InstanceMetadataResponse(BaseResponse): elif path == 'iam/security-credentials/default-role': result = json.dumps(credentials) else: - raise NotImplementedError("The {0} metadata path has not been implemented".format(path)) + raise NotImplementedError( + "The {0} metadata path has not been implemented".format(path)) return 200, headers, result diff --git a/moto/kinesis/__init__.py b/moto/kinesis/__init__.py index c3f06d5b1..7d9767a9f 100644 --- a/moto/kinesis/__init__.py +++ b/moto/kinesis/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import kinesis_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, 
deprecated_base_decorator kinesis_backend = kinesis_backends['us-east-1'] mock_kinesis = base_decorator(kinesis_backends) diff --git a/moto/kinesis/exceptions.py b/moto/kinesis/exceptions.py index 0fcb3652a..e2fe02775 100644 --- a/moto/kinesis/exceptions.py +++ b/moto/kinesis/exceptions.py @@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest class ResourceNotFoundError(BadRequest): + def __init__(self, message): super(ResourceNotFoundError, self).__init__() self.description = json.dumps({ @@ -14,6 +15,7 @@ class ResourceNotFoundError(BadRequest): class ResourceInUseError(BadRequest): + def __init__(self, message): super(ResourceNotFoundError, self).__init__() self.description = json.dumps({ @@ -23,18 +25,21 @@ class ResourceInUseError(BadRequest): class StreamNotFoundError(ResourceNotFoundError): + def __init__(self, stream_name): super(StreamNotFoundError, self).__init__( 'Stream {0} under account 123456789012 not found.'.format(stream_name)) class ShardNotFoundError(ResourceNotFoundError): + def __init__(self, shard_id): super(ShardNotFoundError, self).__init__( 'Shard {0} under account 123456789012 not found.'.format(shard_id)) class InvalidArgumentError(BadRequest): + def __init__(self, message): super(InvalidArgumentError, self).__init__() self.description = json.dumps({ diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index e0e20da3f..5d80426ae 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -18,6 +18,7 @@ from .utils import compose_shard_iterator, compose_new_shard_iterator, decompose class Record(object): + def __init__(self, partition_key, data, sequence_number, explicit_hash_key): self.partition_key = partition_key self.data = data @@ -33,6 +34,7 @@ class Record(object): class Shard(object): + def __init__(self, shard_id, starting_hash, ending_hash): self._shard_id = shard_id self.starting_hash = starting_hash @@ -64,7 +66,8 @@ class Shard(object): else: last_sequence_number = 0 sequence_number = last_sequence_number 
+ 1 - self.records[sequence_number] = Record(partition_key, data, sequence_number, explicit_hash_key) + self.records[sequence_number] = Record( + partition_key, data, sequence_number, explicit_hash_key) return sequence_number def get_min_sequence_number(self): @@ -107,8 +110,10 @@ class Stream(object): izip_longest = itertools.izip_longest for index, start, end in izip_longest(range(shard_count), - range(0,2**128,2**128//shard_count), - range(2**128//shard_count,2**128,2**128//shard_count), + range(0, 2**128, 2 ** + 128 // shard_count), + range(2**128 // shard_count, 2 ** + 128, 2**128 // shard_count), fillvalue=2**128): shard = Shard(index, start, end) self.shards[shard.shard_id] = shard @@ -152,7 +157,8 @@ class Stream(object): def put_record(self, partition_key, explicit_hash_key, sequence_number_for_ordering, data): shard = self.get_shard_for_key(partition_key, explicit_hash_key) - sequence_number = shard.put_record(partition_key, data, explicit_hash_key) + sequence_number = shard.put_record( + partition_key, data, explicit_hash_key) return sequence_number, shard.shard_id def to_json(self): @@ -168,12 +174,14 @@ class Stream(object): class FirehoseRecord(object): + def __init__(self, record_data): self.record_id = 12345678 self.record_data = record_data class DeliveryStream(object): + def __init__(self, stream_name, **stream_kwargs): self.name = stream_name self.redshift_username = stream_kwargs.get('redshift_username') @@ -185,14 +193,18 @@ class DeliveryStream(object): self.s3_role_arn = stream_kwargs.get('s3_role_arn') self.s3_bucket_arn = stream_kwargs.get('s3_bucket_arn') self.s3_prefix = stream_kwargs.get('s3_prefix') - self.s3_compression_format = stream_kwargs.get('s3_compression_format', 'UNCOMPRESSED') + self.s3_compression_format = stream_kwargs.get( + 's3_compression_format', 'UNCOMPRESSED') self.s3_buffering_hings = stream_kwargs.get('s3_buffering_hings') self.redshift_s3_role_arn = stream_kwargs.get('redshift_s3_role_arn') - 
self.redshift_s3_bucket_arn = stream_kwargs.get('redshift_s3_bucket_arn') + self.redshift_s3_bucket_arn = stream_kwargs.get( + 'redshift_s3_bucket_arn') self.redshift_s3_prefix = stream_kwargs.get('redshift_s3_prefix') - self.redshift_s3_compression_format = stream_kwargs.get('redshift_s3_compression_format', 'UNCOMPRESSED') - self.redshift_s3_buffering_hings = stream_kwargs.get('redshift_s3_buffering_hings') + self.redshift_s3_compression_format = stream_kwargs.get( + 'redshift_s3_compression_format', 'UNCOMPRESSED') + self.redshift_s3_buffering_hings = stream_kwargs.get( + 'redshift_s3_buffering_hings') self.records = [] self.status = 'ACTIVE' @@ -231,9 +243,8 @@ class DeliveryStream(object): }, "Username": self.redshift_username, }, - } - ] - + } + ] def to_dict(self): return { @@ -261,10 +272,9 @@ class KinesisBackend(BaseBackend): self.streams = {} self.delivery_streams = {} - def create_stream(self, stream_name, shard_count, region): if stream_name in self.streams: - raise ResourceInUseError(stream_name) + raise ResourceInUseError(stream_name) stream = Stream(stream_name, shard_count, region) self.streams[stream_name] = stream return stream @@ -302,7 +312,8 @@ class KinesisBackend(BaseBackend): records, last_sequence_id = shard.get_records(last_sequence_id, limit) - next_shard_iterator = compose_shard_iterator(stream_name, shard, last_sequence_id) + next_shard_iterator = compose_shard_iterator( + stream_name, shard, last_sequence_id) return next_shard_iterator, records @@ -320,7 +331,7 @@ class KinesisBackend(BaseBackend): response = { "FailedRecordCount": 0, - "Records" : [] + "Records": [] } for record in records: @@ -342,7 +353,7 @@ class KinesisBackend(BaseBackend): stream = self.describe_stream(stream_name) if shard_to_split not in stream.shards: - raise ResourceNotFoundError(shard_to_split) + raise ResourceNotFoundError(shard_to_split) if not re.match(r'0|([1-9]\d{0,38})', new_starting_hash_key): raise InvalidArgumentError(new_starting_hash_key) @@ 
-350,10 +361,12 @@ class KinesisBackend(BaseBackend): shard = stream.shards[shard_to_split] - last_id = sorted(stream.shards.values(), key=attrgetter('_shard_id'))[-1]._shard_id + last_id = sorted(stream.shards.values(), + key=attrgetter('_shard_id'))[-1]._shard_id if shard.starting_hash < new_starting_hash_key < shard.ending_hash: - new_shard = Shard(last_id+1, new_starting_hash_key, shard.ending_hash) + new_shard = Shard( + last_id + 1, new_starting_hash_key, shard.ending_hash) shard.ending_hash = new_starting_hash_key stream.shards[new_shard.shard_id] = new_shard else: @@ -372,10 +385,10 @@ class KinesisBackend(BaseBackend): stream = self.describe_stream(stream_name) if shard_to_merge not in stream.shards: - raise ResourceNotFoundError(shard_to_merge) + raise ResourceNotFoundError(shard_to_merge) if adjacent_shard_to_merge not in stream.shards: - raise ResourceNotFoundError(adjacent_shard_to_merge) + raise ResourceNotFoundError(adjacent_shard_to_merge) shard1 = stream.shards[shard_to_merge] shard2 = stream.shards[adjacent_shard_to_merge] @@ -390,9 +403,11 @@ class KinesisBackend(BaseBackend): del stream.shards[shard2.shard_id] for index in shard2.records: record = shard2.records[index] - shard1.put_record(record.partition_key, record.data, record.explicit_hash_key) + shard1.put_record(record.partition_key, + record.data, record.explicit_hash_key) ''' Firehose ''' + def create_delivery_stream(self, stream_name, **stream_kwargs): stream = DeliveryStream(stream_name, **stream_kwargs) self.delivery_streams[stream_name] = stream @@ -416,19 +431,19 @@ class KinesisBackend(BaseBackend): return record def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None, limit=None): - stream = self.describe_stream(stream_name) + stream = self.describe_stream(stream_name) tags = [] result = { 'HasMoreTags': False, 'Tags': tags } - for key, val in sorted(stream.tags.items(), key=lambda x:x[0]): - if limit and len(res) >= limit: - result['HasMoreTags'] = True - break - 
if exclusive_start_tag_key and key < exexclusive_start_tag_key: - continue + for key, val in sorted(stream.tags.items(), key=lambda x: x[0]): + if limit and len(tags) >= limit: + result['HasMoreTags'] = True + break + if exclusive_start_tag_key and key < exclusive_start_tag_key: + continue tags.append({ 'Key': key, @@ -438,14 +453,14 @@ class KinesisBackend(BaseBackend): return result def add_tags_to_stream(self, stream_name, tags): - stream = self.describe_stream(stream_name) + stream = self.describe_stream(stream_name) stream.tags.update(tags) def remove_tags_from_stream(self, stream_name, tag_keys): - stream = self.describe_stream(stream_name) + stream = self.describe_stream(stream_name) for key in tag_keys: if key in stream.tags: - del stream.tags[key] + del stream.tags[key] kinesis_backends = {} diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index 29f6c07ff..8bc81925f 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -4,7 +4,6 @@ import json from moto.core.responses import BaseResponse from .models import kinesis_backends -from werkzeug.exceptions import BadRequest class KinesisResponse(BaseResponse): @@ -25,7 +24,8 @@ class KinesisResponse(BaseResponse): def create_stream(self): stream_name = self.parameters.get('StreamName') shard_count = self.parameters.get('ShardCount') - self.kinesis_backend.create_stream(stream_name, shard_count, self.region) + self.kinesis_backend.create_stream( + stream_name, shard_count, self.region) return "" def describe_stream(self): @@ -50,7 +50,8 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get("StreamName") shard_id = self.parameters.get("ShardId") shard_iterator_type = self.parameters.get("ShardIteratorType") - starting_sequence_number = self.parameters.get("StartingSequenceNumber") + starting_sequence_number = self.parameters.get( + "StartingSequenceNumber") shard_iterator = self.kinesis_backend.get_shard_iterator( stream_name, shard_id, shard_iterator_type, 
starting_sequence_number, @@ -64,7 +65,8 @@ class KinesisResponse(BaseResponse): shard_iterator = self.parameters.get("ShardIterator") limit = self.parameters.get("Limit") - next_shard_iterator, records = self.kinesis_backend.get_records(shard_iterator, limit) + next_shard_iterator, records = self.kinesis_backend.get_records( + shard_iterator, limit) return json.dumps({ "NextShardIterator": next_shard_iterator, @@ -77,7 +79,8 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get("StreamName") partition_key = self.parameters.get("PartitionKey") explicit_hash_key = self.parameters.get("ExplicitHashKey") - sequence_number_for_ordering = self.parameters.get("SequenceNumberForOrdering") + sequence_number_for_ordering = self.parameters.get( + "SequenceNumberForOrdering") data = self.parameters.get("Data") sequence_number, shard_id = self.kinesis_backend.put_record( @@ -105,7 +108,7 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get("StreamName") shard_to_split = self.parameters.get("ShardToSplit") new_starting_hash_key = self.parameters.get("NewStartingHashKey") - response = self.kinesis_backend.split_shard( + self.kinesis_backend.split_shard( stream_name, shard_to_split, new_starting_hash_key ) return "" @@ -114,15 +117,17 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get("StreamName") shard_to_merge = self.parameters.get("ShardToMerge") adjacent_shard_to_merge = self.parameters.get("AdjacentShardToMerge") - response = self.kinesis_backend.merge_shards( + self.kinesis_backend.merge_shards( stream_name, shard_to_merge, adjacent_shard_to_merge ) return "" ''' Firehose ''' + def create_delivery_stream(self): stream_name = self.parameters['DeliveryStreamName'] - redshift_config = self.parameters.get('RedshiftDestinationConfiguration') + redshift_config = self.parameters.get( + 'RedshiftDestinationConfiguration') if redshift_config: redshift_s3_config = redshift_config['S3Configuration'] @@ -149,7 +154,8 @@ 
class KinesisResponse(BaseResponse): 's3_compression_format': s3_config.get('CompressionFormat'), 's3_buffering_hings': s3_config['BufferingHints'], } - stream = self.kinesis_backend.create_delivery_stream(stream_name, **stream_kwargs) + stream = self.kinesis_backend.create_delivery_stream( + stream_name, **stream_kwargs) return json.dumps({ 'DeliveryStreamARN': stream.arn }) @@ -177,7 +183,8 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters['DeliveryStreamName'] record_data = self.parameters['Record']['Data'] - record = self.kinesis_backend.put_firehose_record(stream_name, record_data) + record = self.kinesis_backend.put_firehose_record( + stream_name, record_data) return json.dumps({ "RecordId": record.record_id, }) @@ -188,7 +195,8 @@ class KinesisResponse(BaseResponse): request_responses = [] for record in records: - record_response = self.kinesis_backend.put_firehose_record(stream_name, record['Data']) + record_response = self.kinesis_backend.put_firehose_record( + stream_name, record['Data']) request_responses.append({ "RecordId": record_response.record_id }) @@ -207,7 +215,8 @@ class KinesisResponse(BaseResponse): stream_name = self.parameters.get('StreamName') exclusive_start_tag_key = self.parameters.get('ExclusiveStartTagKey') limit = self.parameters.get('Limit') - response = self.kinesis_backend.list_tags_for_stream(stream_name, exclusive_start_tag_key, limit) + response = self.kinesis_backend.list_tags_for_stream( + stream_name, exclusive_start_tag_key, limit) return json.dumps(response) def remove_tags_from_stream(self): diff --git a/moto/kinesis/utils.py b/moto/kinesis/utils.py index 0d35b4134..190371b2e 100644 --- a/moto/kinesis/utils.py +++ b/moto/kinesis/utils.py @@ -13,7 +13,8 @@ def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting elif shard_iterator_type == "LATEST": last_sequence_id = shard.get_max_sequence_number() else: - raise InvalidArgumentError("Invalid ShardIteratorType: 
{0}".format(shard_iterator_type)) + raise InvalidArgumentError( + "Invalid ShardIteratorType: {0}".format(shard_iterator_type)) return compose_shard_iterator(stream_name, shard, last_sequence_id) diff --git a/moto/kms/__init__.py b/moto/kms/__init__.py index b6bffa804..b4bb0b639 100644 --- a/moto/kms/__init__.py +++ b/moto/kms/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import kms_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator kms_backend = kms_backends['us-east-1'] mock_kms = base_decorator(kms_backends) diff --git a/moto/kms/models.py b/moto/kms/models.py index 0bfe5791f..37fde9eb8 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -7,6 +7,7 @@ from collections import defaultdict class Key(object): + def __init__(self, policy, key_usage, description, region): self.id = generate_key_id() self.policy = policy @@ -77,7 +78,8 @@ class KmsBackend(BaseBackend): return self.keys.pop(key_id) def describe_key(self, key_id): - # allow the different methods (alias, ARN :key/, keyId, ARN alias) to describe key not just KeyId + # allow the different methods (alias, ARN :key/, keyId, ARN alias) to + # describe key not just KeyId key_id = self.get_key_id(key_id) if r'alias/' in str(key_id).lower(): key_id = self.get_key_id_from_alias(key_id.split('alias/')[1]) @@ -128,6 +130,7 @@ class KmsBackend(BaseBackend): def get_key_policy(self, key_id): return self.keys[self.get_key_id(key_id)].policy + kms_backends = {} for region in boto.kms.regions(): kms_backends[region.name] = KmsBackend() diff --git a/moto/kms/responses.py b/moto/kms/responses.py index 7f0659a64..7ed8927a2 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -18,6 +18,7 @@ reserved_aliases = [ 'alias/aws/rds', ] + class KmsResponse(BaseResponse): @property @@ -33,13 +34,15 @@ class KmsResponse(BaseResponse): key_usage = 
self.parameters.get('KeyUsage') description = self.parameters.get('Description') - key = self.kms_backend.create_key(policy, key_usage, description, self.region) + key = self.kms_backend.create_key( + policy, key_usage, description, self.region) return json.dumps(key.to_dict()) def describe_key(self): key_id = self.parameters.get('KeyId') try: - key = self.kms_backend.describe_key(self.kms_backend.get_key_id(key_id)) + key = self.kms_backend.describe_key( + self.kms_backend.get_key_id(key_id)) except KeyError: headers = dict(self.headers) headers['status'] = 404 @@ -70,7 +73,8 @@ class KmsResponse(BaseResponse): body={'message': 'Invalid identifier', '__type': 'ValidationException'}) if alias_name in reserved_aliases: - raise JSONResponseError(400, 'Bad Request', body={'__type': 'NotAuthorizedException'}) + raise JSONResponseError(400, 'Bad Request', body={ + '__type': 'NotAuthorizedException'}) if ':' in alias_name: raise JSONResponseError(400, 'Bad Request', body={ @@ -81,7 +85,7 @@ class KmsResponse(BaseResponse): raise JSONResponseError(400, 'Bad Request', body={ 'message': "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$" .format(**locals()), - '__type': 'ValidationException'}) + '__type': 'ValidationException'}) if self.kms_backend.alias_exists(target_key_id): raise JSONResponseError(400, 'Bad Request', body={ @@ -120,7 +124,7 @@ class KmsResponse(BaseResponse): response_aliases = [ { 'AliasArn': u'arn:aws:kms:{region}:012345678912:{reserved_alias}'.format(region=region, - reserved_alias=reserved_alias), + reserved_alias=reserved_alias), 'AliasName': reserved_alias } for reserved_alias in reserved_aliases ] @@ -147,7 +151,7 @@ class KmsResponse(BaseResponse): self.kms_backend.enable_key_rotation(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not 
exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps(None) @@ -159,7 +163,7 @@ class KmsResponse(BaseResponse): self.kms_backend.disable_key_rotation(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps(None) @@ -170,7 +174,7 @@ class KmsResponse(BaseResponse): rotation_enabled = self.kms_backend.get_key_rotation_status(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps({'KeyRotationEnabled': rotation_enabled}) @@ -185,7 +189,7 @@ class KmsResponse(BaseResponse): self.kms_backend.put_key_policy(key_id, policy) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps(None) @@ -200,7 +204,7 @@ class KmsResponse(BaseResponse): return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)}) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not 
exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) def list_key_policies(self): @@ -210,7 +214,7 @@ class KmsResponse(BaseResponse): self.kms_backend.describe_key(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), '__type': 'NotFoundException'}) return json.dumps({'Truncated': False, 'PolicyNames': ['default']}) @@ -233,7 +237,9 @@ class KmsResponse(BaseResponse): def _assert_valid_key_id(key_id): if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE): - raise JSONResponseError(404, 'Not Found', body={'message': ' Invalid keyId', '__type': 'NotFoundException'}) + raise JSONResponseError(404, 'Not Found', body={ + 'message': ' Invalid keyId', '__type': 'NotFoundException'}) + def _assert_default_policy(policy_name): if policy_name != 'default': diff --git a/moto/opsworks/__init__.py b/moto/opsworks/__init__.py index d2da1a6a8..b492b6a53 100644 --- a/moto/opsworks/__init__.py +++ b/moto/opsworks/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import opsworks_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator opsworks_backend = opsworks_backends['us-east-1'] mock_opsworks = base_decorator(opsworks_backends) diff --git a/moto/opsworks/exceptions.py b/moto/opsworks/exceptions.py index b408b82f3..00bdffbc5 100644 --- a/moto/opsworks/exceptions.py +++ b/moto/opsworks/exceptions.py @@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest class 
ResourceNotFoundException(BadRequest): + def __init__(self, message): super(ResourceNotFoundException, self).__init__() self.description = json.dumps({ @@ -14,6 +15,7 @@ class ResourceNotFoundException(BadRequest): class ValidationException(BadRequest): + def __init__(self, message): super(ValidationException, self).__init__() self.description = json.dumps({ diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index 68edade9a..a1b8370dd 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -14,6 +14,7 @@ class OpsworkInstance(object): This metadata exists before any instance reservations are made, and is used to populate a reservation request when "start" is called """ + def __init__(self, stack_id, layer_ids, instance_type, ec2_backend, auto_scale_type=None, hostname=None, @@ -154,8 +155,10 @@ class OpsworkInstance(object): d.update({"ReportedAgentVersion": "2425-20160406102508 (fixed)"}) d.update({"RootDeviceVolumeId": "vol-a20e450a (fixed)"}) if self.ssh_keyname is not None: - d.update({"SshHostDsaKeyFingerprint": "24:36:32:fe:d8:5f:9c:18:b1:ad:37:e9:eb:e8:69:58 (fixed)"}) - d.update({"SshHostRsaKeyFingerprint": "3c:bd:37:52:d7:ca:67:e1:6e:4b:ac:31:86:79:f5:6c (fixed)"}) + d.update( + {"SshHostDsaKeyFingerprint": "24:36:32:fe:d8:5f:9c:18:b1:ad:37:e9:eb:e8:69:58 (fixed)"}) + d.update( + {"SshHostRsaKeyFingerprint": "3c:bd:37:52:d7:ca:67:e1:6e:4b:ac:31:86:79:f5:6c (fixed)"}) d.update({"PrivateDns": self.instance.private_dns}) d.update({"PrivateIp": self.instance.private_ip}) d.update({"PublicDns": getattr(self.instance, 'public_dns', None)}) @@ -164,6 +167,7 @@ class OpsworkInstance(object): class Layer(object): + def __init__(self, stack_id, type, name, shortname, attributes=None, custom_instance_profile_arn=None, @@ -283,11 +287,13 @@ class Layer(object): if self.custom_json is not None: d.update({"CustomJson": self.custom_json}) if self.custom_instance_profile_arn is not None: - d.update({"CustomInstanceProfileArn": 
self.custom_instance_profile_arn}) + d.update( + {"CustomInstanceProfileArn": self.custom_instance_profile_arn}) return d class Stack(object): + def __init__(self, name, region, service_role_arn, default_instance_profile_arn, vpcid="vpc-1f99bf7a", attributes=None, @@ -393,6 +399,7 @@ class Stack(object): class OpsWorksBackend(BaseBackend): + def __init__(self, ec2_backend): self.stacks = {} self.layers = {} @@ -457,9 +464,12 @@ class OpsWorksBackend(BaseBackend): kwargs.setdefault("subnet_id", stack.default_subnet_id) kwargs.setdefault("root_device_type", stack.default_root_device_type) if layer.custom_instance_profile_arn: - kwargs.setdefault("instance_profile_arn", layer.custom_instance_profile_arn) - kwargs.setdefault("instance_profile_arn", stack.default_instance_profile_arn) - kwargs.setdefault("security_group_ids", layer.custom_security_group_ids) + kwargs.setdefault("instance_profile_arn", + layer.custom_instance_profile_arn) + kwargs.setdefault("instance_profile_arn", + stack.default_instance_profile_arn) + kwargs.setdefault("security_group_ids", + layer.custom_security_group_ids) kwargs.setdefault("associate_public_ip", layer.auto_assign_public_ips) kwargs.setdefault("ebs_optimized", layer.use_ebs_optimized_instances) kwargs.update({"ec2_backend": self.ec2_backend}) @@ -507,14 +517,16 @@ class OpsWorksBackend(BaseBackend): if layer_id not in self.layers: raise ResourceNotFoundException( "Unable to find layer with ID {0}".format(layer_id)) - instances = [i.to_dict() for i in self.instances.values() if layer_id in i.layer_ids] + instances = [i.to_dict() for i in self.instances.values() + if layer_id in i.layer_ids] return instances if stack_id: if stack_id not in self.stacks: raise ResourceNotFoundException( "Unable to find stack with ID {0}".format(stack_id)) - instances = [i.to_dict() for i in self.instances.values() if stack_id==i.stack_id] + instances = [i.to_dict() for i in self.instances.values() + if stack_id == i.stack_id] return instances def 
start_instance(self, instance_id): diff --git a/moto/opsworks/responses.py b/moto/opsworks/responses.py index 4e0979154..42e0f2c5c 100644 --- a/moto/opsworks/responses.py +++ b/moto/opsworks/responses.py @@ -22,19 +22,24 @@ class OpsWorksResponse(BaseResponse): region=self.parameters.get("Region"), vpcid=self.parameters.get("VpcId"), attributes=self.parameters.get("Attributes"), - default_instance_profile_arn=self.parameters.get("DefaultInstanceProfileArn"), + default_instance_profile_arn=self.parameters.get( + "DefaultInstanceProfileArn"), default_os=self.parameters.get("DefaultOs"), hostname_theme=self.parameters.get("HostnameTheme"), - default_availability_zone=self.parameters.get("DefaultAvailabilityZone"), + default_availability_zone=self.parameters.get( + "DefaultAvailabilityZone"), default_subnet_id=self.parameters.get("DefaultInstanceProfileArn"), custom_json=self.parameters.get("CustomJson"), configuration_manager=self.parameters.get("ConfigurationManager"), chef_configuration=self.parameters.get("ChefConfiguration"), use_custom_cookbooks=self.parameters.get("UseCustomCookbooks"), - use_opsworks_security_groups=self.parameters.get("UseOpsworksSecurityGroups"), - custom_cookbooks_source=self.parameters.get("CustomCookbooksSource"), + use_opsworks_security_groups=self.parameters.get( + "UseOpsworksSecurityGroups"), + custom_cookbooks_source=self.parameters.get( + "CustomCookbooksSource"), default_ssh_keyname=self.parameters.get("DefaultSshKeyName"), - default_root_device_type=self.parameters.get("DefaultRootDeviceType"), + default_root_device_type=self.parameters.get( + "DefaultRootDeviceType"), service_role_arn=self.parameters.get("ServiceRoleArn"), agent_version=self.parameters.get("AgentVersion"), ) @@ -48,18 +53,24 @@ class OpsWorksResponse(BaseResponse): name=self.parameters.get('Name'), shortname=self.parameters.get('Shortname'), attributes=self.parameters.get('Attributes'), - custom_instance_profile_arn=self.parameters.get("CustomInstanceProfileArn"), 
+ custom_instance_profile_arn=self.parameters.get( + "CustomInstanceProfileArn"), custom_json=self.parameters.get("CustomJson"), - custom_security_group_ids=self.parameters.get('CustomSecurityGroupIds'), + custom_security_group_ids=self.parameters.get( + 'CustomSecurityGroupIds'), packages=self.parameters.get('Packages'), volume_configurations=self.parameters.get("VolumeConfigurations"), enable_autohealing=self.parameters.get("EnableAutoHealing"), - auto_assign_elastic_ips=self.parameters.get("AutoAssignElasticIps"), + auto_assign_elastic_ips=self.parameters.get( + "AutoAssignElasticIps"), auto_assign_public_ips=self.parameters.get("AutoAssignPublicIps"), custom_recipes=self.parameters.get("CustomRecipes"), - install_updates_on_boot=self.parameters.get("InstallUpdatesOnBoot"), - use_ebs_optimized_instances=self.parameters.get("UseEbsOptimizedInstances"), - lifecycle_event_configuration=self.parameters.get("LifecycleEventConfiguration") + install_updates_on_boot=self.parameters.get( + "InstallUpdatesOnBoot"), + use_ebs_optimized_instances=self.parameters.get( + "UseEbsOptimizedInstances"), + lifecycle_event_configuration=self.parameters.get( + "LifecycleEventConfiguration") ) layer = self.opsworks_backend.create_layer(**kwargs) return json.dumps({"LayerId": layer.id}, indent=1) @@ -80,7 +91,8 @@ class OpsWorksResponse(BaseResponse): architecture=self.parameters.get("Architecture"), root_device_type=self.parameters.get("RootDeviceType"), block_device_mappings=self.parameters.get("BlockDeviceMappings"), - install_updates_on_boot=self.parameters.get("InstallUpdatesOnBoot"), + install_updates_on_boot=self.parameters.get( + "InstallUpdatesOnBoot"), ebs_optimized=self.parameters.get("EbsOptimized"), agent_version=self.parameters.get("AgentVersion"), ) diff --git a/moto/packages/httpretty/__init__.py b/moto/packages/httpretty/__init__.py index a752b452a..679294a4b 100644 --- a/moto/packages/httpretty/__init__.py +++ b/moto/packages/httpretty/__init__.py @@ -55,6 +55,7 @@ 
def last_request(): """returns the last request""" return httpretty.last_request + def has_request(): """returns a boolean indicating whether any request has been made""" return not isinstance(httpretty.last_request.headers, EmptyRequestHeaders) diff --git a/moto/packages/httpretty/compat.py b/moto/packages/httpretty/compat.py index 6805cf638..b9e215b13 100644 --- a/moto/packages/httpretty/compat.py +++ b/moto/packages/httpretty/compat.py @@ -38,6 +38,7 @@ if PY3: # pragma: no cover basestring = (str, bytes) class BaseClass(object): + def __repr__(self): return self.__str__() else: # pragma: no cover @@ -49,6 +50,7 @@ else: # pragma: no cover class BaseClass(object): + def __repr__(self): ret = self.__str__() if PY3: # pragma: no cover @@ -63,6 +65,7 @@ try: # pragma: no cover except ImportError: # pragma: no cover from urlparse import urlsplit, urlunsplit, parse_qs, unquote from urllib import quote, quote_plus + def unquote_utf8(qs): if isinstance(qs, text_type): qs = qs.encode('utf-8') diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py index 4764cbba9..b409711cf 100644 --- a/moto/packages/httpretty/core.py +++ b/moto/packages/httpretty/core.py @@ -138,6 +138,7 @@ class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass): `content-type` headers values: 'application/json' or 'application/x-www-form-urlencoded' """ + def __init__(self, headers, body=''): # first of all, lets make sure that if headers or body are # unicode strings, it must be converted into a utf-8 encoded @@ -149,8 +150,8 @@ class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass): # `rfile` based on it self.rfile = StringIO(b'\r\n\r\n'.join([self.raw_headers, self.body])) self.wfile = StringIO() # Creating `wfile` as an empty - # StringIO, just to avoid any real - # I/O calls + # StringIO, just to avoid any real + # I/O calls # parsing the request line preemptively self.raw_requestline = self.rfile.readline() @@ -229,12 +230,14 @@ class HTTPrettyRequestEmpty(object): 
class FakeSockFile(StringIO): + def close(self): self.socket.close() StringIO.close(self) class FakeSSLSocket(object): + def __init__(self, sock, *args, **kw): self._httpretty_sock = sock @@ -243,6 +246,7 @@ class FakeSSLSocket(object): class fakesock(object): + class socket(object): _entry = None debuglevel = 0 @@ -374,13 +378,15 @@ class fakesock(object): self.fd.socket = self try: requestline, _ = data.split(b'\r\n', 1) - method, path, version = parse_requestline(decode_utf8(requestline)) + method, path, version = parse_requestline( + decode_utf8(requestline)) is_parsing_headers = True except ValueError: is_parsing_headers = False if not self._entry: - # If the previous request wasn't mocked, don't mock the subsequent sending of data + # If the previous request wasn't mocked, don't mock the + # subsequent sending of data return self.real_sendall(data, *args, **kw) self.fd.seek(0) @@ -492,6 +498,7 @@ def fake_getaddrinfo( class Entry(BaseClass): + def __init__(self, method, uri, body, adding_headers=None, forcing_headers=None, @@ -543,15 +550,15 @@ class Entry(BaseClass): igot = int(got) except ValueError: warnings.warn( - 'HTTPretty got to register the Content-Length header ' \ + 'HTTPretty got to register the Content-Length header ' 'with "%r" which is not a number' % got, ) if igot > self.body_length: raise HTTPrettyError( - 'HTTPretty got inconsistent parameters. The header ' \ - 'Content-Length you registered expects size "%d" but ' \ - 'the body you registered for that has actually length ' \ + 'HTTPretty got inconsistent parameters. The header ' + 'Content-Length you registered expects size "%d" but ' + 'the body you registered for that has actually length ' '"%d".' 
% ( igot, self.body_length, ) @@ -588,7 +595,8 @@ class Entry(BaseClass): headers = self.normalize_headers(headers) status = headers.get('status', self.status) if self.body_is_callable: - status, headers, self.body = self.callable_body(self.request, self.info.full_url(), headers) + status, headers, self.body = self.callable_body( + self.request, self.info.full_url(), headers) if self.request.method != "HEAD": headers.update({ 'content-length': len(self.body) @@ -641,6 +649,7 @@ def url_fix(s, charset='utf-8'): class URIInfo(BaseClass): + def __init__(self, username='', password='', @@ -764,7 +773,7 @@ class URIMatcher(object): self.entries = entries - #hash of current_entry pointers, per method. + # hash of current_entry pointers, per method. self.current_entries = {} def matches(self, info): @@ -788,7 +797,7 @@ class URIMatcher(object): if method not in self.current_entries: self.current_entries[method] = 0 - #restrict selection to entries that match the requested method + # restrict selection to entries that match the requested method entries_for_method = [e for e in self.entries if e.method == method] if self.current_entries[method] >= len(entries_for_method): @@ -841,13 +850,14 @@ class httpretty(HttpBaseClass): try: import urllib3 except ImportError: - raise RuntimeError('HTTPretty requires urllib3 installed for recording actual requests.') - + raise RuntimeError( + 'HTTPretty requires urllib3 installed for recording actual requests.') http = urllib3.PoolManager() cls.enable() calls = [] + def record_request(request, uri, headers): cls.disable() @@ -870,7 +880,8 @@ class httpretty(HttpBaseClass): return response.status, response.headers, response.data for method in cls.METHODS: - cls.register_uri(method, re.compile(r'.*', re.M), body=record_request) + cls.register_uri(method, re.compile( + r'.*', re.M), body=record_request) yield cls.disable() @@ -886,7 +897,8 @@ class httpretty(HttpBaseClass): for item in data: uri = item['request']['uri'] method = 
item['request']['method'] - cls.register_uri(method, uri, body=item['response']['body'], forcing_headers=item['response']['headers']) + cls.register_uri(method, uri, body=item['response'][ + 'body'], forcing_headers=item['response']['headers']) yield cls.disable() diff --git a/moto/packages/httpretty/errors.py b/moto/packages/httpretty/errors.py index cb6479bf5..e2dcad357 100644 --- a/moto/packages/httpretty/errors.py +++ b/moto/packages/httpretty/errors.py @@ -32,6 +32,7 @@ class HTTPrettyError(Exception): class UnmockedError(HTTPrettyError): + def __init__(self): super(UnmockedError, self).__init__( 'No mocking was registered, and real connections are ' diff --git a/moto/packages/responses/responses.py b/moto/packages/responses/responses.py index 735655664..1f5892b25 100644 --- a/moto/packages/responses/responses.py +++ b/moto/packages/responses/responses.py @@ -82,6 +82,7 @@ def get_wrapped(func, wrapper_template, evaldict): class CallList(Sequence, Sized): + def __init__(self): self._calls = [] @@ -298,10 +299,10 @@ class RequestsMock(object): def unbound_on_send(adapter, request, *a, **kwargs): return self._on_request(adapter, request, *a, **kwargs) self._patcher1 = mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send', - unbound_on_send) + unbound_on_send) self._patcher1.start() self._patcher2 = mock.patch('requests.adapters.HTTPAdapter.send', - unbound_on_send) + unbound_on_send) self._patcher2.start() def stop(self, allow_assert=True): diff --git a/moto/packages/responses/setup.py b/moto/packages/responses/setup.py index bab522865..911c07da4 100644 --- a/moto/packages/responses/setup.py +++ b/moto/packages/responses/setup.py @@ -57,6 +57,7 @@ except Exception: class PyTest(TestCommand): + def finalize_options(self): TestCommand.finalize_options(self) self.test_args = ['test_responses.py'] diff --git a/moto/packages/responses/test_responses.py b/moto/packages/responses/test_responses.py index ba0126ad5..967a535cf 100644 --- 
a/moto/packages/responses/test_responses.py +++ b/moto/packages/responses/test_responses.py @@ -284,6 +284,7 @@ def test_custom_adapter(): calls = [0] class DummyAdapter(requests.adapters.HTTPAdapter): + def send(self, *a, **k): calls[0] += 1 return super(DummyAdapter, self).send(*a, **k) diff --git a/moto/rds/__init__.py b/moto/rds/__init__.py index 2c8c0ba97..a4086d89c 100644 --- a/moto/rds/__init__.py +++ b/moto/rds/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import rds_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator rds_backend = rds_backends['us-east-1'] mock_rds = base_decorator(rds_backends) diff --git a/moto/rds/exceptions.py b/moto/rds/exceptions.py index 936b979d2..5bcc95560 100644 --- a/moto/rds/exceptions.py +++ b/moto/rds/exceptions.py @@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest class RDSClientError(BadRequest): + def __init__(self, code, message): super(RDSClientError, self).__init__() self.description = json.dumps({ @@ -18,6 +19,7 @@ class RDSClientError(BadRequest): class DBInstanceNotFoundError(RDSClientError): + def __init__(self, database_identifier): super(DBInstanceNotFoundError, self).__init__( 'DBInstanceNotFound', @@ -25,6 +27,7 @@ class DBInstanceNotFoundError(RDSClientError): class DBSecurityGroupNotFoundError(RDSClientError): + def __init__(self, security_group_name): super(DBSecurityGroupNotFoundError, self).__init__( 'DBSecurityGroupNotFound', @@ -32,6 +35,7 @@ class DBSecurityGroupNotFoundError(RDSClientError): class DBSubnetGroupNotFoundError(RDSClientError): + def __init__(self, subnet_group_name): super(DBSubnetGroupNotFoundError, self).__init__( 'DBSubnetGroupNotFound', diff --git a/moto/rds/models.py b/moto/rds/models.py index b63a30737..4334a9f72 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -1,6 +1,5 @@ from __future__ import 
unicode_literals -import copy import datetime import boto.rds @@ -11,10 +10,10 @@ from moto.core import BaseBackend from moto.core.utils import get_random_hex from moto.ec2.models import ec2_backends from moto.rds2.models import rds2_backends -from .exceptions import DBInstanceNotFoundError, DBSecurityGroupNotFoundError, DBSubnetGroupNotFoundError class Database(object): + def __init__(self, **kwargs): self.status = "available" @@ -35,7 +34,8 @@ class Database(object): self.storage_type = kwargs.get("storage_type") self.master_username = kwargs.get('master_username') self.master_password = kwargs.get('master_password') - self.auto_minor_version_upgrade = kwargs.get('auto_minor_version_upgrade') + self.auto_minor_version_upgrade = kwargs.get( + 'auto_minor_version_upgrade') if self.auto_minor_version_upgrade is None: self.auto_minor_version_upgrade = True self.allocated_storage = kwargs.get('allocated_storage') @@ -57,7 +57,8 @@ class Database(object): self.db_subnet_group_name = kwargs.get("db_subnet_group_name") self.instance_create_time = str(datetime.datetime.utcnow()) if self.db_subnet_group_name: - self.db_subnet_group = rds_backends[self.region].describe_subnet_groups(self.db_subnet_group_name)[0] + self.db_subnet_group = rds_backends[ + self.region].describe_subnet_groups(self.db_subnet_group_name)[0] else: self.db_subnet_group = [] @@ -239,6 +240,7 @@ class Database(object): class SecurityGroup(object): + def __init__(self, group_name, description): self.group_name = group_name self.description = description @@ -284,7 +286,8 @@ class SecurityGroup(object): properties = cloudformation_json['Properties'] group_name = resource_name.lower() + get_random_hex(12) description = properties['GroupDescription'] - security_group_ingress_rules = properties.get('DBSecurityGroupIngress', []) + security_group_ingress_rules = properties.get( + 'DBSecurityGroupIngress', []) tags = properties.get('Tags') ec2_backend = ec2_backends[region_name] @@ -300,10 +303,12 @@ class 
SecurityGroup(object): if ingress_type == "CIDRIP": security_group.authorize_cidr(ingress_value) elif ingress_type == "EC2SecurityGroupName": - subnet = ec2_backend.get_security_group_from_name(ingress_value) + subnet = ec2_backend.get_security_group_from_name( + ingress_value) security_group.authorize_security_group(subnet) elif ingress_type == "EC2SecurityGroupId": - subnet = ec2_backend.get_security_group_from_id(ingress_value) + subnet = ec2_backend.get_security_group_from_id( + ingress_value) security_group.authorize_security_group(subnet) return security_group @@ -313,6 +318,7 @@ class SecurityGroup(object): class SubnetGroup(object): + def __init__(self, subnet_name, description, subnets): self.subnet_name = subnet_name self.description = description @@ -352,7 +358,8 @@ class SubnetGroup(object): tags = properties.get('Tags') ec2_backend = ec2_backends[region_name] - subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids] + subnets = [ec2_backend.get_subnet(subnet_id) + for subnet_id in subnet_ids] rds_backend = rds_backends[region_name] subnet_group = rds_backend.create_subnet_group( subnet_name, @@ -385,4 +392,6 @@ class RDSBackend(BaseBackend): def rds2_backend(self): return rds2_backends[self.region] -rds_backends = dict((region.name, RDSBackend(region.name)) for region in boto.rds.regions()) + +rds_backends = dict((region.name, RDSBackend(region.name)) + for region in boto.rds.regions()) diff --git a/moto/rds/responses.py b/moto/rds/responses.py index 5207264f6..6b51c8fe6 100644 --- a/moto/rds/responses.py +++ b/moto/rds/responses.py @@ -41,7 +41,8 @@ class RDSResponse(BaseResponse): # VpcSecurityGroupIds.member.N "tags": list(), } - args['tags'] = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + args['tags'] = self.unpack_complex_list_params( + 'Tags.Tag', ('Key', 'Value')) return args def _get_db_replica_kwargs(self): @@ -65,7 +66,8 @@ class RDSResponse(BaseResponse): while self._get_param('{0}.{1}.{2}'.format(label, 
count, names[0])): param = dict() for i in range(len(names)): - param[names[i]] = self._get_param('{0}.{1}.{2}'.format(label, count, names[i])) + param[names[i]] = self._get_param( + '{0}.{1}.{2}'.format(label, count, names[i])) unpacked_list.append(param) count += 1 return unpacked_list @@ -93,7 +95,8 @@ class RDSResponse(BaseResponse): def modify_dbinstance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') db_kwargs = self._get_db_kwargs() - database = self.backend.modify_database(db_instance_identifier, db_kwargs) + database = self.backend.modify_database( + db_instance_identifier, db_kwargs) template = self.response_template(MODIFY_DATABASE_TEMPLATE) return template.render(database=database) @@ -107,26 +110,30 @@ class RDSResponse(BaseResponse): group_name = self._get_param('DBSecurityGroupName') description = self._get_param('DBSecurityGroupDescription') tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) - security_group = self.backend.create_security_group(group_name, description, tags) + security_group = self.backend.create_security_group( + group_name, description, tags) template = self.response_template(CREATE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) def describe_dbsecurity_groups(self): security_group_name = self._get_param('DBSecurityGroupName') - security_groups = self.backend.describe_security_groups(security_group_name) + security_groups = self.backend.describe_security_groups( + security_group_name) template = self.response_template(DESCRIBE_SECURITY_GROUPS_TEMPLATE) return template.render(security_groups=security_groups) def delete_dbsecurity_group(self): security_group_name = self._get_param('DBSecurityGroupName') - security_group = self.backend.delete_security_group(security_group_name) + security_group = self.backend.delete_security_group( + security_group_name) template = self.response_template(DELETE_SECURITY_GROUP_TEMPLATE) return 
template.render(security_group=security_group) def authorize_dbsecurity_group_ingress(self): security_group_name = self._get_param('DBSecurityGroupName') cidr_ip = self._get_param('CIDRIP') - security_group = self.backend.authorize_security_group(security_group_name, cidr_ip) + security_group = self.backend.authorize_security_group( + security_group_name, cidr_ip) template = self.response_template(AUTHORIZE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) @@ -134,9 +141,11 @@ class RDSResponse(BaseResponse): subnet_name = self._get_param('DBSubnetGroupName') description = self._get_param('DBSubnetGroupDescription') subnet_ids = self._get_multi_param('SubnetIds.member') - subnets = [ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids] + subnets = [ec2_backends[self.region].get_subnet( + subnet_id) for subnet_id in subnet_ids] tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) - subnet_group = self.backend.create_subnet_group(subnet_name, description, subnets, tags) + subnet_group = self.backend.create_subnet_group( + subnet_name, description, subnets, tags) template = self.response_template(CREATE_SUBNET_GROUP_TEMPLATE) return template.render(subnet_group=subnet_group) diff --git a/moto/rds2/__init__.py b/moto/rds2/__init__.py index 0feecfac4..723fa0968 100644 --- a/moto/rds2/__init__.py +++ b/moto/rds2/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import rds2_backends -from ..core.models import MockAWS, base_decorator, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator rds2_backend = rds2_backends['us-west-1'] mock_rds2 = base_decorator(rds2_backends) diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py index 6fcae4b56..29e92941d 100644 --- a/moto/rds2/exceptions.py +++ b/moto/rds2/exceptions.py @@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest class RDSClientError(BadRequest): + def __init__(self, 
code, message): super(RDSClientError, self).__init__() template = Template(""" @@ -20,6 +21,7 @@ class RDSClientError(BadRequest): class DBInstanceNotFoundError(RDSClientError): + def __init__(self, database_identifier): super(DBInstanceNotFoundError, self).__init__( 'DBInstanceNotFound', @@ -27,6 +29,7 @@ class DBInstanceNotFoundError(RDSClientError): class DBSecurityGroupNotFoundError(RDSClientError): + def __init__(self, security_group_name): super(DBSecurityGroupNotFoundError, self).__init__( 'DBSecurityGroupNotFound', @@ -34,12 +37,15 @@ class DBSecurityGroupNotFoundError(RDSClientError): class DBSubnetGroupNotFoundError(RDSClientError): + def __init__(self, subnet_group_name): super(DBSubnetGroupNotFoundError, self).__init__( 'DBSubnetGroupNotFound', "Subnet Group {0} not found.".format(subnet_group_name)) + class DBParameterGroupNotFoundError(RDSClientError): + def __init__(self, db_parameter_group_name): super(DBParameterGroupNotFoundError, self).__init__( 'DBParameterGroupNotFound', diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 9bb1f8200..52cb298cd 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -4,7 +4,6 @@ import copy from collections import defaultdict import boto.rds2 -import json from jinja2 import Template from re import compile as re_compile from moto.cloudformation.exceptions import UnformattedGetAttTemplateException @@ -18,8 +17,8 @@ from .exceptions import (RDSClientError, DBParameterGroupNotFoundError) - class Database(object): + def __init__(self, **kwargs): self.status = "available" self.is_replica = False @@ -49,7 +48,8 @@ class Database(object): self.storage_type = kwargs.get("storage_type") self.master_username = kwargs.get('master_username') self.master_user_password = kwargs.get('master_user_password') - self.auto_minor_version_upgrade = kwargs.get('auto_minor_version_upgrade') + self.auto_minor_version_upgrade = kwargs.get( + 'auto_minor_version_upgrade') if self.auto_minor_version_upgrade is None: 
self.auto_minor_version_upgrade = True self.allocated_storage = kwargs.get('allocated_storage') @@ -69,18 +69,22 @@ class Database(object): self.multi_az = kwargs.get("multi_az") self.db_subnet_group_name = kwargs.get("db_subnet_group_name") if self.db_subnet_group_name: - self.db_subnet_group = rds2_backends[self.region].describe_subnet_groups(self.db_subnet_group_name)[0] + self.db_subnet_group = rds2_backends[ + self.region].describe_subnet_groups(self.db_subnet_group_name)[0] else: self.db_subnet_group = None self.security_groups = kwargs.get('security_groups', []) self.vpc_security_group_ids = kwargs.get('vpc_security_group_ids', []) - self.preferred_maintenance_window = kwargs.get('preferred_maintenance_window', 'wed:06:38-wed:07:08') + self.preferred_maintenance_window = kwargs.get( + 'preferred_maintenance_window', 'wed:06:38-wed:07:08') self.db_parameter_group_name = kwargs.get('db_parameter_group_name') if self.db_parameter_group_name and self.db_parameter_group_name not in rds2_backends[self.region].db_parameter_groups: - raise DBParameterGroupNotFoundError(self.db_parameter_group_name) + raise DBParameterGroupNotFoundError(self.db_parameter_group_name) - self.preferred_backup_window = kwargs.get('preferred_backup_window', '13:14-13:44') - self.license_model = kwargs.get('license_model', 'general-public-license') + self.preferred_backup_window = kwargs.get( + 'preferred_backup_window', '13:14-13:44') + self.license_model = kwargs.get( + 'license_model', 'general-public-license') self.option_group_name = kwargs.get('option_group_name', None) self.default_option_groups = {"MySQL": "default.mysql5.6", "mysql": "default.mysql5.6", @@ -100,9 +104,9 @@ class Database(object): db_family, db_parameter_group_name = self.default_db_parameter_group_details() description = 'Default parameter group for {0}'.format(db_family) return [DBParameterGroup(name=db_parameter_group_name, - family=db_family, - description=description, - tags={})] + family=db_family, + 
description=description, + tags={})] else: return [rds2_backends[self.region].db_parameter_groups[self.db_parameter_group_name]] @@ -354,12 +358,14 @@ class Database(object): def add_tags(self, tags): new_keys = [tag_set['Key'] for tag_set in tags] - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in new_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in new_keys] self.tags.extend(tags) return self.tags def remove_tags(self, tag_keys): - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] def delete(self, region_name): backend = rds2_backends[region_name] @@ -367,6 +373,7 @@ class Database(object): class SecurityGroup(object): + def __init__(self, group_name, description, tags): self.group_name = group_name self.description = description @@ -430,7 +437,8 @@ class SecurityGroup(object): properties = cloudformation_json['Properties'] group_name = resource_name.lower() + get_random_hex(12) description = properties['GroupDescription'] - security_group_ingress_rules = properties.get('DBSecurityGroupIngress', []) + security_group_ingress_rules = properties.get( + 'DBSecurityGroupIngress', []) tags = properties.get('Tags') ec2_backend = ec2_backends[region_name] @@ -445,10 +453,12 @@ class SecurityGroup(object): if ingress_type == "CIDRIP": security_group.authorize_cidr(ingress_value) elif ingress_type == "EC2SecurityGroupName": - subnet = ec2_backend.get_security_group_from_name(ingress_value) + subnet = ec2_backend.get_security_group_from_name( + ingress_value) security_group.authorize_security_group(subnet) elif ingress_type == "EC2SecurityGroupId": - subnet = ec2_backend.get_security_group_from_id(ingress_value) + subnet = ec2_backend.get_security_group_from_id( + ingress_value) security_group.authorize_security_group(subnet) return security_group @@ -457,12 +467,14 @@ class SecurityGroup(object): def 
add_tags(self, tags): new_keys = [tag_set['Key'] for tag_set in tags] - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in new_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in new_keys] self.tags.extend(tags) return self.tags def remove_tags(self, tag_keys): - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] def delete(self, region_name): backend = rds2_backends[region_name] @@ -470,6 +482,7 @@ class SecurityGroup(object): class SubnetGroup(object): + def __init__(self, subnet_name, description, subnets, tags): self.subnet_name = subnet_name self.description = description @@ -530,7 +543,8 @@ class SubnetGroup(object): tags = properties.get('Tags') ec2_backend = ec2_backends[region_name] - subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids] + subnets = [ec2_backend.get_subnet(subnet_id) + for subnet_id in subnet_ids] rds2_backend = rds2_backends[region_name] subnet_group = rds2_backend.create_subnet_group( subnet_name, @@ -545,12 +559,14 @@ class SubnetGroup(object): def add_tags(self, tags): new_keys = [tag_set['Key'] for tag_set in tags] - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in new_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in new_keys] self.tags.extend(tags) return self.tags def remove_tags(self, tag_keys): - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] def delete(self, region_name): backend = rds2_backends[region_name] @@ -561,7 +577,8 @@ class RDS2Backend(BaseBackend): def __init__(self, region): self.region = region - self.arn_regex = re_compile(r'^arn:aws:rds:.*:[0-9]*:(db|es|og|pg|ri|secgrp|snapshot|subgrp):.*$') + self.arn_regex = re_compile( + 
r'^arn:aws:rds:.*:[0-9]*:(db|es|og|pg|ri|secgrp|snapshot|subgrp):.*$') self.databases = {} self.db_parameter_groups = {} self.option_groups = {} @@ -699,14 +716,16 @@ class RDS2Backend(BaseBackend): raise RDSClientError('InvalidParameterValue', 'The parameter OptionGroupDescription must be provided and must not be blank.') if option_group_kwargs['engine_name'] not in valid_option_group_engines.keys(): - raise RDSClientError('InvalidParameterValue', 'Invalid DB engine: non-existant') + raise RDSClientError('InvalidParameterValue', + 'Invalid DB engine: non-existant') if option_group_kwargs['major_engine_version'] not in\ valid_option_group_engines[option_group_kwargs['engine_name']]: - raise RDSClientError('InvalidParameterCombination', - 'Cannot find major version {0} for {1}'.format( - option_group_kwargs['major_engine_version'], - option_group_kwargs['engine_name'] - )) + raise RDSClientError('InvalidParameterCombination', + 'Cannot find major version {0} for {1}'.format( + option_group_kwargs[ + 'major_engine_version'], + option_group_kwargs['engine_name'] + )) option_group = OptionGroup(**option_group_kwargs) self.option_groups[option_group_id] = option_group return option_group @@ -715,7 +734,8 @@ class RDS2Backend(BaseBackend): if option_group_name in self.option_groups: return self.option_groups.pop(option_group_name) else: - raise RDSClientError('OptionGroupNotFoundFault', 'Specified OptionGroupName: {0} not found.'.format(option_group_name)) + raise RDSClientError( + 'OptionGroupNotFoundFault', 'Specified OptionGroupName: {0} not found.'.format(option_group_name)) def describe_option_groups(self, option_group_kwargs): option_group_list = [] @@ -746,24 +766,25 @@ class RDS2Backend(BaseBackend): if not len(option_group_list): raise RDSClientError('OptionGroupNotFoundFault', 'Specified OptionGroupName: {0} not found.'.format(option_group_kwargs['name'])) - return option_group_list[marker:max_records+marker] + return option_group_list[marker:max_records + 
marker] @staticmethod def describe_option_group_options(engine_name, major_engine_version=None): default_option_group_options = {'mysql': {'5.6': '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. 
The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. 
The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, - 'oracle-ee': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended 
useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime 
EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, - 'oracle-sa': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity 
behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTC
TrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, - 'oracle-sa1': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity 
behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTC
TrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, - 'sqlserver-ee': {'10.50': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - '11.00': '\n \n \n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', - 'all': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - 
Transparent Data EncryptionTDEsqlserver-ee\n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}} + 'all': '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. 
The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'oracle-ee': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity 
behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTC
TrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'oracle-sa': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity 
behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTC
TrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'oracle-sa1': {'11.2': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity 
behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTC
TrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}, + 'sqlserver-ee': {'10.50': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + '11.00': '\n \n \n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n', + 'all': '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n'}} if engine_name not in default_option_group_options: - raise RDSClientError('InvalidParameterValue', 'Invalid DB engine: {0}'.format(engine_name)) + raise RDSClientError('InvalidParameterValue', + 'Invalid DB engine: {0}'.format(engine_name)) if major_engine_version and major_engine_version not in default_option_group_options[engine_name]: raise RDSClientError('InvalidParameterCombination', 'Cannot find major version {0} for {1}'.format(major_engine_version, engine_name)) @@ -779,9 +800,11 @@ class RDS2Backend(BaseBackend): raise RDSClientError('InvalidParameterValue', 'At least one option must be added, modified, or removed.') if options_to_remove: - self.option_groups[option_group_name].remove_options(options_to_remove) + self.option_groups[option_group_name].remove_options( + options_to_remove) if options_to_include: - self.option_groups[option_group_name].add_options(options_to_include) + self.option_groups[option_group_name].add_options( + options_to_include) return self.option_groups[option_group_name] def 
create_db_parameter_group(self, db_parameter_group_kwargs): @@ -821,7 +844,7 @@ class RDS2Backend(BaseBackend): else: continue - return db_parameter_group_list[marker:max_records+marker] + return db_parameter_group_list[marker:max_records + marker] def modify_db_parameter_group(self, db_parameter_group_name, db_parameter_group_parameters): if db_parameter_group_name not in self.db_parameter_groups: @@ -832,22 +855,17 @@ class RDS2Backend(BaseBackend): return db_parameter_group - def delete_db_parameter_group(self, db_parameter_group_name): - if db_parameter_group_name in self.db_parameter_groups: - return self.db_parameter_groups.pop(db_parameter_group_name) - else: - raise DBParameterGroupNotFoundError(db_parameter_group_name) - def list_tags_for_resource(self, arn): if self.arn_regex.match(arn): arn_breakdown = arn.split(':') - resource_type = arn_breakdown[len(arn_breakdown)-2] - resource_name = arn_breakdown[len(arn_breakdown)-1] + resource_type = arn_breakdown[len(arn_breakdown) - 2] + resource_name = arn_breakdown[len(arn_breakdown) - 1] if resource_type == 'db': # Database if resource_name in self.databases: return self.databases[resource_name].get_tags() elif resource_type == 'es': # Event Subscription - # TODO: Complete call to tags on resource type Event Subscription + # TODO: Complete call to tags on resource type Event + # Subscription return [] elif resource_type == 'og': # Option Group if resource_name in self.option_groups: @@ -856,7 +874,8 @@ class RDS2Backend(BaseBackend): if resource_name in self.db_parameter_groups: return self.db_parameter_groups[resource_name].get_tags() elif resource_type == 'ri': # Reserved DB instance - # TODO: Complete call to tags on resource type Reserved DB instance + # TODO: Complete call to tags on resource type Reserved DB + # instance return [] elif resource_type == 'secgrp': # DB security group if resource_name in self.security_groups: @@ -875,8 +894,8 @@ class RDS2Backend(BaseBackend): def 
remove_tags_from_resource(self, arn, tag_keys): if self.arn_regex.match(arn): arn_breakdown = arn.split(':') - resource_type = arn_breakdown[len(arn_breakdown)-2] - resource_name = arn_breakdown[len(arn_breakdown)-1] + resource_type = arn_breakdown[len(arn_breakdown) - 2] + resource_name = arn_breakdown[len(arn_breakdown) - 1] if resource_type == 'db': # Database if resource_name in self.databases: self.databases[resource_name].remove_tags(tag_keys) @@ -904,8 +923,8 @@ class RDS2Backend(BaseBackend): def add_tags_to_resource(self, arn, tags): if self.arn_regex.match(arn): arn_breakdown = arn.split(':') - resource_type = arn_breakdown[len(arn_breakdown)-2] - resource_name = arn_breakdown[len(arn_breakdown)-1] + resource_type = arn_breakdown[len(arn_breakdown) - 2] + resource_name = arn_breakdown[len(arn_breakdown) - 1] if resource_type == 'db': # Database if resource_name in self.databases: return self.databases[resource_name].add_tags(tags) @@ -932,6 +951,7 @@ class RDS2Backend(BaseBackend): class OptionGroup(object): + def __init__(self, name, engine_name, major_engine_version, description=None): self.engine_name = engine_name self.major_engine_version = major_engine_version @@ -966,11 +986,13 @@ class OptionGroup(object): return template.render(option_group=self) def remove_options(self, options_to_remove): - # TODO: Check for option in self.options and remove if exists. Raise error otherwise + # TODO: Check for option in self.options and remove if exists. Raise + # error otherwise return def add_options(self, options_to_add): - # TODO: Validate option and add it to self.options. If invalid raise error + # TODO: Validate option and add it to self.options. 
If invalid raise + # error return def get_tags(self): @@ -978,22 +1000,26 @@ class OptionGroup(object): def add_tags(self, tags): new_keys = [tag_set['Key'] for tag_set in tags] - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in new_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in new_keys] self.tags.extend(tags) return self.tags def remove_tags(self, tag_keys): - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] class OptionGroupOption(object): + def __init__(self, **kwargs): self.default_port = kwargs.get('default_port') self.description = kwargs.get('description') self.engine_name = kwargs.get('engine_name') self.major_engine_version = kwargs.get('major_engine_version') self.name = kwargs.get('name') - self.option_group_option_settings = self._make_option_group_option_settings(kwargs.get('option_group_option_settings', [])) + self.option_group_option_settings = self._make_option_group_option_settings( + kwargs.get('option_group_option_settings', [])) self.options_depended_on = kwargs.get('options_depended_on', []) self.permanent = kwargs.get('permanent') self.persistent = kwargs.get('persistent') @@ -1044,6 +1070,7 @@ class OptionGroupOption(object): class OptionGroupOptionSetting(object): + def __init__(self, *kwargs): self.allowed_values = kwargs.get('allowed_values') self.apply_type = kwargs.get('apply_type') @@ -1063,7 +1090,9 @@ class OptionGroupOptionSetting(object): """) return template.render(option_group_option_setting=self) + class DBParameterGroup(object): + def __init__(self, name, description, family, tags): self.name = name self.description = description @@ -1084,12 +1113,14 @@ class DBParameterGroup(object): def add_tags(self, tags): new_keys = [tag_set['Key'] for tag_set in tags] - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in new_keys] + self.tags = 
[tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in new_keys] self.tags.extend(tags) return self.tags def remove_tags(self, tag_keys): - self.tags = [tag_set for tag_set in self.tags if tag_set['Key'] not in tag_keys] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] def update_parameters(self, new_parameters): for new_parameter in new_parameters: @@ -1118,9 +1149,11 @@ class DBParameterGroup(object): }) rds2_backend = rds2_backends[region_name] - db_parameter_group = rds2_backend.create_db_parameter_group(db_parameter_group_kwargs) + db_parameter_group = rds2_backend.create_db_parameter_group( + db_parameter_group_kwargs) db_parameter_group.update_parameters(db_parameter_group_parameters) return db_parameter_group -rds2_backends = dict((region.name, RDS2Backend(region.name)) for region in boto.rds2.regions()) +rds2_backends = dict((region.name, RDS2Backend(region.name)) + for region in boto.rds2.regions()) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index 879edbdd3..96b98463d 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -5,8 +5,6 @@ from moto.core.responses import BaseResponse from moto.ec2.models import ec2_backends from .models import rds2_backends from .exceptions import DBParameterGroupNotFoundError -import json -import re class RDS2Response(BaseResponse): @@ -45,7 +43,8 @@ class RDS2Response(BaseResponse): # VpcSecurityGroupIds.member.N "tags": list(), } - args['tags'] = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + args['tags'] = self.unpack_complex_list_params( + 'Tags.Tag', ('Key', 'Value')) return args def _get_db_replica_kwargs(self): @@ -85,7 +84,8 @@ class RDS2Response(BaseResponse): while self._get_param('{0}.{1}.{2}'.format(label, count, names[0])): param = dict() for i in range(len(names)): - param[names[i]] = self._get_param('{0}.{1}.{2}'.format(label, count, names[i])) + param[names[i]] = self._get_param( + '{0}.{1}.{2}'.format(label, count, 
names[i])) unpacked_list.append(param) count += 1 return unpacked_list @@ -94,7 +94,8 @@ class RDS2Response(BaseResponse): unpacked_list = list() count = 1 while self._get_param('{0}.{1}'.format(label, count)): - unpacked_list.append(self._get_param('{0}.{1}'.format(label, count))) + unpacked_list.append(self._get_param( + '{0}.{1}'.format(label, count))) count += 1 return unpacked_list @@ -132,7 +133,8 @@ class RDS2Response(BaseResponse): def modify_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') db_kwargs = self._get_db_kwargs() - database = self.backend.modify_database(db_instance_identifier, db_kwargs) + database = self.backend.modify_database( + db_instance_identifier, db_kwargs) template = self.response_template(MODIFY_DATABASE_TEMPLATE) return template.render(database=database) @@ -181,7 +183,8 @@ class RDS2Response(BaseResponse): group_name = self._get_param('DBSecurityGroupName') description = self._get_param('DBSecurityGroupDescription') tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) - security_group = self.backend.create_security_group(group_name, description, tags) + security_group = self.backend.create_security_group( + group_name, description, tags) template = self.response_template(CREATE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) @@ -190,7 +193,8 @@ class RDS2Response(BaseResponse): def describe_db_security_groups(self): security_group_name = self._get_param('DBSecurityGroupName') - security_groups = self.backend.describe_security_groups(security_group_name) + security_groups = self.backend.describe_security_groups( + security_group_name) template = self.response_template(DESCRIBE_SECURITY_GROUPS_TEMPLATE) return template.render(security_groups=security_groups) @@ -199,7 +203,8 @@ class RDS2Response(BaseResponse): def delete_db_security_group(self): security_group_name = self._get_param('DBSecurityGroupName') - security_group = 
self.backend.delete_security_group(security_group_name) + security_group = self.backend.delete_security_group( + security_group_name) template = self.response_template(DELETE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) @@ -209,7 +214,8 @@ class RDS2Response(BaseResponse): def authorize_db_security_group_ingress(self): security_group_name = self._get_param('DBSecurityGroupName') cidr_ip = self._get_param('CIDRIP') - security_group = self.backend.authorize_security_group(security_group_name, cidr_ip) + security_group = self.backend.authorize_security_group( + security_group_name, cidr_ip) template = self.response_template(AUTHORIZE_SECURITY_GROUP_TEMPLATE) return template.render(security_group=security_group) @@ -221,8 +227,10 @@ class RDS2Response(BaseResponse): description = self._get_param('DBSubnetGroupDescription') subnet_ids = self._get_multi_param('SubnetIds.SubnetIdentifier') tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) - subnets = [ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids] - subnet_group = self.backend.create_subnet_group(subnet_name, description, subnets, tags) + subnets = [ec2_backends[self.region].get_subnet( + subnet_id) for subnet_id in subnet_ids] + subnet_group = self.backend.create_subnet_group( + subnet_name, description, subnets, tags) template = self.response_template(CREATE_SUBNET_GROUP_TEMPLATE) return template.render(subnet_group=subnet_group) @@ -267,7 +275,8 @@ class RDS2Response(BaseResponse): def describe_option_group_options(self): engine_name = self._get_param('EngineName') major_engine_version = self._get_param('MajorEngineVersion') - option_group_options = self.backend.describe_option_group_options(engine_name, major_engine_version) + option_group_options = self.backend.describe_option_group_options( + engine_name, major_engine_version) return option_group_options def modify_option_group(self): @@ -287,7 +296,8 @@ class 
RDS2Response(BaseResponse): count = 1 options_to_remove = [] while self._get_param('OptionsToRemove.member.{0}'.format(count)): - options_to_remove.append(self._get_param('OptionsToRemove.member.{0}'.format(count))) + options_to_remove.append(self._get_param( + 'OptionsToRemove.member.{0}'.format(count))) count += 1 apply_immediately = self._get_param('ApplyImmediately') option_group = self.backend.modify_option_group(option_group_name, @@ -314,7 +324,8 @@ class RDS2Response(BaseResponse): kwargs['max_records'] = self._get_param('MaxRecords') kwargs['marker'] = self._get_param('Marker') db_parameter_groups = self.backend.describe_db_parameter_groups(kwargs) - template = self.response_template(DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE) + template = self.response_template( + DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE) return template.render(db_parameter_groups=db_parameter_groups) def modify_dbparameter_group(self): @@ -347,7 +358,8 @@ class RDS2Response(BaseResponse): def describe_db_parameters(self): db_parameter_group_name = self._get_param('DBParameterGroupName') - db_parameter_groups = self.backend.describe_db_parameter_groups({'name': db_parameter_group_name}) + db_parameter_groups = self.backend.describe_db_parameter_groups( + {'name': db_parameter_group_name}) if not db_parameter_groups: raise DBParameterGroupNotFoundError(db_parameter_group_name) @@ -359,7 +371,8 @@ class RDS2Response(BaseResponse): def delete_db_parameter_group(self): kwargs = self._get_db_parameter_group_kwargs() - db_parameter_group = self.backend.delete_db_parameter_group(kwargs['name']) + db_parameter_group = self.backend.delete_db_parameter_group(kwargs[ + 'name']) template = self.response_template(DELETE_DB_PARAMETER_GROUP_TEMPLATE) return template.render(db_parameter_group=db_parameter_group) diff --git a/moto/redshift/__init__.py b/moto/redshift/__init__.py index 58be5fc70..06f778e8d 100644 --- a/moto/redshift/__init__.py +++ b/moto/redshift/__init__.py @@ -1,6 +1,6 @@ from __future__ import 
unicode_literals from .models import redshift_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator redshift_backend = redshift_backends['us-east-1'] mock_redshift = base_decorator(redshift_backends) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index 6d1b2c3bb..8bcca807e 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -5,6 +5,7 @@ from werkzeug.exceptions import BadRequest class RedshiftClientError(BadRequest): + def __init__(self, code, message): super(RedshiftClientError, self).__init__() self.description = json.dumps({ @@ -18,6 +19,7 @@ class RedshiftClientError(BadRequest): class ClusterNotFoundError(RedshiftClientError): + def __init__(self, cluster_identifier): super(ClusterNotFoundError, self).__init__( 'ClusterNotFound', @@ -25,6 +27,7 @@ class ClusterNotFoundError(RedshiftClientError): class ClusterSubnetGroupNotFoundError(RedshiftClientError): + def __init__(self, subnet_identifier): super(ClusterSubnetGroupNotFoundError, self).__init__( 'ClusterSubnetGroupNotFound', @@ -32,6 +35,7 @@ class ClusterSubnetGroupNotFoundError(RedshiftClientError): class ClusterSecurityGroupNotFoundError(RedshiftClientError): + def __init__(self, group_identifier): super(ClusterSecurityGroupNotFoundError, self).__init__( 'ClusterSecurityGroupNotFound', @@ -39,6 +43,7 @@ class ClusterSecurityGroupNotFoundError(RedshiftClientError): class ClusterParameterGroupNotFoundError(RedshiftClientError): + def __init__(self, group_identifier): super(ClusterParameterGroupNotFoundError, self).__init__( 'ClusterParameterGroupNotFound', @@ -46,6 +51,7 @@ class ClusterParameterGroupNotFoundError(RedshiftClientError): class InvalidSubnetError(RedshiftClientError): + def __init__(self, subnet_identifier): super(InvalidSubnetError, self).__init__( 'InvalidSubnet', diff --git a/moto/redshift/models.py 
b/moto/redshift/models.py index bd81526df..af6c6f643 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -13,13 +13,14 @@ from .exceptions import ( class Cluster(object): + def __init__(self, redshift_backend, cluster_identifier, node_type, master_username, - master_user_password, db_name, cluster_type, cluster_security_groups, - vpc_security_group_ids, cluster_subnet_group_name, availability_zone, - preferred_maintenance_window, cluster_parameter_group_name, - automated_snapshot_retention_period, port, cluster_version, - allow_version_upgrade, number_of_nodes, publicly_accessible, - encrypted, region): + master_user_password, db_name, cluster_type, cluster_security_groups, + vpc_security_group_ids, cluster_subnet_group_name, availability_zone, + preferred_maintenance_window, cluster_parameter_group_name, + automated_snapshot_retention_period, port, cluster_version, + allow_version_upgrade, number_of_nodes, publicly_accessible, + encrypted, region): self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier self.node_type = node_type @@ -34,7 +35,8 @@ class Cluster(object): self.allow_version_upgrade = allow_version_upgrade if allow_version_upgrade is not None else True self.cluster_version = cluster_version if cluster_version else "1.0" self.port = int(port) if port else 5439 - self.automated_snapshot_retention_period = int(automated_snapshot_retention_period) if automated_snapshot_retention_period else 1 + self.automated_snapshot_retention_period = int( + automated_snapshot_retention_period) if automated_snapshot_retention_period else 1 self.preferred_maintenance_window = preferred_maintenance_window if preferred_maintenance_window else "Mon:03:00-Mon:03:30" if cluster_parameter_group_name: @@ -68,7 +70,8 @@ class Cluster(object): properties = cloudformation_json['Properties'] if 'ClusterSubnetGroupName' in properties: - subnet_group_name = properties['ClusterSubnetGroupName'].cluster_subnet_group_name + 
subnet_group_name = properties[ + 'ClusterSubnetGroupName'].cluster_subnet_group_name else: subnet_group_name = None cluster = redshift_backend.create_cluster( @@ -78,13 +81,17 @@ class Cluster(object): master_user_password=properties.get('MasterUserPassword'), db_name=properties.get('DBName'), cluster_type=properties.get('ClusterType'), - cluster_security_groups=properties.get('ClusterSecurityGroups', []), + cluster_security_groups=properties.get( + 'ClusterSecurityGroups', []), vpc_security_group_ids=properties.get('VpcSecurityGroupIds', []), cluster_subnet_group_name=subnet_group_name, availability_zone=properties.get('AvailabilityZone'), - preferred_maintenance_window=properties.get('PreferredMaintenanceWindow'), - cluster_parameter_group_name=properties.get('ClusterParameterGroupName'), - automated_snapshot_retention_period=properties.get('AutomatedSnapshotRetentionPeriod'), + preferred_maintenance_window=properties.get( + 'PreferredMaintenanceWindow'), + cluster_parameter_group_name=properties.get( + 'ClusterParameterGroupName'), + automated_snapshot_retention_period=properties.get( + 'AutomatedSnapshotRetentionPeriod'), port=properties.get('Port'), cluster_version=properties.get('ClusterVersion'), allow_version_upgrade=properties.get('AllowVersionUpgrade'), @@ -214,6 +221,7 @@ class SubnetGroup(object): class SecurityGroup(object): + def __init__(self, cluster_security_group_name, description): self.cluster_security_group_name = cluster_security_group_name self.description = description @@ -293,7 +301,8 @@ class RedshiftBackend(BaseBackend): def modify_cluster(self, **cluster_kwargs): cluster_identifier = cluster_kwargs.pop('cluster_identifier') - new_cluster_identifier = cluster_kwargs.pop('new_cluster_identifier', None) + new_cluster_identifier = cluster_kwargs.pop( + 'new_cluster_identifier', None) cluster = self.describe_clusters(cluster_identifier)[0] @@ -313,7 +322,8 @@ class RedshiftBackend(BaseBackend): raise ClusterNotFoundError(cluster_identifier) 
def create_cluster_subnet_group(self, cluster_subnet_group_name, description, subnet_ids): - subnet_group = SubnetGroup(self.ec2_backend, cluster_subnet_group_name, description, subnet_ids) + subnet_group = SubnetGroup( + self.ec2_backend, cluster_subnet_group_name, description, subnet_ids) self.subnet_groups[cluster_subnet_group_name] = subnet_group return subnet_group @@ -332,7 +342,8 @@ class RedshiftBackend(BaseBackend): raise ClusterSubnetGroupNotFoundError(subnet_identifier) def create_cluster_security_group(self, cluster_security_group_name, description): - security_group = SecurityGroup(cluster_security_group_name, description) + security_group = SecurityGroup( + cluster_security_group_name, description) self.security_groups[cluster_security_group_name] = security_group return security_group @@ -351,8 +362,9 @@ class RedshiftBackend(BaseBackend): raise ClusterSecurityGroupNotFoundError(security_group_identifier) def create_cluster_parameter_group(self, cluster_parameter_group_name, - group_family, description): - parameter_group = ParameterGroup(cluster_parameter_group_name, group_family, description) + group_family, description): + parameter_group = ParameterGroup( + cluster_parameter_group_name, group_family, description) self.parameter_groups[cluster_parameter_group_name] = parameter_group return parameter_group diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index a9c977b4e..23c653332 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -134,7 +134,8 @@ class RedshiftResponse(BaseResponse): def describe_cluster_subnet_groups(self): subnet_identifier = self._get_param("ClusterSubnetGroupName") - subnet_groups = self.redshift_backend.describe_cluster_subnet_groups(subnet_identifier) + subnet_groups = self.redshift_backend.describe_cluster_subnet_groups( + subnet_identifier) return json.dumps({ "DescribeClusterSubnetGroupsResponse": { @@ -160,7 +161,8 @@ class RedshiftResponse(BaseResponse): }) def 
create_cluster_security_group(self): - cluster_security_group_name = self._get_param('ClusterSecurityGroupName') + cluster_security_group_name = self._get_param( + 'ClusterSecurityGroupName') description = self._get_param('Description') security_group = self.redshift_backend.create_cluster_security_group( @@ -180,8 +182,10 @@ class RedshiftResponse(BaseResponse): }) def describe_cluster_security_groups(self): - cluster_security_group_name = self._get_param("ClusterSecurityGroupName") - security_groups = self.redshift_backend.describe_cluster_security_groups(cluster_security_group_name) + cluster_security_group_name = self._get_param( + "ClusterSecurityGroupName") + security_groups = self.redshift_backend.describe_cluster_security_groups( + cluster_security_group_name) return json.dumps({ "DescribeClusterSecurityGroupsResponse": { @@ -196,7 +200,8 @@ class RedshiftResponse(BaseResponse): def delete_cluster_security_group(self): security_group_identifier = self._get_param("ClusterSecurityGroupName") - self.redshift_backend.delete_cluster_security_group(security_group_identifier) + self.redshift_backend.delete_cluster_security_group( + security_group_identifier) return json.dumps({ "DeleteClusterSecurityGroupResponse": { @@ -230,7 +235,8 @@ class RedshiftResponse(BaseResponse): def describe_cluster_parameter_groups(self): cluster_parameter_group_name = self._get_param("ParameterGroupName") - parameter_groups = self.redshift_backend.describe_cluster_parameter_groups(cluster_parameter_group_name) + parameter_groups = self.redshift_backend.describe_cluster_parameter_groups( + cluster_parameter_group_name) return json.dumps({ "DescribeClusterParameterGroupsResponse": { @@ -245,7 +251,8 @@ class RedshiftResponse(BaseResponse): def delete_cluster_parameter_group(self): cluster_parameter_group_name = self._get_param("ParameterGroupName") - self.redshift_backend.delete_cluster_parameter_group(cluster_parameter_group_name) + 
self.redshift_backend.delete_cluster_parameter_group( + cluster_parameter_group_name) return json.dumps({ "DeleteClusterParameterGroupResponse": { diff --git a/moto/route53/models.py b/moto/route53/models.py index 6b293a1ca..338c6d30a 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -10,6 +10,7 @@ from moto.core.utils import get_random_hex class HealthCheck(object): + def __init__(self, health_check_id, health_check_args): self.id = health_check_id self.ip_address = health_check_args.get("ip_address") @@ -63,6 +64,7 @@ class HealthCheck(object): class RecordSet(object): + def __init__(self, kwargs): self.name = kwargs.get('Name') self._type = kwargs.get('Type') @@ -83,25 +85,29 @@ class RecordSet(object): if zone_name: hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name) else: - hosted_zone = route53_backend.get_hosted_zone(properties["HostedZoneId"]) + hosted_zone = route53_backend.get_hosted_zone( + properties["HostedZoneId"]) record_set = hosted_zone.add_rrset(properties) return record_set @classmethod def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): - cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name) + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name) return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name) @classmethod def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): - # this will break if you changed the zone the record is in, unfortunately + # this will break if you changed the zone the record is in, + # unfortunately properties = cloudformation_json['Properties'] zone_name = properties.get("HostedZoneName") if zone_name: hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name) else: - hosted_zone = route53_backend.get_hosted_zone(properties["HostedZoneId"]) + hosted_zone = 
route53_backend.get_hosted_zone( + properties["HostedZoneId"]) try: hosted_zone.delete_rrset_by_name(resource_name) @@ -141,7 +147,8 @@ class RecordSet(object): def delete(self, *args, **kwargs): ''' Not exposed as part of the Route 53 API - used for CloudFormation. args are ignored ''' - hosted_zone = route53_backend.get_hosted_zone_by_name(self.hosted_zone_name) + hosted_zone = route53_backend.get_hosted_zone_by_name( + self.hosted_zone_name) if not hosted_zone: hosted_zone = route53_backend.get_hosted_zone(self.hosted_zone_id) hosted_zone.delete_rrset_by_name(self.name) @@ -173,17 +180,21 @@ class FakeZone(object): return new_rrset def delete_rrset_by_name(self, name): - self.rrsets = [record_set for record_set in self.rrsets if record_set.name != name] + self.rrsets = [ + record_set for record_set in self.rrsets if record_set.name != name] def delete_rrset_by_id(self, set_identifier): - self.rrsets = [record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier] + self.rrsets = [ + record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier] def get_record_sets(self, type_filter, name_filter): record_sets = list(self.rrsets) # Copy the list if type_filter: - record_sets = [record_set for record_set in record_sets if record_set._type == type_filter] + record_sets = [ + record_set for record_set in record_sets if record_set._type == type_filter] if name_filter: - record_sets = [record_set for record_set in record_sets if record_set.name == name_filter] + record_sets = [ + record_set for record_set in record_sets if record_set.name == name_filter] return record_sets @@ -196,11 +207,13 @@ class FakeZone(object): properties = cloudformation_json['Properties'] name = properties["Name"] - hosted_zone = route53_backend.create_hosted_zone(name, private_zone=False) + hosted_zone = route53_backend.create_hosted_zone( + name, private_zone=False) return hosted_zone class RecordSetGroup(object): + def __init__(self, 
hosted_zone_id, record_sets): self.hosted_zone_id = hosted_zone_id self.record_sets = record_sets @@ -232,7 +245,8 @@ class Route53Backend(BaseBackend): def create_hosted_zone(self, name, private_zone, comment=None): new_id = get_random_hex() - new_zone = FakeZone(name, new_id, private_zone=private_zone, comment=comment) + new_zone = FakeZone( + name, new_id, private_zone=private_zone, comment=comment) self.zones[new_id] = new_zone return new_zone @@ -285,4 +299,5 @@ class Route53Backend(BaseBackend): def delete_health_check(self, health_check_id): return self.health_checks.pop(health_check_id, None) + route53_backend = Route53Backend() diff --git a/moto/route53/responses.py b/moto/route53/responses.py index d796660e1..07f6e2303 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -8,43 +8,45 @@ import xmltodict class Route53 (BaseResponse): + def list_or_create_hostzone_response(self, request, full_url, headers): self.setup_class(request, full_url, headers) if request.method == "POST": - elements = xmltodict.parse(self.body) - if "HostedZoneConfig" in elements["CreateHostedZoneRequest"]: - comment = elements["CreateHostedZoneRequest"]["HostedZoneConfig"]["Comment"] - try: - # in boto3, this field is set directly in the xml - private_zone = elements["CreateHostedZoneRequest"]["HostedZoneConfig"]["PrivateZone"] - except KeyError: - # if a VPC subsection is only included in xmls params when private_zone=True, - # see boto: boto/route53/connection.py - private_zone = 'VPC' in elements["CreateHostedZoneRequest"] - else: - comment = None - private_zone = False + elements = xmltodict.parse(self.body) + if "HostedZoneConfig" in elements["CreateHostedZoneRequest"]: + comment = elements["CreateHostedZoneRequest"][ + "HostedZoneConfig"]["Comment"] + try: + # in boto3, this field is set directly in the xml + private_zone = elements["CreateHostedZoneRequest"][ + "HostedZoneConfig"]["PrivateZone"] + except KeyError: + # if a VPC subsection is only included 
in xmls params when private_zone=True, + # see boto: boto/route53/connection.py + private_zone = 'VPC' in elements["CreateHostedZoneRequest"] + else: + comment = None + private_zone = False - name = elements["CreateHostedZoneRequest"]["Name"] + name = elements["CreateHostedZoneRequest"]["Name"] - if name[-1] != ".": - name += "." + if name[-1] != ".": + name += "." - new_zone = route53_backend.create_hosted_zone( - name, - comment=comment, - private_zone=private_zone, - ) - template = Template(CREATE_HOSTED_ZONE_RESPONSE) - return 201, headers, template.render(zone=new_zone) + new_zone = route53_backend.create_hosted_zone( + name, + comment=comment, + private_zone=private_zone, + ) + template = Template(CREATE_HOSTED_ZONE_RESPONSE) + return 201, headers, template.render(zone=new_zone) elif request.method == "GET": all_zones = route53_backend.get_all_hosted_zones() template = Template(LIST_HOSTED_ZONES_RESPONSE) return 200, headers, template.render(zones=all_zones) - def get_or_delete_hostzone_response(self, request, full_url, headers): self.setup_class(request, full_url, headers) parsed_url = urlparse(full_url) @@ -61,7 +63,6 @@ class Route53 (BaseResponse): route53_backend.delete_hosted_zone(zoneid) return 200, headers, DELETE_HOSTED_ZONE_RESPONSE - def rrset_response(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -76,18 +77,22 @@ class Route53 (BaseResponse): if method == "POST": elements = xmltodict.parse(self.body) - change_list = elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change'] + change_list = elements['ChangeResourceRecordSetsRequest'][ + 'ChangeBatch']['Changes']['Change'] if not isinstance(change_list, list): - change_list = [elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change']] + change_list = [elements['ChangeResourceRecordSetsRequest'][ + 'ChangeBatch']['Changes']['Change']] for value in change_list: action = value['Action'] record_set = 
value['ResourceRecordSet'] if action in ('CREATE', 'UPSERT'): if 'ResourceRecords' in record_set: - resource_records = list(record_set['ResourceRecords'].values())[0] + resource_records = list( + record_set['ResourceRecords'].values())[0] if not isinstance(resource_records, list): - # Depending on how many records there are, this may or may not be a list + # Depending on how many records there are, this may + # or may not be a list resource_records = [resource_records] record_values = [x['Value'] for x in resource_records] elif 'AliasTarget' in record_set: @@ -99,7 +104,8 @@ class Route53 (BaseResponse): the_zone.upsert_rrset(record_set) elif action == "DELETE": if 'SetIdentifier' in record_set: - the_zone.delete_rrset_by_id(record_set["SetIdentifier"]) + the_zone.delete_rrset_by_id( + record_set["SetIdentifier"]) else: the_zone.delete_rrset_by_name(record_set["Name"]) @@ -113,7 +119,6 @@ class Route53 (BaseResponse): record_sets = the_zone.get_record_sets(type_filter, name_filter) return 200, headers, template.render(record_sets=record_sets) - def health_check_response(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -121,7 +126,8 @@ class Route53 (BaseResponse): method = request.method if method == "POST": - properties = xmltodict.parse(self.body)['CreateHealthCheckRequest']['HealthCheckConfig'] + properties = xmltodict.parse(self.body)['CreateHealthCheckRequest'][ + 'HealthCheckConfig'] health_check_args = { "ip_address": properties.get('IPAddress'), "port": properties.get('Port'), @@ -132,7 +138,8 @@ class Route53 (BaseResponse): "request_interval": properties.get('RequestInterval'), "failure_threshold": properties.get('FailureThreshold'), } - health_check = route53_backend.create_health_check(health_check_args) + health_check = route53_backend.create_health_check( + health_check_args) template = Template(CREATE_HEALTH_CHECK_RESPONSE) return 201, headers, template.render(health_check=health_check) elif method == "DELETE": @@ 
-152,8 +159,8 @@ class Route53 (BaseResponse): action = 'tags' elif 'trafficpolicyinstances' in full_url: action = 'policies' - raise NotImplementedError("The action for {0} has not been implemented for route 53".format(action)) - + raise NotImplementedError( + "The action for {0} has not been implemented for route 53".format(action)) def list_or_change_tags_for_resource_request(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -182,6 +189,7 @@ class Route53 (BaseResponse): return 200, headers, template.render() + LIST_TAGS_FOR_RESOURCE_RESPONSE = """ diff --git a/moto/s3/__init__.py b/moto/s3/__init__.py index 2c54a8d5a..84c1cbde0 100644 --- a/moto/s3/__init__.py +++ b/moto/s3/__init__.py @@ -3,4 +3,4 @@ from .models import s3_backend s3_backends = {"global": s3_backend} mock_s3 = s3_backend.decorator -mock_s3_deprecated = s3_backend.deprecated_decorator \ No newline at end of file +mock_s3_deprecated = s3_backend.deprecated_decorator diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 2f444e2dd..df817ba78 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -12,6 +12,7 @@ ERROR_WITH_KEY_NAME = """{% extends 'single_error' %} class S3ClientError(RESTError): + def __init__(self, *args, **kwargs): kwargs.setdefault('template', 'single_error') self.templates['bucket_error'] = ERROR_WITH_BUCKET_NAME @@ -19,6 +20,7 @@ class S3ClientError(RESTError): class BucketError(S3ClientError): + def __init__(self, *args, **kwargs): kwargs.setdefault('template', 'bucket_error') self.templates['bucket_error'] = ERROR_WITH_BUCKET_NAME diff --git a/moto/s3/models.py b/moto/s3/models.py index d5e156498..c7bf557ca 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -120,6 +120,7 @@ class FakeKey(object): class FakeMultipart(object): + def __init__(self, key_name, metadata): self.key_name = key_name self.metadata = metadata @@ -167,6 +168,7 @@ class FakeMultipart(object): class FakeGrantee(object): + def __init__(self, 
id='', uri='', display_name=''): self.id = id self.uri = uri @@ -177,9 +179,12 @@ class FakeGrantee(object): return 'Group' if self.uri else 'CanonicalUser' -ALL_USERS_GRANTEE = FakeGrantee(uri='http://acs.amazonaws.com/groups/global/AllUsers') -AUTHENTICATED_USERS_GRANTEE = FakeGrantee(uri='http://acs.amazonaws.com/groups/global/AuthenticatedUsers') -LOG_DELIVERY_GRANTEE = FakeGrantee(uri='http://acs.amazonaws.com/groups/s3/LogDelivery') +ALL_USERS_GRANTEE = FakeGrantee( + uri='http://acs.amazonaws.com/groups/global/AllUsers') +AUTHENTICATED_USERS_GRANTEE = FakeGrantee( + uri='http://acs.amazonaws.com/groups/global/AuthenticatedUsers') +LOG_DELIVERY_GRANTEE = FakeGrantee( + uri='http://acs.amazonaws.com/groups/s3/LogDelivery') PERMISSION_FULL_CONTROL = 'FULL_CONTROL' PERMISSION_WRITE = 'WRITE' @@ -189,27 +194,32 @@ PERMISSION_READ_ACP = 'READ_ACP' class FakeGrant(object): + def __init__(self, grantees, permissions): self.grantees = grantees self.permissions = permissions class FakeAcl(object): + def __init__(self, grants=[]): self.grants = grants def get_canned_acl(acl): - owner_grantee = FakeGrantee(id='75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a') + owner_grantee = FakeGrantee( + id='75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a') grants = [FakeGrant([owner_grantee], [PERMISSION_FULL_CONTROL])] if acl == 'private': pass # no other permissions elif acl == 'public-read': grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ])) elif acl == 'public-read-write': - grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ, PERMISSION_WRITE])) + grants.append(FakeGrant([ALL_USERS_GRANTEE], [ + PERMISSION_READ, PERMISSION_WRITE])) elif acl == 'authenticated-read': - grants.append(FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ])) + grants.append( + FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ])) elif acl == 'bucket-owner-read': pass # TODO: bucket owner ACL elif acl == 
'bucket-owner-full-control': @@ -217,13 +227,15 @@ def get_canned_acl(acl): elif acl == 'aws-exec-read': pass # TODO: bucket owner, EC2 Read elif acl == 'log-delivery-write': - grants.append(FakeGrant([LOG_DELIVERY_GRANTEE], [PERMISSION_READ_ACP, PERMISSION_WRITE])) + grants.append(FakeGrant([LOG_DELIVERY_GRANTEE], [ + PERMISSION_READ_ACP, PERMISSION_WRITE])) else: assert False, 'Unknown canned acl: %s' % (acl,) return FakeAcl(grants=grants) class LifecycleRule(object): + def __init__(self, id=None, prefix=None, status=None, expiration_days=None, expiration_date=None, transition_days=None, transition_date=None, storage_class=None): @@ -271,7 +283,8 @@ class FakeBucket(object): expiration_date=expiration.get('Date') if expiration else None, transition_days=transition.get('Days') if transition else None, transition_date=transition.get('Date') if transition else None, - storage_class=transition['StorageClass'] if transition else None, + storage_class=transition[ + 'StorageClass'] if transition else None, )) def delete_lifecycle(self): @@ -283,9 +296,11 @@ class FakeBucket(object): def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == 'DomainName': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "DomainName" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "DomainName" ]"') elif attribute_name == 'WebsiteURL': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"') + raise NotImplementedError( + '"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"') raise UnformattedGetAttTemplateException() def set_acl(self, acl): @@ -470,20 +485,24 @@ class S3Backend(BaseBackend): key_without_prefix = key_name.replace(prefix, "", 1) if delimiter and delimiter in key_without_prefix: # If delimiter, we need to split out folder_results - key_without_delimiter = key_without_prefix.split(delimiter)[0] - folder_results.add("{0}{1}{2}".format(prefix, key_without_delimiter, 
delimiter)) + key_without_delimiter = key_without_prefix.split(delimiter)[ + 0] + folder_results.add("{0}{1}{2}".format( + prefix, key_without_delimiter, delimiter)) else: key_results.add(key) else: for key_name, key in bucket.keys.items(): if delimiter and delimiter in key_name: # If delimiter, we need to split out folder_results - folder_results.add(key_name.split(delimiter)[0] + delimiter) + folder_results.add(key_name.split( + delimiter)[0] + delimiter) else: key_results.add(key) key_results = sorted(key_results, key=lambda key: key.name) - folder_results = [folder_name for folder_name in sorted(folder_results, key=lambda key: key)] + folder_results = [folder_name for folder_name in sorted( + folder_results, key=lambda key: key)] return key_results, folder_results @@ -502,7 +521,8 @@ class S3Backend(BaseBackend): src_key_name = clean_key_name(src_key_name) dest_key_name = clean_key_name(dest_key_name) dest_bucket = self.get_bucket(dest_bucket_name) - key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id) + key = self.get_key(src_bucket_name, src_key_name, + version_id=src_version_id) if dest_key_name != src_key_name: key = key.copy(dest_key_name) dest_bucket.keys[dest_key_name] = key diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 07be98e7b..e123d76e1 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -33,6 +33,7 @@ def is_delete_keys(request, path, bucket_name): class ResponseObject(_TemplateEnvironmentMixin): + def __init__(self, backend): super(ResponseObject, self).__init__() self.backend = backend @@ -70,7 +71,8 @@ class ResponseObject(_TemplateEnvironmentMixin): if match: return False - path_based = (host == 's3.amazonaws.com' or re.match(r"s3[\.\-]([^.]*)\.amazonaws\.com", host)) + path_based = (host == 's3.amazonaws.com' or re.match( + r"s3[\.\-]([^.]*)\.amazonaws\.com", host)) return not path_based def is_delete_keys(self, request, path, bucket_name): @@ -148,7 +150,8 @@ class 
ResponseObject(_TemplateEnvironmentMixin): elif method == 'POST': return self._bucket_response_post(request, body, bucket_name, headers) else: - raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method)) + raise NotImplementedError( + "Method {0} has not been impelemented in the S3 backend yet".format(method)) def _bucket_response_head(self, bucket_name, headers): self.backend.get_bucket(bucket_name) @@ -158,11 +161,14 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'uploads' in querystring: for unsup in ('delimiter', 'max-uploads'): if unsup in querystring: - raise NotImplementedError("Listing multipart uploads with {} has not been implemented yet.".format(unsup)) - multiparts = list(self.backend.get_all_multiparts(bucket_name).values()) + raise NotImplementedError( + "Listing multipart uploads with {} has not been implemented yet.".format(unsup)) + multiparts = list( + self.backend.get_all_multiparts(bucket_name).values()) if 'prefix' in querystring: prefix = querystring.get('prefix', [None])[0] - multiparts = [upload for upload in multiparts if upload.key_name.startswith(prefix)] + multiparts = [ + upload for upload in multiparts if upload.key_name.startswith(prefix)] template = self.response_template(S3_ALL_MULTIPARTS) return template.render( bucket_name=bucket_name, @@ -175,7 +181,8 @@ class ResponseObject(_TemplateEnvironmentMixin): bucket = self.backend.get_bucket(bucket_name) if not bucket.rules: return 404, {}, "NoSuchLifecycleConfiguration" - template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION) + template = self.response_template( + S3_BUCKET_LIFECYCLE_CONFIGURATION) return template.render(rules=bucket.rules) elif 'versioning' in querystring: versioning = self.backend.get_bucket_versioning(bucket_name) @@ -188,7 +195,8 @@ class ResponseObject(_TemplateEnvironmentMixin): return 404, {}, template.render(bucket_name=bucket_name) return 200, {}, policy elif 'website' in querystring: - 
website_configuration = self.backend.get_bucket_website_configuration(bucket_name) + website_configuration = self.backend.get_bucket_website_configuration( + bucket_name) return website_configuration elif 'acl' in querystring: bucket = self.backend.get_bucket(bucket_name) @@ -226,7 +234,8 @@ class ResponseObject(_TemplateEnvironmentMixin): bucket = self.backend.get_bucket(bucket_name) prefix = querystring.get('prefix', [None])[0] delimiter = querystring.get('delimiter', [None])[0] - result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter) + result_keys, result_folders = self.backend.prefix_query( + bucket, prefix, delimiter) template = self.response_template(S3_BUCKET_GET_RESPONSE) return 200, {}, template.render( bucket=bucket, @@ -242,7 +251,8 @@ class ResponseObject(_TemplateEnvironmentMixin): prefix = querystring.get('prefix', [None])[0] delimiter = querystring.get('delimiter', [None])[0] - result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter) + result_keys, result_folders = self.backend.prefix_query( + bucket, prefix, delimiter) fetch_owner = querystring.get('fetch-owner', [False])[0] max_keys = int(querystring.get('max-keys', [1000])[0]) @@ -308,7 +318,8 @@ class ResponseObject(_TemplateEnvironmentMixin): return "" else: try: - new_bucket = self.backend.create_bucket(bucket_name, region_name) + new_bucket = self.backend.create_bucket( + bucket_name, region_name) except BucketAlreadyExists: if region_name == DEFAULT_REGION_NAME: # us-east-1 has different behavior @@ -335,7 +346,8 @@ class ResponseObject(_TemplateEnvironmentMixin): return 204, {}, template.render(bucket=removed_bucket) else: # Tried to delete a bucket that still has keys - template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR) + template = self.response_template( + S3_DELETE_BUCKET_WITH_ITEMS_ERROR) return 409, {}, template.render(bucket=removed_bucket) def _bucket_response_post(self, request, body, bucket_name, headers): @@ 
-393,7 +405,9 @@ class ResponseObject(_TemplateEnvironmentMixin): if ',' in rspec: raise NotImplementedError( "Multiple range specifiers not supported") - toint = lambda i: int(i) if i else None + + def toint(i): + return int(i) if i else None begin, end = map(toint, rspec.split('-')) if begin is not None: # byte range end = last if end is None else min(end, last) @@ -455,7 +469,8 @@ class ResponseObject(_TemplateEnvironmentMixin): elif method == 'POST': return self._key_response_post(request, body, bucket_name, query, key_name, headers) else: - raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method)) + raise NotImplementedError( + "Method {0} has not been impelemented in the S3 backend yet".format(method)) def _key_response_get(self, bucket_name, query, key_name, headers): response_headers = {} @@ -489,7 +504,8 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'x-amz-copy-source' in request.headers: src = request.headers.get("x-amz-copy-source") src_bucket, src_key = src.split("/", 1) - src_range = request.headers.get('x-amz-copy-source-range', '').split("bytes=")[-1] + src_range = request.headers.get( + 'x-amz-copy-source-range', '').split("bytes=")[-1] try: start_byte, end_byte = src_range.split("-") @@ -522,7 +538,8 @@ class ResponseObject(_TemplateEnvironmentMixin): # Copy key src_key_parsed = urlparse(request.headers.get("x-amz-copy-source")) src_bucket, src_key = src_key_parsed.path.split("/", 1) - src_version_id = parse_qs(src_key_parsed.query).get('versionId', [None])[0] + src_version_id = parse_qs(src_key_parsed.query).get( + 'versionId', [None])[0] self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, storage=storage_class, acl=acl, src_version_id=src_version_id) new_key = self.backend.get_key(bucket_name, key_name) @@ -557,7 +574,8 @@ class ResponseObject(_TemplateEnvironmentMixin): def _key_response_head(self, bucket_name, query, key_name, headers): response_headers = {} version_id = 
query.get('versionId', [None])[0] - key = self.backend.get_key(bucket_name, key_name, version_id=version_id) + key = self.backend.get_key( + bucket_name, key_name, version_id=version_id) if key: response_headers.update(key.metadata) response_headers.update(key.response_dict) @@ -585,7 +603,8 @@ class ResponseObject(_TemplateEnvironmentMixin): grantees = [] for key_and_value in value.split(","): - key, value = re.match('([^=]+)="([^"]+)"', key_and_value.strip()).groups() + key, value = re.match( + '([^=]+)="([^"]+)"', key_and_value.strip()).groups() if key.lower() == 'id': grantees.append(FakeGrantee(id=value)) else: @@ -610,7 +629,8 @@ class ResponseObject(_TemplateEnvironmentMixin): ps = minidom.parseString(body).getElementsByTagName('Part') prev = 0 for p in ps: - pn = int(p.getElementsByTagName('PartNumber')[0].firstChild.wholeText) + pn = int(p.getElementsByTagName( + 'PartNumber')[0].firstChild.wholeText) if pn <= prev: raise InvalidPartOrder() yield (pn, p.getElementsByTagName('ETag')[0].firstChild.wholeText) @@ -618,7 +638,8 @@ class ResponseObject(_TemplateEnvironmentMixin): def _key_response_post(self, request, body, bucket_name, query, key_name, headers): if body == b'' and 'uploads' in query: metadata = metadata_from_headers(request.headers) - multipart = self.backend.initiate_multipart(bucket_name, key_name, metadata) + multipart = self.backend.initiate_multipart( + bucket_name, key_name, metadata) template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE) response = template.render( @@ -648,7 +669,9 @@ class ResponseObject(_TemplateEnvironmentMixin): key.restore(int(days)) return r, {}, "" else: - raise NotImplementedError("Method POST had only been implemented for multipart uploads and restore operations, so far") + raise NotImplementedError( + "Method POST had only been implemented for multipart uploads and restore operations, so far") + S3ResponseInstance = ResponseObject(s3_backend) diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 
8ea18c207..a121eae3a 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -29,7 +29,8 @@ def bucket_name_from_url(url): def metadata_from_headers(headers): metadata = {} - meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) + meta_regex = re.compile( + '^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) for header, value in headers.items(): if isinstance(header, six.string_types): result = meta_regex.match(header) diff --git a/moto/server.py b/moto/server.py index 0bb4eb779..c7e7f18fb 100644 --- a/moto/server.py +++ b/moto/server.py @@ -57,11 +57,13 @@ class DomainDispatcherApplication(object): # Fall back to parsing auth header to find service # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] try: - _, _, region, service, _ = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[1].split("/") + _, _, region, service, _ = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[ + 1].split("/") except ValueError: region = 'us-east-1' service = 's3' - host = "{service}.{region}.amazonaws.com".format(service=service, region=region) + host = "{service}.{region}.amazonaws.com".format( + service=service, region=region) with self.lock: backend = self.get_backend_for_host(host) @@ -78,6 +80,7 @@ class DomainDispatcherApplication(object): class RegexConverter(BaseConverter): # http://werkzeug.pocoo.org/docs/routing/#custom-converters + def __init__(self, url_map, *items): super(RegexConverter, self).__init__(url_map) self.regex = items[0] @@ -92,7 +95,7 @@ class AWSTestHelper(FlaskClient): opts = {"Action": action_name} opts.update(kwargs) res = self.get("/?{0}".format(urlencode(opts)), - headers={"Host": "{0}.us-east-1.amazonaws.com".format(self.application.service)}) + headers={"Host": "{0}.us-east-1.amazonaws.com".format(self.application.service)}) return res.data.decode("utf-8") def action_json(self, action_name, **kwargs): @@ -166,10 +169,12 @@ def main(argv=sys.argv[1:]): args = parser.parse_args(argv) # Wrap the main 
application - main_app = DomainDispatcherApplication(create_backend_app, service=args.service) + main_app = DomainDispatcherApplication( + create_backend_app, service=args.service) main_app.debug = True - run_simple(args.host, args.port, main_app, threaded=True, use_reloader=args.reload) + run_simple(args.host, args.port, main_app, + threaded=True, use_reloader=args.reload) if __name__ == '__main__': diff --git a/moto/ses/__init__.py b/moto/ses/__init__.py index e105b9929..0477d2623 100644 --- a/moto/ses/__init__.py +++ b/moto/ses/__init__.py @@ -3,4 +3,4 @@ from .models import ses_backend ses_backends = {"global": ses_backend} mock_ses = ses_backend.decorator -mock_ses_deprecated = ses_backend.deprecated_decorator \ No newline at end of file +mock_ses_deprecated = ses_backend.deprecated_decorator diff --git a/moto/ses/models.py b/moto/ses/models.py index 6950ead5b..3502d6bc7 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -11,16 +11,19 @@ RECIPIENT_LIMIT = 50 class Message(object): + def __init__(self, message_id): self.id = message_id class RawMessage(object): + def __init__(self, message_id): self.id = message_id class SESQuota(object): + def __init__(self, sent): self.sent = sent @@ -30,6 +33,7 @@ class SESQuota(object): class SESBackend(BaseBackend): + def __init__(self): self.addresses = [] self.domains = [] @@ -97,4 +101,5 @@ class SESBackend(BaseBackend): def get_send_quota(self): return SESQuota(self.sent_message_count) + ses_backend = SESBackend() diff --git a/moto/sns/__init__.py b/moto/sns/__init__.py index a50911e3b..bd36cb23d 100644 --- a/moto/sns/__init__.py +++ b/moto/sns/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import sns_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator sns_backend = sns_backends['us-east-1'] mock_sns = base_decorator(sns_backends) diff --git 
a/moto/sns/models.py b/moto/sns/models.py index d924b1e5d..0ad00928d 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -20,6 +20,7 @@ DEFAULT_PAGE_SIZE = 100 class Topic(object): + def __init__(self, name, sns_backend): self.name = name self.sns_backend = sns_backend @@ -28,7 +29,8 @@ class Topic(object): self.policy = DEFAULT_TOPIC_POLICY self.delivery_policy = "" self.effective_delivery_policy = DEFAULT_EFFECTIVE_DELIVERY_POLICY - self.arn = make_arn_for_topic(self.account_id, name, sns_backend.region_name) + self.arn = make_arn_for_topic( + self.account_id, name, sns_backend.region_name) self.subscriptions_pending = 0 self.subscriptions_confimed = 0 @@ -60,11 +62,13 @@ class Topic(object): properties.get("TopicName") ) for subscription in properties.get("Subscription", []): - sns_backend.subscribe(topic.arn, subscription['Endpoint'], subscription['Protocol']) + sns_backend.subscribe(topic.arn, subscription[ + 'Endpoint'], subscription['Protocol']) return topic class Subscription(object): + def __init__(self, topic, endpoint, protocol): self.topic = topic self.endpoint = endpoint @@ -96,6 +100,7 @@ class Subscription(object): class PlatformApplication(object): + def __init__(self, region, name, platform, attributes): self.region = region self.name = name @@ -112,6 +117,7 @@ class PlatformApplication(object): class PlatformEndpoint(object): + def __init__(self, region, application, custom_user_data, token, attributes): self.region = region self.application = application @@ -125,9 +131,9 @@ class PlatformEndpoint(object): def __fixup_attributes(self): # When AWS returns the attributes dict, it always contains these two elements, so we need to # automatically ensure they exist as well. 
- if not 'Token' in self.attributes: + if 'Token' not in self.attributes: self.attributes['Token'] = self.token - if not 'Enabled' in self.attributes: + if 'Enabled' not in self.attributes: self.attributes['Enabled'] = True @property @@ -147,6 +153,7 @@ class PlatformEndpoint(object): class SNSBackend(BaseBackend): + def __init__(self, region_name): super(SNSBackend, self).__init__() self.topics = OrderedDict() @@ -169,7 +176,8 @@ class SNSBackend(BaseBackend): if next_token is None: next_token = 0 next_token = int(next_token) - values = list(values_map.values())[next_token: next_token + DEFAULT_PAGE_SIZE] + values = list(values_map.values())[ + next_token: next_token + DEFAULT_PAGE_SIZE] if len(values) == DEFAULT_PAGE_SIZE: next_token = next_token + DEFAULT_PAGE_SIZE else: @@ -204,7 +212,8 @@ class SNSBackend(BaseBackend): def list_subscriptions(self, topic_arn=None, next_token=None): if topic_arn: topic = self.get_topic(topic_arn) - filtered = OrderedDict([(k, sub) for k, sub in self.subscriptions.items() if sub.topic == topic]) + filtered = OrderedDict( + [(k, sub) for k, sub in self.subscriptions.items() if sub.topic == topic]) return self._get_values_nexttoken(filtered, next_token) else: return self._get_values_nexttoken(self.subscriptions, next_token) @@ -227,7 +236,8 @@ class SNSBackend(BaseBackend): try: return self.applications[arn] except KeyError: - raise SNSNotFoundError("Application with arn {0} not found".format(arn)) + raise SNSNotFoundError( + "Application with arn {0} not found".format(arn)) def set_application_attributes(self, arn, attributes): application = self.get_application(arn) @@ -241,7 +251,8 @@ class SNSBackend(BaseBackend): self.applications.pop(platform_arn) def create_platform_endpoint(self, region, application, custom_user_data, token, attributes): - platform_endpoint = PlatformEndpoint(region, application, custom_user_data, token, attributes) + platform_endpoint = PlatformEndpoint( + region, application, custom_user_data, token, 
attributes) self.platform_endpoints[platform_endpoint.arn] = platform_endpoint return platform_endpoint @@ -256,7 +267,8 @@ class SNSBackend(BaseBackend): try: return self.platform_endpoints[arn] except KeyError: - raise SNSNotFoundError("Endpoint with arn {0} not found".format(arn)) + raise SNSNotFoundError( + "Endpoint with arn {0} not found".format(arn)) def set_endpoint_attributes(self, arn, attributes): endpoint = self.get_endpoint(arn) @@ -267,7 +279,8 @@ class SNSBackend(BaseBackend): try: del self.platform_endpoints[arn] except KeyError: - raise SNSNotFoundError("Endpoint with arn {0} not found".format(arn)) + raise SNSNotFoundError( + "Endpoint with arn {0} not found".format(arn)) sns_backends = {} diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 9a20dbcb5..edb82e40c 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -109,7 +109,8 @@ class SNSResponse(BaseResponse): attribute_name = self._get_param('AttributeName') attribute_name = camelcase_to_underscores(attribute_name) attribute_value = self._get_param('AttributeValue') - self.backend.set_topic_attribute(topic_arn, attribute_name, attribute_value) + self.backend.set_topic_attribute( + topic_arn, attribute_name, attribute_value) if self.request_json: return json.dumps({ @@ -162,7 +163,8 @@ class SNSResponse(BaseResponse): def list_subscriptions(self): next_token = self._get_param('NextToken') - subscriptions, next_token = self.backend.list_subscriptions(next_token=next_token) + subscriptions, next_token = self.backend.list_subscriptions( + next_token=next_token) if self.request_json: return json.dumps({ @@ -190,7 +192,8 @@ class SNSResponse(BaseResponse): def list_subscriptions_by_topic(self): topic_arn = self._get_param('TopicArn') next_token = self._get_param('NextToken') - subscriptions, next_token = self.backend.list_subscriptions(topic_arn, next_token=next_token) + subscriptions, next_token = self.backend.list_subscriptions( + topic_arn, next_token=next_token) if 
self.request_json: return json.dumps({ @@ -241,7 +244,8 @@ class SNSResponse(BaseResponse): name = self._get_param('Name') platform = self._get_param('Platform') attributes = self._get_attributes() - platform_application = self.backend.create_platform_application(self.region, name, platform, attributes) + platform_application = self.backend.create_platform_application( + self.region, name, platform, attributes) if self.request_json: return json.dumps({ @@ -274,7 +278,8 @@ class SNSResponse(BaseResponse): } }) - template = self.response_template(GET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE) + template = self.response_template( + GET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE) return template.render(application=application) def set_platform_application_attributes(self): @@ -292,7 +297,8 @@ class SNSResponse(BaseResponse): } }) - template = self.response_template(SET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE) + template = self.response_template( + SET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE) return template.render() def list_platform_applications(self): @@ -361,7 +367,8 @@ class SNSResponse(BaseResponse): def list_endpoints_by_platform_application(self): application_arn = self._get_param('PlatformApplicationArn') - endpoints = self.backend.list_endpoints_by_platform_application(application_arn) + endpoints = self.backend.list_endpoints_by_platform_application( + application_arn) if self.request_json: return json.dumps({ @@ -381,7 +388,8 @@ class SNSResponse(BaseResponse): } }) - template = self.response_template(LIST_ENDPOINTS_BY_PLATFORM_APPLICATION_TEMPLATE) + template = self.response_template( + LIST_ENDPOINTS_BY_PLATFORM_APPLICATION_TEMPLATE) return template.render(endpoints=endpoints) def get_endpoint_attributes(self): @@ -438,7 +446,6 @@ class SNSResponse(BaseResponse): return template.render() - CREATE_TOPIC_TEMPLATE = """ {{ topic.arn }} diff --git a/moto/sqs/__init__.py b/moto/sqs/__init__.py index 946ba8f47..46c83133f 100644 --- a/moto/sqs/__init__.py +++ 
b/moto/sqs/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import sqs_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from ..core.models import base_decorator, deprecated_base_decorator sqs_backend = sqs_backends['us-east-1'] mock_sqs = base_decorator(sqs_backends) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 13b8c34b6..5f4833772 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals import hashlib -import time import re from xml.sax.saxutils import escape @@ -18,7 +17,9 @@ from .exceptions import ( DEFAULT_ACCOUNT_ID = 123456789012 DEFAULT_SENDER_ID = "AIDAIT2UOQQY3AUEKVGXU" + class Message(object): + def __init__(self, message_id, body): self.id = message_id self._body = body @@ -122,7 +123,8 @@ class Queue(object): self.last_modified_timestamp = now self.maximum_message_size = 64 << 10 self.message_retention_period = 86400 * 4 # four days - self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name) + self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format( + self.region, self.name) self.receive_message_wait_time_seconds = 0 @classmethod @@ -177,7 +179,8 @@ class Queue(object): def attributes(self): result = {} for attribute in self.camelcase_attributes: - result[attribute] = getattr(self, camelcase_to_underscores(attribute)) + result[attribute] = getattr( + self, camelcase_to_underscores(attribute)) return result @property @@ -201,6 +204,7 @@ class Queue(object): class SQSBackend(BaseBackend): + def __init__(self, region_name): self.region_name = region_name self.queues = {} @@ -214,7 +218,8 @@ class SQSBackend(BaseBackend): def create_queue(self, name, visibility_timeout, wait_time_seconds): queue = self.queues.get(name) if queue is None: - queue = Queue(name, visibility_timeout, wait_time_seconds, self.region_name) + queue = Queue(name, visibility_timeout, + 
wait_time_seconds, self.region_name) self.queues[name] = queue return queue diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index d57ec3430..84886068e 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -27,7 +27,8 @@ class SQSResponse(BaseResponse): @property def attribute(self): if not hasattr(self, '_attribute'): - self._attribute = dict([(a['name'], a['value']) for a in self._get_list_prefix('Attribute')]) + self._attribute = dict([(a['name'], a['value']) + for a in self._get_list_prefix('Attribute')]) return self._attribute def _get_queue_name(self): @@ -59,7 +60,7 @@ class SQSResponse(BaseResponse): def create_queue(self): queue_name = self.querystring.get("QueueName")[0] queue = self.sqs_backend.create_queue(queue_name, visibility_timeout=self.attribute.get('VisibilityTimeout'), - wait_time_seconds=self.attribute.get('WaitTimeSeconds')) + wait_time_seconds=self.attribute.get('WaitTimeSeconds')) template = self.response_template(CREATE_QUEUE_RESPONSE) return template.render(queue=queue) @@ -108,7 +109,8 @@ class SQSResponse(BaseResponse): def set_queue_attributes(self): queue_name = self._get_queue_name() if "Attribute.Name" in self.querystring: - key = camelcase_to_underscores(self.querystring.get("Attribute.Name")[0]) + key = camelcase_to_underscores( + self.querystring.get("Attribute.Name")[0]) value = self.querystring.get("Attribute.Value")[0] self.sqs_backend.set_queue_attribute(queue_name, key, value) for a in self._get_list_prefix("Attribute"): @@ -171,20 +173,25 @@ class SQSResponse(BaseResponse): messages = [] for index in range(1, 11): # Loop through looking for messages - message_key = 'SendMessageBatchRequestEntry.{0}.MessageBody'.format(index) + message_key = 'SendMessageBatchRequestEntry.{0}.MessageBody'.format( + index) message_body = self.querystring.get(message_key) if not message_body: # Found all messages break - message_user_id_key = 'SendMessageBatchRequestEntry.{0}.Id'.format(index) + message_user_id_key = 
'SendMessageBatchRequestEntry.{0}.Id'.format( + index) message_user_id = self.querystring.get(message_user_id_key)[0] - delay_key = 'SendMessageBatchRequestEntry.{0}.DelaySeconds'.format(index) + delay_key = 'SendMessageBatchRequestEntry.{0}.DelaySeconds'.format( + index) delay_seconds = self.querystring.get(delay_key, [None])[0] - message = self.sqs_backend.send_message(queue_name, message_body[0], delay_seconds=delay_seconds) + message = self.sqs_backend.send_message( + queue_name, message_body[0], delay_seconds=delay_seconds) message.user_id = message_user_id - message_attributes = parse_message_attributes(self.querystring, base='SendMessageBatchRequestEntry.{0}.'.format(index)) + message_attributes = parse_message_attributes( + self.querystring, base='SendMessageBatchRequestEntry.{0}.'.format(index)) if type(message_attributes) == tuple: return message_attributes[0], message_attributes[1] message.message_attributes = message_attributes @@ -216,7 +223,8 @@ class SQSResponse(BaseResponse): message_ids = [] for index in range(1, 11): # Loop through looking for messages - receipt_key = 'DeleteMessageBatchRequestEntry.{0}.ReceiptHandle'.format(index) + receipt_key = 'DeleteMessageBatchRequestEntry.{0}.ReceiptHandle'.format( + index) receipt_handle = self.querystring.get(receipt_key) if not receipt_handle: # Found all messages @@ -224,7 +232,8 @@ class SQSResponse(BaseResponse): self.sqs_backend.delete_message(queue_name, receipt_handle[0]) - message_user_id_key = 'DeleteMessageBatchRequestEntry.{0}.Id'.format(index) + message_user_id_key = 'DeleteMessageBatchRequestEntry.{0}.Id'.format( + index) message_user_id = self.querystring.get(message_user_id_key)[0] message_ids.append(message_user_id) @@ -258,7 +267,8 @@ class SQSResponse(BaseResponse): except ValueError: return ERROR_MAX_VISIBILITY_TIMEOUT_RESPONSE, dict(status=400) - messages = self.sqs_backend.receive_messages(queue_name, message_count, wait_time, visibility_timeout) + messages = 
self.sqs_backend.receive_messages( + queue_name, message_count, wait_time, visibility_timeout) template = self.response_template(RECEIVE_MESSAGE_RESPONSE) output = template.render(messages=messages) return output @@ -444,7 +454,8 @@ ERROR_TOO_LONG_RESPONSE = """ diff --git a/moto/sqs/utils.py b/moto/sqs/utils.py index a00ec1c79..78be5f629 100644 --- a/moto/sqs/utils.py +++ b/moto/sqs/utils.py @@ -22,25 +22,32 @@ def parse_message_attributes(querystring, base='', value_namespace='Value.'): # Found all attributes break - data_type_key = base + 'MessageAttribute.{0}.{1}DataType'.format(index, value_namespace) + data_type_key = base + \ + 'MessageAttribute.{0}.{1}DataType'.format(index, value_namespace) data_type = querystring.get(data_type_key) if not data_type: - raise MessageAttributesInvalid("The message attribute '{0}' must contain non-empty message attribute value.".format(name[0])) + raise MessageAttributesInvalid( + "The message attribute '{0}' must contain non-empty message attribute value.".format(name[0])) data_type_parts = data_type[0].split('.') if len(data_type_parts) > 2 or data_type_parts[0] not in ['String', 'Binary', 'Number']: - raise MessageAttributesInvalid("The message attribute '{0}' has an invalid message attribute type, the set of supported type prefixes is Binary, Number, and String.".format(name[0])) + raise MessageAttributesInvalid( + "The message attribute '{0}' has an invalid message attribute type, the set of supported type prefixes is Binary, Number, and String.".format(name[0])) type_prefix = 'String' if data_type_parts[0] == 'Binary': type_prefix = 'Binary' - value_key = base + 'MessageAttribute.{0}.{1}{2}Value'.format(index, value_namespace, type_prefix) + value_key = base + \ + 'MessageAttribute.{0}.{1}{2}Value'.format( + index, value_namespace, type_prefix) value = querystring.get(value_key) if not value: - raise MessageAttributesInvalid("The message attribute '{0}' must contain non-empty message attribute value for message 
attribute type '{1}'.".format(name[0], data_type[0])) + raise MessageAttributesInvalid( + "The message attribute '{0}' must contain non-empty message attribute value for message attribute type '{1}'.".format(name[0], data_type[0])) - message_attributes[name[0]] = {'data_type': data_type[0], type_prefix.lower() + '_value': value[0]} + message_attributes[name[0]] = {'data_type': data_type[ + 0], type_prefix.lower() + '_value': value[0]} index += 1 diff --git a/moto/sts/models.py b/moto/sts/models.py index 9ce629c91..f1c6401d2 100644 --- a/moto/sts/models.py +++ b/moto/sts/models.py @@ -5,6 +5,7 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds class Token(object): + def __init__(self, duration, name=None, policy=None): now = datetime.datetime.utcnow() self.expiration = now + datetime.timedelta(seconds=duration) @@ -17,6 +18,7 @@ class Token(object): class AssumedRole(object): + def __init__(self, role_session_name, role_arn, policy, duration, external_id): self.session_name = role_session_name self.arn = role_arn @@ -31,6 +33,7 @@ class AssumedRole(object): class STSBackend(BaseBackend): + def get_session_token(self, duration): token = Token(duration=duration) return token @@ -43,4 +46,5 @@ class STSBackend(BaseBackend): role = AssumedRole(**kwargs) return role + sts_backend = STSBackend() diff --git a/moto/sts/responses.py b/moto/sts/responses.py index d721bfaaa..a5abb6b81 100644 --- a/moto/sts/responses.py +++ b/moto/sts/responses.py @@ -43,6 +43,7 @@ class TokenResponse(BaseResponse): template = self.response_template(GET_CALLER_IDENTITY_RESPONSE) return template.render() + GET_SESSION_TOKEN_RESPONSE = """ diff --git a/moto/swf/__init__.py b/moto/swf/__init__.py index 5ac59fbb6..0d626690a 100644 --- a/moto/swf/__init__.py +++ b/moto/swf/__init__.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .models import swf_backends -from ..core.models import MockAWS, base_decorator, HttprettyMockAWS, deprecated_base_decorator +from 
..core.models import base_decorator, deprecated_base_decorator swf_backend = swf_backends['us-east-1'] mock_swf = base_decorator(swf_backends) diff --git a/moto/swf/exceptions.py b/moto/swf/exceptions.py index 8bc5c0c9a..232b1f237 100644 --- a/moto/swf/exceptions.py +++ b/moto/swf/exceptions.py @@ -8,6 +8,7 @@ class SWFClientError(JsonRESTError): class SWFUnknownResourceFault(SWFClientError): + def __init__(self, resource_type, resource_name=None): if resource_name: message = "Unknown {0}: {1}".format(resource_type, resource_name) @@ -20,6 +21,7 @@ class SWFUnknownResourceFault(SWFClientError): class SWFDomainAlreadyExistsFault(SWFClientError): + def __init__(self, domain_name): super(SWFDomainAlreadyExistsFault, self).__init__( "com.amazonaws.swf.base.model#DomainAlreadyExistsFault", @@ -28,6 +30,7 @@ class SWFDomainAlreadyExistsFault(SWFClientError): class SWFDomainDeprecatedFault(SWFClientError): + def __init__(self, domain_name): super(SWFDomainDeprecatedFault, self).__init__( "com.amazonaws.swf.base.model#DomainDeprecatedFault", @@ -36,9 +39,11 @@ class SWFDomainDeprecatedFault(SWFClientError): class SWFSerializationException(SWFClientError): + def __init__(self, value): message = "class java.lang.Foo can not be converted to an String " - message += " (not a real SWF exception ; happened on: {0})".format(value) + message += " (not a real SWF exception ; happened on: {0})".format( + value) __type = "com.amazonaws.swf.base.model#SerializationException" super(SWFSerializationException, self).__init__( __type, @@ -47,22 +52,27 @@ class SWFSerializationException(SWFClientError): class SWFTypeAlreadyExistsFault(SWFClientError): + def __init__(self, _type): super(SWFTypeAlreadyExistsFault, self).__init__( "com.amazonaws.swf.base.model#TypeAlreadyExistsFault", - "{0}=[name={1}, version={2}]".format(_type.__class__.__name__, _type.name, _type.version), + "{0}=[name={1}, version={2}]".format( + _type.__class__.__name__, _type.name, _type.version), ) class 
SWFTypeDeprecatedFault(SWFClientError): + def __init__(self, _type): super(SWFTypeDeprecatedFault, self).__init__( "com.amazonaws.swf.base.model#TypeDeprecatedFault", - "{0}=[name={1}, version={2}]".format(_type.__class__.__name__, _type.name, _type.version), + "{0}=[name={1}, version={2}]".format( + _type.__class__.__name__, _type.name, _type.version), ) class SWFWorkflowExecutionAlreadyStartedFault(SWFClientError): + def __init__(self): super(SWFWorkflowExecutionAlreadyStartedFault, self).__init__( "com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault", @@ -71,6 +81,7 @@ class SWFWorkflowExecutionAlreadyStartedFault(SWFClientError): class SWFDefaultUndefinedFault(SWFClientError): + def __init__(self, key): # TODO: move that into moto.core.utils maybe? words = key.split("_") @@ -84,6 +95,7 @@ class SWFDefaultUndefinedFault(SWFClientError): class SWFValidationException(SWFClientError): + def __init__(self, message): super(SWFValidationException, self).__init__( "com.amazon.coral.validate#ValidationException", @@ -92,6 +104,7 @@ class SWFValidationException(SWFClientError): class SWFDecisionValidationException(SWFClientError): + def __init__(self, problems): # messages messages = [] @@ -109,7 +122,8 @@ class SWFDecisionValidationException(SWFClientError): ) else: raise ValueError( - "Unhandled decision constraint type: {0}".format(pb["type"]) + "Unhandled decision constraint type: {0}".format(pb[ + "type"]) ) # prefix count = len(problems) @@ -124,5 +138,6 @@ class SWFDecisionValidationException(SWFClientError): class SWFWorkflowExecutionClosedError(Exception): + def __str__(self): return repr("Cannot change this object because the WorkflowExecution is closed") diff --git a/moto/swf/models/__init__.py b/moto/swf/models/__init__.py index 61fe5f52a..833596a23 100644 --- a/moto/swf/models/__init__.py +++ b/moto/swf/models/__init__.py @@ -12,15 +12,15 @@ from ..exceptions import ( SWFTypeDeprecatedFault, SWFValidationException, ) -from .activity_task import 
ActivityTask -from .activity_type import ActivityType -from .decision_task import DecisionTask -from .domain import Domain -from .generic_type import GenericType -from .history_event import HistoryEvent -from .timeout import Timeout -from .workflow_type import WorkflowType -from .workflow_execution import WorkflowExecution +from .activity_task import ActivityTask # flake8: noqa +from .activity_type import ActivityType # flake8: noqa +from .decision_task import DecisionTask # flake8: noqa +from .domain import Domain # flake8: noqa +from .generic_type import GenericType # flake8: noqa +from .history_event import HistoryEvent # flake8: noqa +from .timeout import Timeout # flake8: noqa +from .workflow_type import WorkflowType # flake8: noqa +from .workflow_execution import WorkflowExecution # flake8: noqa KNOWN_SWF_TYPES = { @@ -30,6 +30,7 @@ KNOWN_SWF_TYPES = { class SWFBackend(BaseBackend): + def __init__(self, region_name): self.region_name = region_name self.domains = [] @@ -246,7 +247,8 @@ class SWFBackend(BaseBackend): if decision_task.state != "STARTED": if decision_task.state == "COMPLETED": raise SWFUnknownResourceFault( - "decision task, scheduledEventId = {0}".format(decision_task.scheduled_event_id) + "decision task, scheduledEventId = {0}".format( + decision_task.scheduled_event_id) ) else: raise ValueError( @@ -300,7 +302,8 @@ class SWFBackend(BaseBackend): count = 0 for _task_list, tasks in domain.activity_task_lists.items(): if _task_list == task_list: - pending = [t for t in tasks if t.state in ["SCHEDULED", "STARTED"]] + pending = [t for t in tasks if t.state in [ + "SCHEDULED", "STARTED"]] count += len(pending) return count @@ -330,7 +333,8 @@ class SWFBackend(BaseBackend): if activity_task.state != "STARTED": if activity_task.state == "COMPLETED": raise SWFUnknownResourceFault( - "activity, scheduledEventId = {0}".format(activity_task.scheduled_event_id) + "activity, scheduledEventId = {0}".format( + activity_task.scheduled_event_id) ) else: raise 
ValueError( @@ -354,15 +358,18 @@ class SWFBackend(BaseBackend): self._process_timeouts() activity_task = self._find_activity_task_from_token(task_token) wfe = activity_task.workflow_execution - wfe.fail_activity_task(activity_task.task_token, reason=reason, details=details) + wfe.fail_activity_task(activity_task.task_token, + reason=reason, details=details) def terminate_workflow_execution(self, domain_name, workflow_id, child_policy=None, details=None, reason=None, run_id=None): # process timeouts on all objects self._process_timeouts() domain = self._get_domain(domain_name) - wfe = domain.get_workflow_execution(workflow_id, run_id=run_id, raise_if_closed=True) - wfe.terminate(child_policy=child_policy, details=details, reason=reason) + wfe = domain.get_workflow_execution( + workflow_id, run_id=run_id, raise_if_closed=True) + wfe.terminate(child_policy=child_policy, + details=details, reason=reason) def record_activity_task_heartbeat(self, task_token, details=None): # process timeouts on all objects diff --git a/moto/swf/models/activity_task.py b/moto/swf/models/activity_task.py index eb361d258..e205cc07a 100644 --- a/moto/swf/models/activity_task.py +++ b/moto/swf/models/activity_task.py @@ -9,6 +9,7 @@ from .timeout import Timeout class ActivityTask(object): + def __init__(self, activity_id, activity_type, scheduled_event_id, workflow_execution, timeouts, input=None): self.activity_id = activity_id diff --git a/moto/swf/models/activity_type.py b/moto/swf/models/activity_type.py index 95a83ca7a..eb1bbfa68 100644 --- a/moto/swf/models/activity_type.py +++ b/moto/swf/models/activity_type.py @@ -2,6 +2,7 @@ from .generic_type import GenericType class ActivityType(GenericType): + @property def _configuration_keys(self): return [ diff --git a/moto/swf/models/decision_task.py b/moto/swf/models/decision_task.py index bcd28f372..13bddfd7a 100644 --- a/moto/swf/models/decision_task.py +++ b/moto/swf/models/decision_task.py @@ -9,6 +9,7 @@ from .timeout import Timeout 
class DecisionTask(object): + def __init__(self, workflow_execution, scheduled_event_id): self.workflow_execution = workflow_execution self.workflow_type = workflow_execution.workflow_type @@ -60,7 +61,8 @@ class DecisionTask(object): if not self.started or not self.workflow_execution.open: return None # TODO: handle the "NONE" case - start_to_close_at = self.started_timestamp + int(self.start_to_close_timeout) + start_to_close_at = self.started_timestamp + \ + int(self.start_to_close_timeout) _timeout = Timeout(self, start_to_close_at, "START_TO_CLOSE") if _timeout.reached: return _timeout diff --git a/moto/swf/models/domain.py b/moto/swf/models/domain.py index 4efdc3150..ed7154067 100644 --- a/moto/swf/models/domain.py +++ b/moto/swf/models/domain.py @@ -8,6 +8,7 @@ from ..exceptions import ( class Domain(object): + def __init__(self, name, retention, description=None): self.name = name self.retention = retention diff --git a/moto/swf/models/generic_type.py b/moto/swf/models/generic_type.py index 7c8389fbe..2ae98bb53 100644 --- a/moto/swf/models/generic_type.py +++ b/moto/swf/models/generic_type.py @@ -4,6 +4,7 @@ from moto.core.utils import camelcase_to_underscores class GenericType(object): + def __init__(self, name, version, **kwargs): self.name = name self.version = version diff --git a/moto/swf/models/history_event.py b/moto/swf/models/history_event.py index b181297f7..e841ca38e 100644 --- a/moto/swf/models/history_event.py +++ b/moto/swf/models/history_event.py @@ -28,10 +28,12 @@ SUPPORTED_HISTORY_EVENT_TYPES = ( class HistoryEvent(object): + def __init__(self, event_id, event_type, event_timestamp=None, **kwargs): if event_type not in SUPPORTED_HISTORY_EVENT_TYPES: raise NotImplementedError( - "HistoryEvent does not implement attributes for type '{0}'".format(event_type) + "HistoryEvent does not implement attributes for type '{0}'".format( + event_type) ) self.event_id = event_id self.event_type = event_type diff --git a/moto/swf/models/timeout.py 
b/moto/swf/models/timeout.py index cf0283760..09e0f6772 100644 --- a/moto/swf/models/timeout.py +++ b/moto/swf/models/timeout.py @@ -2,6 +2,7 @@ from moto.core.utils import unix_time class Timeout(object): + def __init__(self, obj, timestamp, kind): self.obj = obj self.timestamp = timestamp diff --git a/moto/swf/models/workflow_execution.py b/moto/swf/models/workflow_execution.py index a30c2e18d..8b8acda4e 100644 --- a/moto/swf/models/workflow_execution.py +++ b/moto/swf/models/workflow_execution.py @@ -64,9 +64,12 @@ class WorkflowExecution(object): # NB: the order follows boto/SWF order of exceptions appearance (if no # param is set, # SWF will raise DefaultUndefinedFault errors in the # same order as the few lines that follow) - self._set_from_kwargs_or_workflow_type(kwargs, "execution_start_to_close_timeout") - self._set_from_kwargs_or_workflow_type(kwargs, "task_list", "task_list") - self._set_from_kwargs_or_workflow_type(kwargs, "task_start_to_close_timeout") + self._set_from_kwargs_or_workflow_type( + kwargs, "execution_start_to_close_timeout") + self._set_from_kwargs_or_workflow_type( + kwargs, "task_list", "task_list") + self._set_from_kwargs_or_workflow_type( + kwargs, "task_start_to_close_timeout") self._set_from_kwargs_or_workflow_type(kwargs, "child_policy") self.input = kwargs.get("input") # counters @@ -368,13 +371,16 @@ class WorkflowExecution(object): # check decision types mandatory attributes # NB: the real SWF service seems to check attributes even for attributes list # that are not in line with the decisionType, so we do the same - attrs_to_check = [d for d in dcs.keys() if d.endswith("DecisionAttributes")] + attrs_to_check = [ + d for d in dcs.keys() if d.endswith("DecisionAttributes")] if dcs["decisionType"] in self.KNOWN_DECISION_TYPES: decision_type = dcs["decisionType"] - decision_attr = "{0}DecisionAttributes".format(decapitalize(decision_type)) + decision_attr = "{0}DecisionAttributes".format( + decapitalize(decision_type)) 
attrs_to_check.append(decision_attr) for attr in attrs_to_check: - problems += self._check_decision_attributes(attr, dcs.get(attr, {}), decision_number) + problems += self._check_decision_attributes( + attr, dcs.get(attr, {}), decision_number) # check decision type is correct if dcs["decisionType"] not in self.KNOWN_DECISION_TYPES: problems.append({ @@ -396,12 +402,14 @@ class WorkflowExecution(object): # handle each decision separately, in order for decision in decisions: decision_type = decision["decisionType"] - attributes_key = "{0}DecisionAttributes".format(decapitalize(decision_type)) + attributes_key = "{0}DecisionAttributes".format( + decapitalize(decision_type)) attributes = decision.get(attributes_key, {}) if decision_type == "CompleteWorkflowExecution": self.complete(event_id, attributes.get("result")) elif decision_type == "FailWorkflowExecution": - self.fail(event_id, attributes.get("details"), attributes.get("reason")) + self.fail(event_id, attributes.get( + "details"), attributes.get("reason")) elif decision_type == "ScheduleActivityTask": self.schedule_activity_task(event_id, attributes) else: @@ -415,7 +423,8 @@ class WorkflowExecution(object): # TODO: implement Decision type: SignalExternalWorkflowExecution # TODO: implement Decision type: StartChildWorkflowExecution # TODO: implement Decision type: StartTimer - raise NotImplementedError("Cannot handle decision: {0}".format(decision_type)) + raise NotImplementedError( + "Cannot handle decision: {0}".format(decision_type)) # finally decrement counter if and only if everything went well self.open_counts["openDecisionTasks"] -= 1 @@ -447,7 +456,8 @@ class WorkflowExecution(object): def fail_schedule_activity_task(_type, _cause): # TODO: implement other possible failure mode: OPEN_ACTIVITIES_LIMIT_EXCEEDED # NB: some failure modes are not implemented and probably won't be implemented in - # the future, such as ACTIVITY_CREATION_RATE_EXCEEDED or OPERATION_NOT_PERMITTED + # the future, such as 
ACTIVITY_CREATION_RATE_EXCEEDED or + # OPERATION_NOT_PERMITTED self._add_event( "ScheduleActivityTaskFailed", activity_id=attributes["activityId"], @@ -591,13 +601,15 @@ class WorkflowExecution(object): def first_timeout(self): if not self.open or not self.start_timestamp: return None - start_to_close_at = self.start_timestamp + int(self.execution_start_to_close_timeout) + start_to_close_at = self.start_timestamp + \ + int(self.execution_start_to_close_timeout) _timeout = Timeout(self, start_to_close_at, "START_TO_CLOSE") if _timeout.reached: return _timeout def timeout(self, timeout): - # TODO: process child policy on child workflows here or in the triggering function + # TODO: process child policy on child workflows here or in the + # triggering function self.execution_status = "CLOSED" self.close_status = "TIMED_OUT" self.timeout_type = timeout.kind diff --git a/moto/swf/models/workflow_type.py b/moto/swf/models/workflow_type.py index ddb2475b2..18d18d415 100644 --- a/moto/swf/models/workflow_type.py +++ b/moto/swf/models/workflow_type.py @@ -2,6 +2,7 @@ from .generic_type import GenericType class WorkflowType(GenericType): + @property def _configuration_keys(self): return [ diff --git a/moto/swf/responses.py b/moto/swf/responses.py index 92d4957fd..1ee89bfc1 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -64,7 +64,8 @@ class SWFResponse(BaseResponse): reverse_order = self._params.get("reverseOrder", None) self._check_string(domain_name) self._check_string(status) - types = self.swf_backend.list_types(kind, domain_name, status, reverse_order=reverse_order) + types = self.swf_backend.list_types( + kind, domain_name, status, reverse_order=reverse_order) return json.dumps({ "typeInfos": [_type.to_medium_dict() for _type in types] }) @@ -97,7 +98,8 @@ class SWFResponse(BaseResponse): status = self._params["registrationStatus"] self._check_string(status) reverse_order = self._params.get("reverseOrder", None) - domains = 
self.swf_backend.list_domains(status, reverse_order=reverse_order) + domains = self.swf_backend.list_domains( + status, reverse_order=reverse_order) return json.dumps({ "domainInfos": [domain.to_short_dict() for domain in domains] }) @@ -107,7 +109,8 @@ class SWFResponse(BaseResponse): start_time_filter = self._params.get('startTimeFilter', None) close_time_filter = self._params.get('closeTimeFilter', None) execution_filter = self._params.get('executionFilter', None) - workflow_id = execution_filter['workflowId'] if execution_filter else None + workflow_id = execution_filter[ + 'workflowId'] if execution_filter else None maximum_page_size = self._params.get('maximumPageSize', 1000) reverse_order = self._params.get('reverseOrder', None) tag_filter = self._params.get('tagFilter', None) @@ -162,7 +165,8 @@ class SWFResponse(BaseResponse): domain = self._params['domain'] start_time_filter = self._params['startTimeFilter'] execution_filter = self._params.get('executionFilter', None) - workflow_id = execution_filter['workflowId'] if execution_filter else None + workflow_id = execution_filter[ + 'workflowId'] if execution_filter else None maximum_page_size = self._params.get('maximumPageSize', 1000) reverse_order = self._params.get('reverseOrder', None) tag_filter = self._params.get('tagFilter', None) @@ -234,10 +238,14 @@ class SWFResponse(BaseResponse): task_list = default_task_list.get("name") else: task_list = None - default_task_heartbeat_timeout = self._params.get("defaultTaskHeartbeatTimeout") - default_task_schedule_to_close_timeout = self._params.get("defaultTaskScheduleToCloseTimeout") - default_task_schedule_to_start_timeout = self._params.get("defaultTaskScheduleToStartTimeout") - default_task_start_to_close_timeout = self._params.get("defaultTaskStartToCloseTimeout") + default_task_heartbeat_timeout = self._params.get( + "defaultTaskHeartbeatTimeout") + default_task_schedule_to_close_timeout = self._params.get( + "defaultTaskScheduleToCloseTimeout") + 
default_task_schedule_to_start_timeout = self._params.get( + "defaultTaskScheduleToStartTimeout") + default_task_start_to_close_timeout = self._params.get( + "defaultTaskStartToCloseTimeout") description = self._params.get("description") self._check_string(domain) @@ -280,8 +288,10 @@ class SWFResponse(BaseResponse): else: task_list = None default_child_policy = self._params.get("defaultChildPolicy") - default_task_start_to_close_timeout = self._params.get("defaultTaskStartToCloseTimeout") - default_execution_start_to_close_timeout = self._params.get("defaultExecutionStartToCloseTimeout") + default_task_start_to_close_timeout = self._params.get( + "defaultTaskStartToCloseTimeout") + default_execution_start_to_close_timeout = self._params.get( + "defaultExecutionStartToCloseTimeout") description = self._params.get("description") self._check_string(domain) @@ -322,10 +332,12 @@ class SWFResponse(BaseResponse): else: task_list = None child_policy = self._params.get("childPolicy") - execution_start_to_close_timeout = self._params.get("executionStartToCloseTimeout") + execution_start_to_close_timeout = self._params.get( + "executionStartToCloseTimeout") input_ = self._params.get("input") tag_list = self._params.get("tagList") - task_start_to_close_timeout = self._params.get("taskStartToCloseTimeout") + task_start_to_close_timeout = self._params.get( + "taskStartToCloseTimeout") self._check_string(domain) self._check_string(workflow_id) @@ -360,7 +372,8 @@ class SWFResponse(BaseResponse): self._check_string(run_id) self._check_string(workflow_id) - wfe = self.swf_backend.describe_workflow_execution(domain_name, run_id, workflow_id) + wfe = self.swf_backend.describe_workflow_execution( + domain_name, run_id, workflow_id) return json.dumps(wfe.to_full_dict()) def get_workflow_execution_history(self): @@ -369,7 +382,8 @@ class SWFResponse(BaseResponse): run_id = _workflow_execution["runId"] workflow_id = _workflow_execution["workflowId"] reverse_order = 
self._params.get("reverseOrder", None) - wfe = self.swf_backend.describe_workflow_execution(domain_name, run_id, workflow_id) + wfe = self.swf_backend.describe_workflow_execution( + domain_name, run_id, workflow_id) events = wfe.events(reverse_order=reverse_order) return json.dumps({ "events": [evt.to_dict() for evt in events] @@ -399,7 +413,8 @@ class SWFResponse(BaseResponse): task_list = self._params["taskList"]["name"] self._check_string(domain_name) self._check_string(task_list) - count = self.swf_backend.count_pending_decision_tasks(domain_name, task_list) + count = self.swf_backend.count_pending_decision_tasks( + domain_name, task_list) return json.dumps({"count": count, "truncated": False}) def respond_decision_task_completed(self): @@ -435,7 +450,8 @@ class SWFResponse(BaseResponse): task_list = self._params["taskList"]["name"] self._check_string(domain_name) self._check_string(task_list) - count = self.swf_backend.count_pending_activity_tasks(domain_name, task_list) + count = self.swf_backend.count_pending_activity_tasks( + domain_name, task_list) return json.dumps({"count": count, "truncated": False}) def respond_activity_task_completed(self): @@ -453,7 +469,8 @@ class SWFResponse(BaseResponse): reason = self._params.get("reason") details = self._params.get("details") self._check_string(task_token) - # TODO: implement length limits on reason and details (common pb with client libs) + # TODO: implement length limits on reason and details (common pb with + # client libs) self._check_none_or_string(reason) self._check_none_or_string(details) self.swf_backend.respond_activity_task_failed( diff --git a/tests/backport_assert_raises.py b/tests/backport_assert_raises.py index 6ceacaa89..9b20edf9d 100644 --- a/tests/backport_assert_raises.py +++ b/tests/backport_assert_raises.py @@ -19,6 +19,7 @@ try: except TypeError: # this version of assert_raises doesn't support the 1-arg version class AssertRaisesContext(object): + def __init__(self, expected): self.expected 
= expected diff --git a/tests/helpers.py b/tests/helpers.py index 33509c06e..50615b094 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -8,13 +8,15 @@ def version_tuple(v): return tuple(map(int, (v.split(".")))) -# Note: See https://github.com/spulec/moto/issues/201 for why this is a separate method. +# Note: See https://github.com/spulec/moto/issues/201 for why this is a +# separate method. def skip_test(): raise SkipTest class requires_boto_gte(object): """Decorator for requiring boto version greater than or equal to 'version'""" + def __init__(self, version): self.version = version @@ -27,6 +29,7 @@ class requires_boto_gte(object): class disable_on_py3(object): + def __call__(self, test): if not six.PY3: return test diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index e52bfe0d7..11230658b 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -72,13 +72,15 @@ def test_create_resource(): api_id = response['id'] resources = client.get_resources(restApiId=api_id) - root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id'] + root_id = [resource for resource in resources[ + 'items'] if resource['path'] == '/'][0]['id'] root_resource = client.get_resource( restApiId=api_id, resourceId=root_id, ) - root_resource['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + root_resource['ResponseMetadata'].pop('HTTPHeaders', None) root_resource['ResponseMetadata'].pop('RetryAttempts', None) root_resource.should.equal({ 'path': '/', @@ -97,7 +99,8 @@ def test_create_resource(): resources = client.get_resources(restApiId=api_id)['items'] len(resources).should.equal(2) - non_root_resource = [resource for resource in resources if resource['path'] != '/'][0] + non_root_resource = [ + resource for resource in resources if resource['path'] != '/'][0] response = 
client.delete_resource( restApiId=api_id, @@ -117,7 +120,8 @@ def test_child_resource(): api_id = response['id'] resources = client.get_resources(restApiId=api_id) - root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id'] + root_id = [resource for resource in resources[ + 'items'] if resource['path'] == '/'][0]['id'] response = client.create_resource( restApiId=api_id, @@ -137,7 +141,8 @@ def test_child_resource(): restApiId=api_id, resourceId=tags_id, ) - child_resource['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + child_resource['ResponseMetadata'].pop('HTTPHeaders', None) child_resource['ResponseMetadata'].pop('RetryAttempts', None) child_resource.should.equal({ 'path': '/users/tags', @@ -159,7 +164,8 @@ def test_create_method(): api_id = response['id'] resources = client.get_resources(restApiId=api_id) - root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id'] + root_id = [resource for resource in resources[ + 'items'] if resource['path'] == '/'][0]['id'] client.put_method( restApiId=api_id, @@ -174,7 +180,8 @@ def test_create_method(): httpMethod='GET' ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'httpMethod': 'GET', @@ -193,7 +200,8 @@ def test_create_method_response(): api_id = response['id'] resources = client.get_resources(restApiId=api_id) - root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id'] + root_id = [resource for resource in resources[ + 'items'] if resource['path'] == '/'][0]['id'] client.put_method( restApiId=api_id, @@ -214,7 +222,8 @@ def test_create_method_response(): httpMethod='GET', statusCode='200', ) 
- response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'ResponseMetadata': {'HTTPStatusCode': 200}, @@ -227,7 +236,8 @@ def test_create_method_response(): httpMethod='GET', statusCode='200', ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'ResponseMetadata': {'HTTPStatusCode': 200}, @@ -240,7 +250,8 @@ def test_create_method_response(): httpMethod='GET', statusCode='200', ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({'ResponseMetadata': {'HTTPStatusCode': 200}}) @@ -255,7 +266,8 @@ def test_integrations(): api_id = response['id'] resources = client.get_resources(restApiId=api_id) - root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id'] + root_id = [resource for resource in resources[ + 'items'] if resource['path'] == '/'][0]['id'] client.put_method( restApiId=api_id, @@ -278,7 +290,8 @@ def test_integrations(): type='HTTP', uri='http://httpbin.org/robots.txt', ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'ResponseMetadata': {'HTTPStatusCode': 200}, @@ -300,7 +313,8 @@ def test_integrations(): 
resourceId=root_id, httpMethod='GET' ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'ResponseMetadata': {'HTTPStatusCode': 200}, @@ -321,7 +335,8 @@ def test_integrations(): restApiId=api_id, resourceId=root_id, ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response['resourceMethods']['GET']['methodIntegration'].should.equal({ 'httpMethod': 'GET', @@ -359,7 +374,8 @@ def test_integrations(): ) templates = { - # example based on http://docs.aws.amazon.com/apigateway/latest/developerguide/api-as-kinesis-proxy-export-swagger-with-extensions.html + # example based on + # http://docs.aws.amazon.com/apigateway/latest/developerguide/api-as-kinesis-proxy-export-swagger-with-extensions.html 'application/json': "{\n \"StreamName\": \"$input.params('stream-name')\",\n \"Records\": []\n}" } test_uri = 'http://example.com/foobar.txt' @@ -371,7 +387,8 @@ def test_integrations(): uri=test_uri, requestTemplates=templates ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response['ResponseMetadata'].should.equal({'HTTPStatusCode': 200}) @@ -394,7 +411,8 @@ def test_integration_response(): api_id = response['id'] resources = client.get_resources(restApiId=api_id) - root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id'] + root_id = [resource for resource in resources[ + 'items'] if 
resource['path'] == '/'][0]['id'] client.put_method( restApiId=api_id, @@ -425,7 +443,8 @@ def test_integration_response(): statusCode='200', selectionPattern='foobar', ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'statusCode': '200', @@ -442,7 +461,8 @@ def test_integration_response(): httpMethod='GET', statusCode='200', ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'statusCode': '200', @@ -458,7 +478,8 @@ def test_integration_response(): resourceId=root_id, httpMethod='GET', ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response['methodIntegration']['integrationResponses'].should.equal({ '200': { @@ -506,23 +527,24 @@ def test_update_stage_configuration(): restApiId=api_id, deploymentId=deployment_id, ) - response.pop('createdDate',None) # createdDate is hard to match against, remove it - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # createdDate is hard to match against, remove it + response.pop('createdDate', None) + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'id': deployment_id, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description' : '1.0.1' + 'description': '1.0.1' }) 
response = client.create_deployment( - restApiId=api_id, - stageName=stage_name, - description="1.0.2" - ) + restApiId=api_id, + stageName=stage_name, + description="1.0.2" + ) deployment_id2 = response['id'] - stage = client.get_stage( restApiId=api_id, stageName=stage_name @@ -531,11 +553,11 @@ def test_update_stage_configuration(): stage['deploymentId'].should.equal(deployment_id2) stage.shouldnt.have.key('cacheClusterSize') - client.update_stage(restApiId=api_id,stageName=stage_name, + client.update_stage(restApiId=api_id, stageName=stage_name, patchOperations=[ { - "op" : "replace", - "path" : "/cacheClusterEnabled", + "op": "replace", + "path": "/cacheClusterEnabled", "value": "True" } ]) @@ -547,11 +569,11 @@ def test_update_stage_configuration(): stage.should.have.key('cacheClusterSize').which.should.equal("0.5") - client.update_stage(restApiId=api_id,stageName=stage_name, + client.update_stage(restApiId=api_id, stageName=stage_name, patchOperations=[ { - "op" : "replace", - "path" : "/cacheClusterSize", + "op": "replace", + "path": "/cacheClusterSize", "value": "1.6" } ]) @@ -563,56 +585,55 @@ def test_update_stage_configuration(): stage.should.have.key('cacheClusterSize').which.should.equal("1.6") - - client.update_stage(restApiId=api_id,stageName=stage_name, + client.update_stage(restApiId=api_id, stageName=stage_name, patchOperations=[ { - "op" : "replace", - "path" : "/deploymentId", + "op": "replace", + "path": "/deploymentId", "value": deployment_id }, { - "op" : "replace", - "path" : "/variables/environment", - "value" : "dev" + "op": "replace", + "path": "/variables/environment", + "value": "dev" }, { - "op" : "replace", - "path" : "/variables/region", - "value" : "eu-west-1" + "op": "replace", + "path": "/variables/region", + "value": "eu-west-1" }, { - "op" : "replace", - "path" : "/*/*/caching/dataEncrypted", - "value" : "True" + "op": "replace", + "path": "/*/*/caching/dataEncrypted", + "value": "True" }, { - "op" : "replace", - "path" : 
"/cacheClusterEnabled", - "value" : "True" + "op": "replace", + "path": "/cacheClusterEnabled", + "value": "True" }, { - "op" : "replace", - "path" : "/description", - "value" : "stage description update" + "op": "replace", + "path": "/description", + "value": "stage description update" }, { - "op" : "replace", - "path" : "/cacheClusterSize", - "value" : "1.6" + "op": "replace", + "path": "/cacheClusterSize", + "value": "1.6" } ]) - client.update_stage(restApiId=api_id,stageName=stage_name, + client.update_stage(restApiId=api_id, stageName=stage_name, patchOperations=[ { - "op" : "remove", - "path" : "/variables/region", - "value" : "eu-west-1" + "op": "remove", + "path": "/variables/region", + "value": "eu-west-1" } ]) - stage = client.get_stage(restApiId=api_id,stageName=stage_name) + stage = client.get_stage(restApiId=api_id, stageName=stage_name) stage['description'].should.match('stage description update') stage['cacheClusterSize'].should.equal("1.6") @@ -621,21 +642,23 @@ def test_update_stage_configuration(): stage['cacheClusterEnabled'].should.be.true stage['deploymentId'].should.match(deployment_id) stage['methodSettings'].should.have.key('*/*') - stage['methodSettings']['*/*'].should.have.key('cacheDataEncrypted').which.should.be.true + stage['methodSettings'][ + '*/*'].should.have.key('cacheDataEncrypted').which.should.be.true try: - client.update_stage(restApiId=api_id,stageName=stage_name, - patchOperations=[ - { - "op" : "add", - "path" : "/notasetting", - "value" : "eu-west-1" - } - ]) - assert False.should.be.ok #Fail, should not be here + client.update_stage(restApiId=api_id, stageName=stage_name, + patchOperations=[ + { + "op": "add", + "path": "/notasetting", + "value": "eu-west-1" + } + ]) + assert False.should.be.ok # Fail, should not be here except Exception: assert True.should.be.ok + @mock_apigateway def test_non_existent_stage(): client = boto3.client('apigateway', region_name='us-west-2') @@ -645,9 +668,8 @@ def test_non_existent_stage(): 
) api_id = response['id'] - - client.get_stage.when.called_with(restApiId=api_id,stageName='xxx').should.throw(ClientError) - + client.get_stage.when.called_with( + restApiId=api_id, stageName='xxx').should.throw(ClientError) @mock_apigateway @@ -670,13 +692,15 @@ def test_create_stage(): restApiId=api_id, deploymentId=deployment_id, ) - response.pop('createdDate',None) # createdDate is hard to match against, remove it - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # createdDate is hard to match against, remove it + response.pop('createdDate', None) + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'id': deployment_id, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description' : '' + 'description': '' }) response = client.create_deployment( @@ -686,34 +710,37 @@ def test_create_stage(): deployment_id2 = response['id'] - response = client.get_deployments( restApiId=api_id, ) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response['items'][0].pop('createdDate') response['items'][1].pop('createdDate') - response['items'][0]['id'].should.match(r"{0}|{1}".format(deployment_id2,deployment_id)) - response['items'][1]['id'].should.match(r"{0}|{1}".format(deployment_id2,deployment_id)) - + response['items'][0]['id'].should.match( + r"{0}|{1}".format(deployment_id2, deployment_id)) + response['items'][1]['id'].should.match( + r"{0}|{1}".format(deployment_id2, deployment_id)) new_stage_name = 'current' - response = client.create_stage(restApiId=api_id,stageName=new_stage_name,deploymentId=deployment_id2) + response = client.create_stage( + restApiId=api_id, 
stageName=new_stage_name, deploymentId=deployment_id2) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ - 'stageName':new_stage_name, - 'deploymentId':deployment_id2, - 'methodSettings':{}, - 'variables':{}, + 'stageName': new_stage_name, + 'deploymentId': deployment_id2, + 'methodSettings': {}, + 'variables': {}, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description':'', - 'cacheClusterEnabled':False + 'description': '', + 'cacheClusterEnabled': False }) stage = client.get_stage( @@ -724,20 +751,21 @@ def test_create_stage(): stage['deploymentId'].should.equal(deployment_id2) new_stage_name_with_vars = 'stage_with_vars' - response = client.create_stage(restApiId=api_id,stageName=new_stage_name_with_vars,deploymentId=deployment_id2,variables={ - "env" : "dev" + response = client.create_stage(restApiId=api_id, stageName=new_stage_name_with_vars, deploymentId=deployment_id2, variables={ + "env": "dev" }) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ - 'stageName':new_stage_name_with_vars, - 'deploymentId':deployment_id2, - 'methodSettings':{}, - 'variables':{ "env" : "dev" }, + 'stageName': new_stage_name_with_vars, + 'deploymentId': deployment_id2, + 'methodSettings': {}, + 'variables': {"env": "dev"}, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description':'', + 'description': '', 'cacheClusterEnabled': False }) @@ -750,22 +778,23 @@ def test_create_stage(): stage['variables'].should.have.key('env').which.should.match("dev") new_stage_name = 'stage_with_vars_and_cache_settings' - 
response = client.create_stage(restApiId=api_id,stageName=new_stage_name,deploymentId=deployment_id2,variables={ - "env" : "dev" - }, cacheClusterEnabled=True,description="hello moto") + response = client.create_stage(restApiId=api_id, stageName=new_stage_name, deploymentId=deployment_id2, variables={ + "env": "dev" + }, cacheClusterEnabled=True, description="hello moto") - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ - 'stageName':new_stage_name, - 'deploymentId':deployment_id2, - 'methodSettings':{}, - 'variables':{ "env" : "dev" }, + 'stageName': new_stage_name, + 'deploymentId': deployment_id2, + 'methodSettings': {}, + 'variables': {"env": "dev"}, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description':'hello moto', + 'description': 'hello moto', 'cacheClusterEnabled': True, - 'cacheClusterSize' : "0.5" + 'cacheClusterSize': "0.5" }) stage = client.get_stage( @@ -776,22 +805,23 @@ def test_create_stage(): stage['cacheClusterSize'].should.equal("0.5") new_stage_name = 'stage_with_vars_and_cache_settings_and_size' - response = client.create_stage(restApiId=api_id,stageName=new_stage_name,deploymentId=deployment_id2,variables={ - "env" : "dev" - }, cacheClusterEnabled=True,cacheClusterSize="1.6",description="hello moto") + response = client.create_stage(restApiId=api_id, stageName=new_stage_name, deploymentId=deployment_id2, variables={ + "env": "dev" + }, cacheClusterEnabled=True, cacheClusterSize="1.6", description="hello moto") - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ - 
'stageName':new_stage_name, - 'deploymentId':deployment_id2, - 'methodSettings':{}, - 'variables':{ "env" : "dev" }, + 'stageName': new_stage_name, + 'deploymentId': deployment_id2, + 'methodSettings': {}, + 'variables': {"env": "dev"}, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description':'hello moto', + 'description': 'hello moto', 'cacheClusterEnabled': True, - 'cacheClusterSize' : "1.6" + 'cacheClusterSize': "1.6" }) stage = client.get_stage( @@ -804,7 +834,6 @@ def test_create_stage(): stage['cacheClusterSize'].should.equal("1.6") - @mock_apigateway def test_deployment(): client = boto3.client('apigateway', region_name='us-west-2') @@ -825,13 +854,15 @@ def test_deployment(): restApiId=api_id, deploymentId=deployment_id, ) - response.pop('createdDate',None) # createdDate is hard to match against, remove it - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # createdDate is hard to match against, remove it + response.pop('createdDate', None) + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'id': deployment_id, 'ResponseMetadata': {'HTTPStatusCode': 200}, - 'description' : '' + 'description': '' }) response = client.get_deployments( @@ -898,7 +929,8 @@ def test_http_proxying_integration(): api_id = response['id'] resources = client.get_resources(restApiId=api_id) - root_id = [resource for resource in resources['items'] if resource['path'] == '/'][0]['id'] + root_id = [resource for resource in resources[ + 'items'] if resource['path'] == '/'][0]['id'] client.put_method( restApiId=api_id, @@ -928,7 +960,8 @@ def test_http_proxying_integration(): stageName=stage_name, ) - deploy_url = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}".format(api_id=api_id, region_name=region_name, stage_name=stage_name) + deploy_url = 
"https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}".format( + api_id=api_id, region_name=region_name, stage_name=stage_name) if not settings.TEST_SERVER_MODE: requests.get(deploy_url).content.should.equal(b"a fake response") diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 4d0905196..9a6408999 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -16,7 +16,8 @@ from tests.helpers import requires_boto_gte @mock_elb_deprecated def test_create_autoscaling_group(): elb_conn = boto.ec2.elb.connect_to_region('us-east-1') - elb_conn.create_load_balancer('test_lb', zones=[], listeners=[(80, 8080, 'http')]) + elb_conn.create_load_balancer( + 'test_lb', zones=[], listeners=[(80, 8080, 'http')]) conn = boto.ec2.autoscale.connect_to_region('us-east-1') config = LaunchConfiguration( @@ -45,14 +46,15 @@ def test_create_autoscaling_group(): key='test_key', value='test_value', propagate_at_launch=True - ) + ) ], ) conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] group.name.should.equal('tester_group') - set(group.availability_zones).should.equal(set(['us-east-1c', 'us-east-1b'])) + set(group.availability_zones).should.equal( + set(['us-east-1c', 'us-east-1b'])) group.desired_capacity.should.equal(2) group.max_size.should.equal(2) group.min_size.should.equal(2) @@ -64,7 +66,8 @@ def test_create_autoscaling_group(): group.health_check_type.should.equal("EC2") list(group.load_balancers).should.equal(["test_lb"]) group.placement_group.should.equal("test_placement") - list(group.termination_policies).should.equal(["OldestInstance", "NewestInstance"]) + list(group.termination_policies).should.equal( + ["OldestInstance", "NewestInstance"]) len(list(group.tags)).should.equal(1) tag = list(group.tags)[0] tag.resource_id.should.equal('tester_group') @@ -134,7 +137,8 @@ def test_autoscaling_group_describe_filter(): group.name = 
'tester_group3' conn.create_auto_scaling_group(group) - conn.get_all_groups(names=['tester_group', 'tester_group2']).should.have.length_of(2) + conn.get_all_groups( + names=['tester_group', 'tester_group2']).should.have.length_of(2) conn.get_all_groups().should.have.length_of(3) @@ -197,16 +201,16 @@ def test_autoscaling_tags_update(): conn.create_auto_scaling_group(group) conn.create_or_update_tags(tags=[Tag( - resource_id='tester_group', - key='test_key', - value='new_test_value', - propagate_at_launch=True - ), Tag( - resource_id='tester_group', - key='test_key2', - value='test_value2', - propagate_at_launch=True - )]) + resource_id='tester_group', + key='test_key', + value='new_test_value', + propagate_at_launch=True + ), Tag( + resource_id='tester_group', + key='test_key2', + value='test_value2', + propagate_at_launch=True + )]) group = conn.get_all_groups()[0] group.tags.should.have.length_of(2) @@ -372,6 +376,7 @@ def test_set_desired_capacity_the_same(): instances = list(conn.get_all_autoscaling_instances()) instances.should.have.length_of(2) + @mock_autoscaling_deprecated @mock_elb_deprecated def test_autoscaling_group_with_elb(): @@ -402,7 +407,8 @@ def test_autoscaling_group_with_elb(): group.desired_capacity.should.equal(2) elb.instances.should.have.length_of(2) - autoscale_instance_ids = set(instance.instance_id for instance in group.instances) + autoscale_instance_ids = set( + instance.instance_id for instance in group.instances) elb_instace_ids = set(instance.id for instance in elb.instances) autoscale_instance_ids.should.equal(elb_instace_ids) @@ -412,7 +418,8 @@ def test_autoscaling_group_with_elb(): group.desired_capacity.should.equal(3) elb.instances.should.have.length_of(3) - autoscale_instance_ids = set(instance.instance_id for instance in group.instances) + autoscale_instance_ids = set( + instance.instance_id for instance in group.instances) elb_instace_ids = set(instance.id for instance in elb.instances) 
autoscale_instance_ids.should.equal(elb_instace_ids) @@ -429,38 +436,39 @@ Boto3 @mock_autoscaling def test_create_autoscaling_group_boto3(): - client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( - LaunchConfigurationName='test_launch_configuration' - ) - response = client.create_auto_scaling_group( - AutoScalingGroupName='test_asg', - LaunchConfigurationName='test_launch_configuration', - MinSize=0, - MaxSize=20, - DesiredCapacity=5 - ) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + response = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5 + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) @mock_autoscaling def test_describe_autoscaling_groups_boto3(): - client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( - LaunchConfigurationName='test_launch_configuration' - ) - _ = client.create_auto_scaling_group( - AutoScalingGroupName='test_asg', - LaunchConfigurationName='test_launch_configuration', - MinSize=0, - MaxSize=20, - DesiredCapacity=5 - ) - response = client.describe_auto_scaling_groups( - AutoScalingGroupNames=["test_asg"] - ) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['AutoScalingGroups'][0]['AutoScalingGroupName'].should.equal('test_asg') + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5 + ) + response = 
client.describe_auto_scaling_groups( + AutoScalingGroupNames=["test_asg"] + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['AutoScalingGroups'][0][ + 'AutoScalingGroupName'].should.equal('test_asg') @mock_autoscaling @@ -509,22 +517,23 @@ def test_autoscaling_taqs_update_boto3(): ) client.create_or_update_tags(Tags=[{ - "ResourceId": 'test_asg', - "Key": 'test_key', - "Value": 'updated_test_value', - "PropagateAtLaunch": True - }, { - "ResourceId": 'test_asg', - "Key": 'test_key2', - "Value": 'test_value2', - "PropagateAtLaunch": True - }]) + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'updated_test_value', + "PropagateAtLaunch": True + }, { + "ResourceId": 'test_asg', + "Key": 'test_key2', + "Value": 'test_value2', + "PropagateAtLaunch": True + }]) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=["test_asg"] ) response['AutoScalingGroups'][0]['Tags'].should.have.length_of(2) + @mock_autoscaling def test_autoscaling_describe_policies_boto3(): client = boto3.client('autoscaling', region_name='us-east-1') @@ -577,4 +586,5 @@ def test_autoscaling_describe_policies_boto3(): PolicyTypes=['SimpleScaling'] ) response['ScalingPolicies'].should.have.length_of(1) - response['ScalingPolicies'][0]['PolicyName'].should.equal('test_policy_down') + response['ScalingPolicies'][0][ + 'PolicyName'].should.equal('test_policy_down') diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index b2e21b03e..1c1486421 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -30,10 +30,12 @@ def test_create_launch_configuration(): launch_config.image_id.should.equal('ami-abcd1234') launch_config.instance_type.should.equal('t1.micro') launch_config.key_name.should.equal('the_keys') - set(launch_config.security_groups).should.equal(set(['default', 'default2'])) + 
set(launch_config.security_groups).should.equal( + set(['default', 'default2'])) launch_config.user_data.should.equal(b"This is some user_data") launch_config.instance_monitoring.enabled.should.equal('true') - launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing') + launch_config.instance_profile_name.should.equal( + 'arn:aws:iam::123456789012:instance-profile/testing') launch_config.spot_price.should.equal(0.1) @@ -78,16 +80,19 @@ def test_create_launch_configuration_with_block_device_mappings(): launch_config.image_id.should.equal('ami-abcd1234') launch_config.instance_type.should.equal('m1.small') launch_config.key_name.should.equal('the_keys') - set(launch_config.security_groups).should.equal(set(['default', 'default2'])) + set(launch_config.security_groups).should.equal( + set(['default', 'default2'])) launch_config.user_data.should.equal(b"This is some user_data") launch_config.instance_monitoring.enabled.should.equal('true') - launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing') + launch_config.instance_profile_name.should.equal( + 'arn:aws:iam::123456789012:instance-profile/testing') launch_config.spot_price.should.equal(0.1) len(launch_config.block_device_mappings).should.equal(3) returned_mapping = launch_config.block_device_mappings - set(returned_mapping.keys()).should.equal(set(['/dev/xvdb', '/dev/xvdp', '/dev/xvdh'])) + set(returned_mapping.keys()).should.equal( + set(['/dev/xvdb', '/dev/xvdp', '/dev/xvdh'])) returned_mapping['/dev/xvdh'].iops.should.equal(1000) returned_mapping['/dev/xvdh'].size.should.equal(100) @@ -198,7 +203,8 @@ def test_launch_configuration_describe_filter(): config.name = 'tester3' conn.create_launch_configuration(config) - conn.get_all_launch_configurations(names=['tester', 'tester2']).should.have.length_of(2) + conn.get_all_launch_configurations( + names=['tester', 'tester2']).should.have.length_of(2) 
conn.get_all_launch_configurations().should.have.length_of(3) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 74e93c373..84e8a8f2b 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -73,8 +73,10 @@ def test_invoke_requestresponse_function(): Payload=json.dumps(in_data)) success_result["StatusCode"].should.equal(202) - base64.b64decode(success_result["LogResult"]).decode('utf-8').should.equal(json.dumps(in_data)) - json.loads(success_result["Payload"].read().decode('utf-8')).should.equal(in_data) + base64.b64decode(success_result["LogResult"]).decode( + 'utf-8').should.equal(json.dumps(in_data)) + json.loads(success_result["Payload"].read().decode( + 'utf-8')).should.equal(in_data) @mock_lambda @@ -101,9 +103,11 @@ def test_invoke_event_function(): ).should.throw(botocore.client.ClientError) in_data = {'msg': 'So long and thanks for all the fish'} - success_result = conn.invoke(FunctionName='testFunction', InvocationType='Event', Payload=json.dumps(in_data)) + success_result = conn.invoke( + FunctionName='testFunction', InvocationType='Event', Payload=json.dumps(in_data)) success_result["StatusCode"].should.equal(202) - json.loads(success_result['Payload'].read().decode('utf-8')).should.equal({}) + json.loads(success_result['Payload'].read().decode( + 'utf-8')).should.equal({}) @mock_ec2 @@ -129,9 +133,11 @@ def test_invoke_function_get_ec2_volume(): ) in_data = {'volume_id': vol.id} - result = conn.invoke(FunctionName='testFunction', InvocationType='RequestResponse', Payload=json.dumps(in_data)) + result = conn.invoke(FunctionName='testFunction', + InvocationType='RequestResponse', Payload=json.dumps(in_data)) result["StatusCode"].should.equal(202) - msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % (vol.id, vol.id, vol.state, vol.size, json.dumps(in_data)) + msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % ( + vol.id, vol.id, 
vol.state, vol.size, json.dumps(in_data)) base64.b64decode(result["LogResult"]).decode('utf-8').should.equal(msg) result['Payload'].read().decode('utf-8').should.equal(msg) @@ -189,8 +195,10 @@ def test_create_function_from_aws_bucket(): "SubnetIds": ["subnet-123abc"], }, ) - result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + result['ResponseMetadata'].pop('RetryAttempts', None) result.pop('LastModified') result.should.equal({ 'FunctionName': 'testFunction', @@ -231,8 +239,10 @@ def test_create_function_from_zipfile(): MemorySize=128, Publish=True, ) - result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + result['ResponseMetadata'].pop('RetryAttempts', None) result.pop('LastModified') result.should.equal({ @@ -283,8 +293,10 @@ def test_get_function(): ) result = conn.get_function(FunctionName='testFunction') - result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + result['ResponseMetadata'].pop('RetryAttempts', None) result['Configuration'].pop('LastModified') result.should.equal({ @@ -339,12 +351,15 @@ def test_delete_function(): ) 
success_result = conn.delete_function(FunctionName='testFunction') - success_result['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - success_result['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + success_result['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + success_result['ResponseMetadata'].pop('RetryAttempts', None) success_result.should.equal({'ResponseMetadata': {'HTTPStatusCode': 204}}) - conn.delete_function.when.called_with(FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError) + conn.delete_function.when.called_with( + FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError) @mock_lambda @@ -407,8 +422,10 @@ def test_list_create_list_get_delete_list(): func.should.equal(expected_function_result['Configuration']) func = conn.get_function(FunctionName='testFunction') - func['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it - func['ResponseMetadata'].pop('RetryAttempts', None) # Botocore inserts retry attempts not seen in Python27 + # this is hard to match against, so remove it + func['ResponseMetadata'].pop('HTTPHeaders', None) + # Botocore inserts retry attempts not seen in Python27 + func['ResponseMetadata'].pop('RetryAttempts', None) func['Configuration'].pop('LastModified') func.should.equal(expected_function_result) diff --git a/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py b/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py index 866197125..6f379daa6 100644 --- a/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py +++ b/tests/test_cloudformation/fixtures/rds_mysql_with_db_parameter_group.py @@ -1,201 +1,204 @@ from __future__ import unicode_literals template = { - 
"AWSTemplateFormatVersion" : "2010-09-09", + "AWSTemplateFormatVersion": "2010-09-09", - "Description" : "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", + "Description": "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", - "Parameters": { - "DBName": { - "Default": "MyDatabase", - "Description" : "The database name", - "Type": "String", - "MinLength": "1", - "MaxLength": "64", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." - }, + "Parameters": { + "DBName": { + "Default": "MyDatabase", + "Description": "The database name", + "Type": "String", + "MinLength": "1", + "MaxLength": "64", + "AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription": "must begin with a letter and contain only alphanumeric characters." + }, - "DBInstanceIdentifier": { - "Type": "String" - }, + "DBInstanceIdentifier": { + "Type": "String" + }, - "DBUser": { - "NoEcho": "true", - "Description" : "The database admin account username", - "Type": "String", - "MinLength": "1", - "MaxLength": "16", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." 
- }, + "DBUser": { + "NoEcho": "true", + "Description": "The database admin account username", + "Type": "String", + "MinLength": "1", + "MaxLength": "16", + "AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription": "must begin with a letter and contain only alphanumeric characters." + }, - "DBPassword": { - "NoEcho": "true", - "Description" : "The database admin account password", - "Type": "String", - "MinLength": "1", - "MaxLength": "41", - "AllowedPattern" : "[a-zA-Z0-9]+", - "ConstraintDescription" : "must contain only alphanumeric characters." - }, + "DBPassword": { + "NoEcho": "true", + "Description": "The database admin account password", + "Type": "String", + "MinLength": "1", + "MaxLength": "41", + "AllowedPattern": "[a-zA-Z0-9]+", + "ConstraintDescription": "must contain only alphanumeric characters." + }, - "DBAllocatedStorage": { - "Default": "5", - "Description" : "The size of the database (Gb)", - "Type": "Number", - "MinValue": "5", - "MaxValue": "1024", - "ConstraintDescription" : "must be between 5 and 1024Gb." - }, + "DBAllocatedStorage": { + "Default": "5", + "Description": "The size of the database (Gb)", + "Type": "Number", + "MinValue": "5", + "MaxValue": "1024", + "ConstraintDescription": "must be between 5 and 1024Gb." + }, - "DBInstanceClass": { - "Description" : "The database instance type", - "Type": "String", - "Default": "db.m1.small", - "AllowedValues" : [ "db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"] -, - "ConstraintDescription" : "must select a valid database instance type." 
- }, + "DBInstanceClass": { + "Description": "The database instance type", + "Type": "String", + "Default": "db.m1.small", + "AllowedValues": ["db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"], + "ConstraintDescription": "must select a valid database instance type." + }, - "EC2SecurityGroup": { - "Description" : "The EC2 security group that contains instances that need access to the database", - "Default": "default", - "Type": "String", - "AllowedPattern" : "[a-zA-Z0-9\\-]+", - "ConstraintDescription" : "must be a valid security group name." - }, + "EC2SecurityGroup": { + "Description": "The EC2 security group that contains instances that need access to the database", + "Default": "default", + "Type": "String", + "AllowedPattern": "[a-zA-Z0-9\\-]+", + "ConstraintDescription": "must be a valid security group name." + }, - "MultiAZ" : { - "Description" : "Multi-AZ master database", - "Type" : "String", - "Default" : "false", - "AllowedValues" : [ "true", "false" ], - "ConstraintDescription" : "must be true or false." - } - }, - - "Conditions" : { - "Is-EC2-VPC" : { "Fn::Or" : [ {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "eu-central-1" ]}, - {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "cn-north-1" ]}]}, - "Is-EC2-Classic" : { "Fn::Not" : [{ "Condition" : "Is-EC2-VPC"}]} - }, - - "Resources" : { - "DBParameterGroup": { - "Type": "AWS::RDS::DBParameterGroup", - "Properties" : { - "Description": "DB Parameter Goup", - "Family" : "MySQL5.1", - "Parameters": { - "BACKLOG_QUEUE_LIMIT": "2048" + "MultiAZ": { + "Description": "Multi-AZ master database", + "Type": "String", + "Default": "false", + "AllowedValues": ["true", "false"], + "ConstraintDescription": "must be true or false." 
} - } }, - "DBEC2SecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Condition" : "Is-EC2-VPC", - "Properties" : { - "GroupDescription": "Open database for access", - "SecurityGroupIngress" : [{ - "IpProtocol" : "tcp", - "FromPort" : "3306", - "ToPort" : "3306", - "SourceSecurityGroupName" : { "Ref" : "EC2SecurityGroup" } - }] - } + "Conditions": { + "Is-EC2-VPC": {"Fn::Or": [{"Fn::Equals": [{"Ref": "AWS::Region"}, "eu-central-1"]}, + {"Fn::Equals": [{"Ref": "AWS::Region"}, "cn-north-1"]}]}, + "Is-EC2-Classic": {"Fn::Not": [{"Condition": "Is-EC2-VPC"}]} }, - "DBSecurityGroup": { - "Type": "AWS::RDS::DBSecurityGroup", - "Condition" : "Is-EC2-Classic", - "Properties": { - "DBSecurityGroupIngress": [{ - "EC2SecurityGroupName": { "Ref": "EC2SecurityGroup" } - }], - "GroupDescription": "database access" - } + "Resources": { + "DBParameterGroup": { + "Type": "AWS::RDS::DBParameterGroup", + "Properties": { + "Description": "DB Parameter Goup", + "Family": "MySQL5.1", + "Parameters": { + "BACKLOG_QUEUE_LIMIT": "2048" + } + } + }, + + "DBEC2SecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Condition": "Is-EC2-VPC", + "Properties": { + "GroupDescription": "Open database for access", + "SecurityGroupIngress": [{ + "IpProtocol": "tcp", + "FromPort": "3306", + "ToPort": "3306", + "SourceSecurityGroupName": {"Ref": "EC2SecurityGroup"} + }] + } + }, + + "DBSecurityGroup": { + "Type": "AWS::RDS::DBSecurityGroup", + "Condition": "Is-EC2-Classic", + "Properties": { + "DBSecurityGroupIngress": [{ + "EC2SecurityGroupName": {"Ref": "EC2SecurityGroup"} + }], + "GroupDescription": "database access" + } + }, + + "my_vpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + } + }, + + "EC2Subnet": { + "Type": "AWS::EC2::Subnet", + "Condition": "Is-EC2-VPC", + "Properties": { + "AvailabilityZone": "eu-central-1a", + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "my_vpc"} + } + }, + + "DBSubnet": { + "Type": "AWS::RDS::DBSubnetGroup", + "Condition": 
"Is-EC2-VPC", + "Properties": { + "DBSubnetGroupDescription": "my db subnet group", + "SubnetIds": [{"Ref": "EC2Subnet"}], + } + }, + + "MasterDB": { + "Type": "AWS::RDS::DBInstance", + "Properties": { + "DBInstanceIdentifier": {"Ref": "DBInstanceIdentifier"}, + "DBName": {"Ref": "DBName"}, + "AllocatedStorage": {"Ref": "DBAllocatedStorage"}, + "DBInstanceClass": {"Ref": "DBInstanceClass"}, + "Engine": "MySQL", + "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", {"Ref": "DBSubnet"}, {"Ref": "AWS::NoValue"}]}, + "MasterUsername": {"Ref": "DBUser"}, + "MasterUserPassword": {"Ref": "DBPassword"}, + "MultiAZ": {"Ref": "MultiAZ"}, + "Tags": [{"Key": "Name", "Value": "Master Database"}], + "VPCSecurityGroups": {"Fn::If": ["Is-EC2-VPC", [{"Fn::GetAtt": ["DBEC2SecurityGroup", "GroupId"]}], {"Ref": "AWS::NoValue"}]}, + "DBSecurityGroups": {"Fn::If": ["Is-EC2-Classic", [{"Ref": "DBSecurityGroup"}], {"Ref": "AWS::NoValue"}]} + }, + "DeletionPolicy": "Snapshot" + }, + + "ReplicaDB": { + "Type": "AWS::RDS::DBInstance", + "Properties": { + "SourceDBInstanceIdentifier": {"Ref": "MasterDB"}, + "DBInstanceClass": {"Ref": "DBInstanceClass"}, + "Tags": [{"Key": "Name", "Value": "Read Replica Database"}] + } + } }, - "my_vpc": { - "Type" : "AWS::EC2::VPC", - "Properties" : { - "CidrBlock" : "10.0.0.0/16", - } - }, + "Outputs": { + "EC2Platform": { + "Description": "Platform in which this stack is deployed", + "Value": {"Fn::If": ["Is-EC2-VPC", "EC2-VPC", "EC2-Classic"]} + }, - "EC2Subnet": { - "Type" : "AWS::EC2::Subnet", - "Condition" : "Is-EC2-VPC", - "Properties" : { - "AvailabilityZone" : "eu-central-1a", - "CidrBlock" : "10.0.1.0/24", - "VpcId" : { "Ref" : "my_vpc" } - } - }, - - "DBSubnet": { - "Type": "AWS::RDS::DBSubnetGroup", - "Condition" : "Is-EC2-VPC", - "Properties": { - "DBSubnetGroupDescription": "my db subnet group", - "SubnetIds" : [ { "Ref": "EC2Subnet" } ], - } - }, - - "MasterDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "DBInstanceIdentifier": { 
"Ref": "DBInstanceIdentifier" }, - "DBName" : { "Ref" : "DBName" }, - "AllocatedStorage" : { "Ref" : "DBAllocatedStorage" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Engine" : "MySQL", - "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", { "Ref": "DBSubnet" }, { "Ref": "AWS::NoValue" }]}, - "MasterUsername" : { "Ref" : "DBUser" }, - "MasterUserPassword" : { "Ref" : "DBPassword" }, - "MultiAZ" : { "Ref" : "MultiAZ" }, - "Tags" : [{ "Key" : "Name", "Value" : "Master Database" }], - "VPCSecurityGroups": { "Fn::If" : [ "Is-EC2-VPC", [ { "Fn::GetAtt": [ "DBEC2SecurityGroup", "GroupId" ] } ], { "Ref" : "AWS::NoValue"}]}, - "DBSecurityGroups": { "Fn::If" : [ "Is-EC2-Classic", [ { "Ref": "DBSecurityGroup" } ], { "Ref" : "AWS::NoValue"}]} - }, - "DeletionPolicy" : "Snapshot" - }, - - "ReplicaDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "SourceDBInstanceIdentifier" : { "Ref" : "MasterDB" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Tags" : [{ "Key" : "Name", "Value" : "Read Replica Database" }] - } + "MasterJDBCConnectionString": { + "Description": "JDBC connection string for the master database", + "Value": {"Fn::Join": ["", ["jdbc:mysql://", + {"Fn::GetAtt": [ + "MasterDB", "Endpoint.Address"]}, + ":", + {"Fn::GetAtt": [ + "MasterDB", "Endpoint.Port"]}, + "/", + {"Ref": "DBName"}]]} + }, + "ReplicaJDBCConnectionString": { + "Description": "JDBC connection string for the replica database", + "Value": {"Fn::Join": ["", ["jdbc:mysql://", + {"Fn::GetAtt": [ + "ReplicaDB", "Endpoint.Address"]}, + ":", + {"Fn::GetAtt": [ + "ReplicaDB", "Endpoint.Port"]}, + "/", + {"Ref": "DBName"}]]} + } } - }, - - "Outputs" : { - "EC2Platform" : { - "Description" : "Platform in which this stack is deployed", - "Value" : { "Fn::If" : [ "Is-EC2-VPC", "EC2-VPC", "EC2-Classic" ]} - }, - - "MasterJDBCConnectionString": { - "Description" : "JDBC connection string for the master database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": 
[ "MasterDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "MasterDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - }, - "ReplicaJDBCConnectionString": { - "Description" : "JDBC connection string for the replica database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - } - } } diff --git a/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py b/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py index 3e5efa04a..2fbfb4cad 100644 --- a/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py +++ b/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py @@ -1,190 +1,193 @@ from __future__ import unicode_literals template = { - "AWSTemplateFormatVersion" : "2010-09-09", + "AWSTemplateFormatVersion": "2010-09-09", - "Description" : "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.", + "Description": "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. 
You will be billed for the AWS resources used if you create a stack from this template.", - "Parameters": { - "DBName": { - "Default": "MyDatabase", - "Description" : "The database name", - "Type": "String", - "MinLength": "1", - "MaxLength": "64", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." + "Parameters": { + "DBName": { + "Default": "MyDatabase", + "Description": "The database name", + "Type": "String", + "MinLength": "1", + "MaxLength": "64", + "AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription": "must begin with a letter and contain only alphanumeric characters." + }, + + "DBInstanceIdentifier": { + "Type": "String" + }, + + "DBUser": { + "NoEcho": "true", + "Description": "The database admin account username", + "Type": "String", + "MinLength": "1", + "MaxLength": "16", + "AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*", + "ConstraintDescription": "must begin with a letter and contain only alphanumeric characters." + }, + + "DBPassword": { + "NoEcho": "true", + "Description": "The database admin account password", + "Type": "String", + "MinLength": "1", + "MaxLength": "41", + "AllowedPattern": "[a-zA-Z0-9]+", + "ConstraintDescription": "must contain only alphanumeric characters." + }, + + "DBAllocatedStorage": { + "Default": "5", + "Description": "The size of the database (Gb)", + "Type": "Number", + "MinValue": "5", + "MaxValue": "1024", + "ConstraintDescription": "must be between 5 and 1024Gb." 
+ }, + + "DBInstanceClass": { + "Description": "The database instance type", + "Type": "String", + "Default": "db.m1.small", + "AllowedValues": ["db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"], + "ConstraintDescription": "must select a valid database instance type." + }, + + "EC2SecurityGroup": { + "Description": "The EC2 security group that contains instances that need access to the database", + "Default": "default", + "Type": "String", + "AllowedPattern": "[a-zA-Z0-9\\-]+", + "ConstraintDescription": "must be a valid security group name." + }, + + "MultiAZ": { + "Description": "Multi-AZ master database", + "Type": "String", + "Default": "false", + "AllowedValues": ["true", "false"], + "ConstraintDescription": "must be true or false." + } }, - "DBInstanceIdentifier": { - "Type": "String" + "Conditions": { + "Is-EC2-VPC": {"Fn::Or": [{"Fn::Equals": [{"Ref": "AWS::Region"}, "eu-central-1"]}, + {"Fn::Equals": [{"Ref": "AWS::Region"}, "cn-north-1"]}]}, + "Is-EC2-Classic": {"Fn::Not": [{"Condition": "Is-EC2-VPC"}]} }, - "DBUser": { - "NoEcho": "true", - "Description" : "The database admin account username", - "Type": "String", - "MinLength": "1", - "MaxLength": "16", - "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", - "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." 
+ "Resources": { + "DBEC2SecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Condition": "Is-EC2-VPC", + "Properties": { + "GroupDescription": "Open database for access", + "SecurityGroupIngress": [{ + "IpProtocol": "tcp", + "FromPort": "3306", + "ToPort": "3306", + "SourceSecurityGroupName": {"Ref": "EC2SecurityGroup"} + }] + } + }, + + "DBSecurityGroup": { + "Type": "AWS::RDS::DBSecurityGroup", + "Condition": "Is-EC2-Classic", + "Properties": { + "DBSecurityGroupIngress": [{ + "EC2SecurityGroupName": {"Ref": "EC2SecurityGroup"} + }], + "GroupDescription": "database access" + } + }, + + "my_vpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + } + }, + + "EC2Subnet": { + "Type": "AWS::EC2::Subnet", + "Condition": "Is-EC2-VPC", + "Properties": { + "AvailabilityZone": "eu-central-1a", + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "my_vpc"} + } + }, + + "DBSubnet": { + "Type": "AWS::RDS::DBSubnetGroup", + "Condition": "Is-EC2-VPC", + "Properties": { + "DBSubnetGroupDescription": "my db subnet group", + "SubnetIds": [{"Ref": "EC2Subnet"}], + } + }, + + "MasterDB": { + "Type": "AWS::RDS::DBInstance", + "Properties": { + "DBInstanceIdentifier": {"Ref": "DBInstanceIdentifier"}, + "DBName": {"Ref": "DBName"}, + "AllocatedStorage": {"Ref": "DBAllocatedStorage"}, + "DBInstanceClass": {"Ref": "DBInstanceClass"}, + "Engine": "MySQL", + "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", {"Ref": "DBSubnet"}, {"Ref": "AWS::NoValue"}]}, + "MasterUsername": {"Ref": "DBUser"}, + "MasterUserPassword": {"Ref": "DBPassword"}, + "MultiAZ": {"Ref": "MultiAZ"}, + "Tags": [{"Key": "Name", "Value": "Master Database"}], + "VPCSecurityGroups": {"Fn::If": ["Is-EC2-VPC", [{"Fn::GetAtt": ["DBEC2SecurityGroup", "GroupId"]}], {"Ref": "AWS::NoValue"}]}, + "DBSecurityGroups": {"Fn::If": ["Is-EC2-Classic", [{"Ref": "DBSecurityGroup"}], {"Ref": "AWS::NoValue"}]} + }, + "DeletionPolicy": "Snapshot" + }, + + "ReplicaDB": { + "Type": "AWS::RDS::DBInstance", + 
"Properties": { + "SourceDBInstanceIdentifier": {"Ref": "MasterDB"}, + "DBInstanceClass": {"Ref": "DBInstanceClass"}, + "Tags": [{"Key": "Name", "Value": "Read Replica Database"}] + } + } }, - "DBPassword": { - "NoEcho": "true", - "Description" : "The database admin account password", - "Type": "String", - "MinLength": "1", - "MaxLength": "41", - "AllowedPattern" : "[a-zA-Z0-9]+", - "ConstraintDescription" : "must contain only alphanumeric characters." - }, + "Outputs": { + "EC2Platform": { + "Description": "Platform in which this stack is deployed", + "Value": {"Fn::If": ["Is-EC2-VPC", "EC2-VPC", "EC2-Classic"]} + }, - "DBAllocatedStorage": { - "Default": "5", - "Description" : "The size of the database (Gb)", - "Type": "Number", - "MinValue": "5", - "MaxValue": "1024", - "ConstraintDescription" : "must be between 5 and 1024Gb." - }, - - "DBInstanceClass": { - "Description" : "The database instance type", - "Type": "String", - "Default": "db.m1.small", - "AllowedValues" : [ "db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"] -, - "ConstraintDescription" : "must select a valid database instance type." - }, - - "EC2SecurityGroup": { - "Description" : "The EC2 security group that contains instances that need access to the database", - "Default": "default", - "Type": "String", - "AllowedPattern" : "[a-zA-Z0-9\\-]+", - "ConstraintDescription" : "must be a valid security group name." - }, - - "MultiAZ" : { - "Description" : "Multi-AZ master database", - "Type" : "String", - "Default" : "false", - "AllowedValues" : [ "true", "false" ], - "ConstraintDescription" : "must be true or false." 
+ "MasterJDBCConnectionString": { + "Description": "JDBC connection string for the master database", + "Value": {"Fn::Join": ["", ["jdbc:mysql://", + {"Fn::GetAtt": [ + "MasterDB", "Endpoint.Address"]}, + ":", + {"Fn::GetAtt": [ + "MasterDB", "Endpoint.Port"]}, + "/", + {"Ref": "DBName"}]]} + }, + "ReplicaJDBCConnectionString": { + "Description": "JDBC connection string for the replica database", + "Value": {"Fn::Join": ["", ["jdbc:mysql://", + {"Fn::GetAtt": [ + "ReplicaDB", "Endpoint.Address"]}, + ":", + {"Fn::GetAtt": [ + "ReplicaDB", "Endpoint.Port"]}, + "/", + {"Ref": "DBName"}]]} + } } - }, - - "Conditions" : { - "Is-EC2-VPC" : { "Fn::Or" : [ {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "eu-central-1" ]}, - {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "cn-north-1" ]}]}, - "Is-EC2-Classic" : { "Fn::Not" : [{ "Condition" : "Is-EC2-VPC"}]} - }, - - "Resources" : { - "DBEC2SecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Condition" : "Is-EC2-VPC", - "Properties" : { - "GroupDescription": "Open database for access", - "SecurityGroupIngress" : [{ - "IpProtocol" : "tcp", - "FromPort" : "3306", - "ToPort" : "3306", - "SourceSecurityGroupName" : { "Ref" : "EC2SecurityGroup" } - }] - } - }, - - "DBSecurityGroup": { - "Type": "AWS::RDS::DBSecurityGroup", - "Condition" : "Is-EC2-Classic", - "Properties": { - "DBSecurityGroupIngress": [{ - "EC2SecurityGroupName": { "Ref": "EC2SecurityGroup" } - }], - "GroupDescription": "database access" - } - }, - - "my_vpc": { - "Type" : "AWS::EC2::VPC", - "Properties" : { - "CidrBlock" : "10.0.0.0/16", - } - }, - - "EC2Subnet": { - "Type" : "AWS::EC2::Subnet", - "Condition" : "Is-EC2-VPC", - "Properties" : { - "AvailabilityZone" : "eu-central-1a", - "CidrBlock" : "10.0.1.0/24", - "VpcId" : { "Ref" : "my_vpc" } - } - }, - - "DBSubnet": { - "Type": "AWS::RDS::DBSubnetGroup", - "Condition" : "Is-EC2-VPC", - "Properties": { - "DBSubnetGroupDescription": "my db subnet group", - "SubnetIds" : [ { "Ref": "EC2Subnet" } ], - } - }, - - 
"MasterDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "DBInstanceIdentifier": { "Ref": "DBInstanceIdentifier" }, - "DBName" : { "Ref" : "DBName" }, - "AllocatedStorage" : { "Ref" : "DBAllocatedStorage" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Engine" : "MySQL", - "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", { "Ref": "DBSubnet" }, { "Ref": "AWS::NoValue" }]}, - "MasterUsername" : { "Ref" : "DBUser" }, - "MasterUserPassword" : { "Ref" : "DBPassword" }, - "MultiAZ" : { "Ref" : "MultiAZ" }, - "Tags" : [{ "Key" : "Name", "Value" : "Master Database" }], - "VPCSecurityGroups": { "Fn::If" : [ "Is-EC2-VPC", [ { "Fn::GetAtt": [ "DBEC2SecurityGroup", "GroupId" ] } ], { "Ref" : "AWS::NoValue"}]}, - "DBSecurityGroups": { "Fn::If" : [ "Is-EC2-Classic", [ { "Ref": "DBSecurityGroup" } ], { "Ref" : "AWS::NoValue"}]} - }, - "DeletionPolicy" : "Snapshot" - }, - - "ReplicaDB" : { - "Type" : "AWS::RDS::DBInstance", - "Properties" : { - "SourceDBInstanceIdentifier" : { "Ref" : "MasterDB" }, - "DBInstanceClass" : { "Ref" : "DBInstanceClass" }, - "Tags" : [{ "Key" : "Name", "Value" : "Read Replica Database" }] - } - } - }, - - "Outputs" : { - "EC2Platform" : { - "Description" : "Platform in which this stack is deployed", - "Value" : { "Fn::If" : [ "Is-EC2-VPC", "EC2-VPC", "EC2-Classic" ]} - }, - - "MasterJDBCConnectionString": { - "Description" : "JDBC connection string for the master database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": [ "MasterDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "MasterDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - }, - "ReplicaJDBCConnectionString": { - "Description" : "JDBC connection string for the replica database", - "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://", - { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Address" ] }, - ":", - { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Port" ] }, - "/", - { "Ref": "DBName" }]]} - } - } } diff --git 
a/tests/test_cloudformation/fixtures/redshift.py b/tests/test_cloudformation/fixtures/redshift.py index 90e171659..317e213bc 100644 --- a/tests/test_cloudformation/fixtures/redshift.py +++ b/tests/test_cloudformation/fixtures/redshift.py @@ -1,187 +1,187 @@ from __future__ import unicode_literals template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Parameters" : { - "DatabaseName" : { - "Description" : "The name of the first database to be created when the cluster is created", - "Type" : "String", - "Default" : "dev", - "AllowedPattern" : "([a-z]|[0-9])+" - }, - "ClusterType" : { - "Description" : "The type of cluster", - "Type" : "String", - "Default" : "single-node", - "AllowedValues" : [ "single-node", "multi-node" ] - }, - "NumberOfNodes" : { - "Description" : "The number of compute nodes in the cluster. For multi-node clusters, the NumberOfNodes parameter must be greater than 1", - "Type" : "Number", - "Default" : "1" - }, - "NodeType" : { - "Description" : "The type of node to be provisioned", - "Type" : "String", - "Default" : "dw1.xlarge", - "AllowedValues" : [ "dw1.xlarge", "dw1.8xlarge", "dw2.large", "dw2.8xlarge" ] - }, - "MasterUsername" : { - "Description" : "The user name that is associated with the master user account for the cluster that is being created", - "Type" : "String", - "Default" : "defaultuser", - "AllowedPattern" : "([a-z])([a-z]|[0-9])*" - }, - "MasterUserPassword" : { - "Description" : "The password that is associated with the master user account for the cluster that is being created.", - "Type" : "String", - "NoEcho" : "true" - }, - "InboundTraffic" : { - "Description" : "Allow inbound traffic to the cluster from this CIDR range.", - "Type" : "String", - "MinLength": "9", - "MaxLength": "18", - "Default" : "0.0.0.0/0", - "AllowedPattern" : "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "ConstraintDescription" : "must be a valid CIDR range of the form x.x.x.x/x." 
- }, - "PortNumber" : { - "Description" : "The port number on which the cluster accepts incoming connections.", - "Type" : "Number", - "Default" : "5439" - } - }, - "Conditions" : { - "IsMultiNodeCluster" : { - "Fn::Equals" : [{ "Ref" : "ClusterType" }, "multi-node" ] - } - }, - "Resources" : { - "RedshiftCluster" : { - "Type" : "AWS::Redshift::Cluster", - "DependsOn" : "AttachGateway", - "Properties" : { - "ClusterType" : { "Ref" : "ClusterType" }, - "NumberOfNodes" : { "Fn::If" : [ "IsMultiNodeCluster", { "Ref" : "NumberOfNodes" }, { "Ref" : "AWS::NoValue" }]}, - "NodeType" : { "Ref" : "NodeType" }, - "DBName" : { "Ref" : "DatabaseName" }, - "MasterUsername" : { "Ref" : "MasterUsername" }, - "MasterUserPassword" : { "Ref" : "MasterUserPassword" }, - "ClusterParameterGroupName" : { "Ref" : "RedshiftClusterParameterGroup" }, - "VpcSecurityGroupIds" : [ { "Ref" : "SecurityGroup" } ], - "ClusterSubnetGroupName" : { "Ref" : "RedshiftClusterSubnetGroup" }, - "PubliclyAccessible" : "true", - "Port" : { "Ref" : "PortNumber" } - } - }, - "RedshiftClusterParameterGroup" : { - "Type" : "AWS::Redshift::ClusterParameterGroup", - "Properties" : { - "Description" : "Cluster parameter group", - "ParameterGroupFamily" : "redshift-1.0", - "Parameters" : [{ - "ParameterName" : "enable_user_activity_logging", - "ParameterValue" : "true" - }] - } - }, - "RedshiftClusterSubnetGroup" : { - "Type" : "AWS::Redshift::ClusterSubnetGroup", - "Properties" : { - "Description" : "Cluster subnet group", - "SubnetIds" : [ { "Ref" : "PublicSubnet" } ] - } - }, - "VPC" : { - "Type" : "AWS::EC2::VPC", - "Properties" : { - "CidrBlock" : "10.0.0.0/16" - } - }, - "PublicSubnet" : { - "Type" : "AWS::EC2::Subnet", - "Properties" : { - "CidrBlock" : "10.0.0.0/24", - "VpcId" : { "Ref" : "VPC" } - } - }, - "SecurityGroup" : { - "Type" : "AWS::EC2::SecurityGroup", - "Properties" : { - "GroupDescription" : "Security group", - "SecurityGroupIngress" : [ { - "CidrIp" : { "Ref": "InboundTraffic" }, - "FromPort" 
: { "Ref" : "PortNumber" }, - "ToPort" : { "Ref" : "PortNumber" }, - "IpProtocol" : "tcp" - } ], - "VpcId" : { "Ref" : "VPC" } - } - }, - "myInternetGateway" : { - "Type" : "AWS::EC2::InternetGateway" - }, - "AttachGateway" : { - "Type" : "AWS::EC2::VPCGatewayAttachment", - "Properties" : { - "VpcId" : { "Ref" : "VPC" }, - "InternetGatewayId" : { "Ref" : "myInternetGateway" } - } - }, - "PublicRouteTable" : { - "Type" : "AWS::EC2::RouteTable", - "Properties" : { - "VpcId" : { - "Ref" : "VPC" - } - } - }, - "PublicRoute" : { - "Type" : "AWS::EC2::Route", - "DependsOn" : "AttachGateway", - "Properties" : { - "RouteTableId" : { - "Ref" : "PublicRouteTable" + "AWSTemplateFormatVersion": "2010-09-09", + "Parameters": { + "DatabaseName": { + "Description": "The name of the first database to be created when the cluster is created", + "Type": "String", + "Default": "dev", + "AllowedPattern": "([a-z]|[0-9])+" }, - "DestinationCidrBlock" : "0.0.0.0/0", - "GatewayId" : { - "Ref" : "myInternetGateway" - } - } - }, - "PublicSubnetRouteTableAssociation" : { - "Type" : "AWS::EC2::SubnetRouteTableAssociation", - "Properties" : { - "SubnetId" : { - "Ref" : "PublicSubnet" + "ClusterType": { + "Description": "The type of cluster", + "Type": "String", + "Default": "single-node", + "AllowedValues": ["single-node", "multi-node"] }, - "RouteTableId" : { - "Ref" : "PublicRouteTable" + "NumberOfNodes": { + "Description": "The number of compute nodes in the cluster. 
For multi-node clusters, the NumberOfNodes parameter must be greater than 1", + "Type": "Number", + "Default": "1" + }, + "NodeType": { + "Description": "The type of node to be provisioned", + "Type": "String", + "Default": "dw1.xlarge", + "AllowedValues": ["dw1.xlarge", "dw1.8xlarge", "dw2.large", "dw2.8xlarge"] + }, + "MasterUsername": { + "Description": "The user name that is associated with the master user account for the cluster that is being created", + "Type": "String", + "Default": "defaultuser", + "AllowedPattern": "([a-z])([a-z]|[0-9])*" + }, + "MasterUserPassword": { + "Description": "The password that is associated with the master user account for the cluster that is being created.", + "Type": "String", + "NoEcho": "true" + }, + "InboundTraffic": { + "Description": "Allow inbound traffic to the cluster from this CIDR range.", + "Type": "String", + "MinLength": "9", + "MaxLength": "18", + "Default": "0.0.0.0/0", + "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", + "ConstraintDescription": "must be a valid CIDR range of the form x.x.x.x/x." 
+ }, + "PortNumber": { + "Description": "The port number on which the cluster accepts incoming connections.", + "Type": "Number", + "Default": "5439" + } + }, + "Conditions": { + "IsMultiNodeCluster": { + "Fn::Equals": [{"Ref": "ClusterType"}, "multi-node"] + } + }, + "Resources": { + "RedshiftCluster": { + "Type": "AWS::Redshift::Cluster", + "DependsOn": "AttachGateway", + "Properties": { + "ClusterType": {"Ref": "ClusterType"}, + "NumberOfNodes": {"Fn::If": ["IsMultiNodeCluster", {"Ref": "NumberOfNodes"}, {"Ref": "AWS::NoValue"}]}, + "NodeType": {"Ref": "NodeType"}, + "DBName": {"Ref": "DatabaseName"}, + "MasterUsername": {"Ref": "MasterUsername"}, + "MasterUserPassword": {"Ref": "MasterUserPassword"}, + "ClusterParameterGroupName": {"Ref": "RedshiftClusterParameterGroup"}, + "VpcSecurityGroupIds": [{"Ref": "SecurityGroup"}], + "ClusterSubnetGroupName": {"Ref": "RedshiftClusterSubnetGroup"}, + "PubliclyAccessible": "true", + "Port": {"Ref": "PortNumber"} + } + }, + "RedshiftClusterParameterGroup": { + "Type": "AWS::Redshift::ClusterParameterGroup", + "Properties": { + "Description": "Cluster parameter group", + "ParameterGroupFamily": "redshift-1.0", + "Parameters": [{ + "ParameterName": "enable_user_activity_logging", + "ParameterValue": "true" + }] + } + }, + "RedshiftClusterSubnetGroup": { + "Type": "AWS::Redshift::ClusterSubnetGroup", + "Properties": { + "Description": "Cluster subnet group", + "SubnetIds": [{"Ref": "PublicSubnet"}] + } + }, + "VPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16" + } + }, + "PublicSubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "VPC"} + } + }, + "SecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "Security group", + "SecurityGroupIngress": [{ + "CidrIp": {"Ref": "InboundTraffic"}, + "FromPort": {"Ref": "PortNumber"}, + "ToPort": {"Ref": "PortNumber"}, + "IpProtocol": "tcp" + }], + "VpcId": 
{"Ref": "VPC"} + } + }, + "myInternetGateway": { + "Type": "AWS::EC2::InternetGateway" + }, + "AttachGateway": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": {"Ref": "VPC"}, + "InternetGatewayId": {"Ref": "myInternetGateway"} + } + }, + "PublicRouteTable": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPC" + } + } + }, + "PublicRoute": { + "Type": "AWS::EC2::Route", + "DependsOn": "AttachGateway", + "Properties": { + "RouteTableId": { + "Ref": "PublicRouteTable" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "myInternetGateway" + } + } + }, + "PublicSubnetRouteTableAssociation": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "SubnetId": { + "Ref": "PublicSubnet" + }, + "RouteTableId": { + "Ref": "PublicRouteTable" + } + } + } + }, + "Outputs": { + "ClusterEndpoint": { + "Description": "Cluster endpoint", + "Value": {"Fn::Join": [":", [{"Fn::GetAtt": ["RedshiftCluster", "Endpoint.Address"]}, {"Fn::GetAtt": ["RedshiftCluster", "Endpoint.Port"]}]]} + }, + "ClusterName": { + "Description": "Name of cluster", + "Value": {"Ref": "RedshiftCluster"} + }, + "ParameterGroupName": { + "Description": "Name of parameter group", + "Value": {"Ref": "RedshiftClusterParameterGroup"} + }, + "RedshiftClusterSubnetGroupName": { + "Description": "Name of cluster subnet group", + "Value": {"Ref": "RedshiftClusterSubnetGroup"} + }, + "RedshiftClusterSecurityGroupName": { + "Description": "Name of cluster security group", + "Value": {"Ref": "SecurityGroup"} } - } } - }, - "Outputs" : { - "ClusterEndpoint" : { - "Description" : "Cluster endpoint", - "Value" : { "Fn::Join" : [ ":", [ { "Fn::GetAtt" : [ "RedshiftCluster", "Endpoint.Address" ] }, { "Fn::GetAtt" : [ "RedshiftCluster", "Endpoint.Port" ] } ] ] } - }, - "ClusterName" : { - "Description" : "Name of cluster", - "Value" : { "Ref" : "RedshiftCluster" } - }, - "ParameterGroupName" : { - "Description" : "Name of parameter group", 
- "Value" : { "Ref" : "RedshiftClusterParameterGroup" } - }, - "RedshiftClusterSubnetGroupName" : { - "Description" : "Name of cluster subnet group", - "Value" : { "Ref" : "RedshiftClusterSubnetGroup" } - }, - "RedshiftClusterSecurityGroupName" : { - "Description" : "Name of cluster security group", - "Value" : { "Ref" : "SecurityGroup" } - } - } -} \ No newline at end of file +} diff --git a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py index 02fa57b8f..5e66bbd86 100644 --- a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py +++ b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py @@ -1,40 +1,40 @@ from __future__ import unicode_literals template = { - "Resources" : { - "Ec2Instance" : { - "Type" : "AWS::EC2::Instance", - "Properties" : { - "ImageId" : "ami-1234abcd", + "Resources": { + "Ec2Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", "PrivateIpAddress": "10.0.0.25", } }, "HostedZone": { - "Type" : "AWS::Route53::HostedZone", - "Properties" : { - "Name" : "my_zone" + "Type": "AWS::Route53::HostedZone", + "Properties": { + "Name": "my_zone" } }, - "myDNSRecord" : { - "Type" : "AWS::Route53::RecordSet", - "Properties" : { - "HostedZoneName" : { "Ref" : "HostedZone" }, - "Comment" : "DNS name for my instance.", - "Name" : { - "Fn::Join" : [ "", [ - {"Ref" : "Ec2Instance"}, ".", - {"Ref" : "AWS::Region"}, ".", - {"Ref" : "HostedZone"} ,"." - ] ] - }, - "Type" : "A", - "TTL" : "900", - "ResourceRecords" : [ - { "Fn::GetAtt" : [ "Ec2Instance", "PrivateIp" ] } - ] - } + "myDNSRecord": { + "Type": "AWS::Route53::RecordSet", + "Properties": { + "HostedZoneName": {"Ref": "HostedZone"}, + "Comment": "DNS name for my instance.", + "Name": { + "Fn::Join": ["", [ + {"Ref": "Ec2Instance"}, ".", + {"Ref": "AWS::Region"}, ".", + {"Ref": "HostedZone"}, "." 
+ ]] + }, + "Type": "A", + "TTL": "900", + "ResourceRecords": [ + {"Fn::GetAtt": ["Ec2Instance", "PrivateIp"]} + ] + } } }, -} \ No newline at end of file +} diff --git a/tests/test_cloudformation/fixtures/route53_health_check.py b/tests/test_cloudformation/fixtures/route53_health_check.py index 6c6159fde..f6a2c9b8e 100644 --- a/tests/test_cloudformation/fixtures/route53_health_check.py +++ b/tests/test_cloudformation/fixtures/route53_health_check.py @@ -1,39 +1,39 @@ from __future__ import unicode_literals template = { - "Resources" : { + "Resources": { "HostedZone": { - "Type" : "AWS::Route53::HostedZone", - "Properties" : { - "Name" : "my_zone" + "Type": "AWS::Route53::HostedZone", + "Properties": { + "Name": "my_zone" } }, "my_health_check": { "Type": "AWS::Route53::HealthCheck", - "Properties" : { - "HealthCheckConfig" : { - "FailureThreshold" : 3, - "IPAddress" : "10.0.0.4", - "Port" : 80, - "RequestInterval" : 10, - "ResourcePath" : "/", - "Type" : "HTTP", + "Properties": { + "HealthCheckConfig": { + "FailureThreshold": 3, + "IPAddress": "10.0.0.4", + "Port": 80, + "RequestInterval": 10, + "ResourcePath": "/", + "Type": "HTTP", } } }, - "myDNSRecord" : { - "Type" : "AWS::Route53::RecordSet", - "Properties" : { - "HostedZoneName" : { "Ref" : "HostedZone" }, - "Comment" : "DNS name for my instance.", - "Name" : "my_record_set", - "Type" : "A", - "TTL" : "900", - "ResourceRecords" : ["my.example.com"], - "HealthCheckId": {"Ref": "my_health_check"}, - } + "myDNSRecord": { + "Type": "AWS::Route53::RecordSet", + "Properties": { + "HostedZoneName": {"Ref": "HostedZone"}, + "Comment": "DNS name for my instance.", + "Name": "my_record_set", + "Type": "A", + "TTL": "900", + "ResourceRecords": ["my.example.com"], + "HealthCheckId": {"Ref": "my_health_check"}, + } } }, -} \ No newline at end of file +} diff --git a/tests/test_cloudformation/fixtures/route53_roundrobin.py b/tests/test_cloudformation/fixtures/route53_roundrobin.py index d985623bb..da4fecd4d 100644 --- 
a/tests/test_cloudformation/fixtures/route53_roundrobin.py +++ b/tests/test_cloudformation/fixtures/route53_roundrobin.py @@ -1,47 +1,47 @@ from __future__ import unicode_literals template = { - "AWSTemplateFormatVersion" : "2010-09-09", + "AWSTemplateFormatVersion": "2010-09-09", - "Description" : "AWS CloudFormation Sample Template Route53_RoundRobin: Sample template showing how to use weighted round robin (WRR) DNS entried via Amazon Route 53. This contrived sample uses weighted CNAME records to illustrate that the weighting influences the return records. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates one or more AWS resources. You will be billed for the AWS resources used if you create a stack from this template.", + "Description": "AWS CloudFormation Sample Template Route53_RoundRobin: Sample template showing how to use weighted round robin (WRR) DNS entried via Amazon Route 53. This contrived sample uses weighted CNAME records to illustrate that the weighting influences the return records. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates one or more AWS resources. 
You will be billed for the AWS resources used if you create a stack from this template.", - "Resources" : { + "Resources": { - "MyZone": { - "Type" : "AWS::Route53::HostedZone", - "Properties" : { - "Name" : "my_zone" - } + "MyZone": { + "Type": "AWS::Route53::HostedZone", + "Properties": { + "Name": "my_zone" + } + }, + + "MyDNSRecord": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneName": {"Ref": "MyZone"}, + "Comment": "Contrived example to redirect to aws.amazon.com 75% of the time and www.amazon.com 25% of the time.", + "RecordSets": [{ + "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "AWS"]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Type": "CNAME", + "TTL": "900", + "ResourceRecords": ["aws.amazon.com"], + "Weight": "3" + }, { + "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "Amazon"]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Type": "CNAME", + "TTL": "900", + "ResourceRecords": ["www.amazon.com"], + "Weight": "1" + }] + } + } }, - "MyDNSRecord" : { - "Type" : "AWS::Route53::RecordSetGroup", - "Properties" : { - "HostedZoneName" : {"Ref": "MyZone"}, - "Comment" : "Contrived example to redirect to aws.amazon.com 75% of the time and www.amazon.com 25% of the time.", - "RecordSets" : [{ - "SetIdentifier" : { "Fn::Join" : [ " ", [{"Ref" : "AWS::StackName"}, "AWS" ]]}, - "Name" : { "Fn::Join" : [ "", [{"Ref" : "AWS::StackName"}, ".", {"Ref" : "AWS::Region"}, ".", {"Ref" : "MyZone"}, "."]]}, - "Type" : "CNAME", - "TTL" : "900", - "ResourceRecords" : ["aws.amazon.com"], - "Weight" : "3" - },{ - "SetIdentifier" : { "Fn::Join" : [ " ", [{"Ref" : "AWS::StackName"}, "Amazon" ]]}, - "Name" : { "Fn::Join" : [ "", [{"Ref" : "AWS::StackName"}, ".", {"Ref" : "AWS::Region"}, ".", {"Ref" : "MyZone"}, "."]]}, - "Type" : "CNAME", - "TTL" : "900", - 
"ResourceRecords" : ["www.amazon.com"], - "Weight" : "1" - }] - } + "Outputs": { + "DomainName": { + "Description": "Fully qualified domain name", + "Value": {"Ref": "MyDNSRecord"} + } } - }, - - "Outputs" : { - "DomainName" : { - "Description" : "Fully qualified domain name", - "Value" : { "Ref" : "MyDNSRecord" } - } - } -} \ No newline at end of file +} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 3d41c9d91..619d8c3da 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -35,8 +35,8 @@ dummy_template3 = { "VPC": { "Properties": { "CidrBlock": "192.168.0.0/16", - }, - "Type": "AWS::EC2::VPC" + }, + "Type": "AWS::EC2::VPC" } }, } @@ -91,7 +91,8 @@ def test_create_stack_with_notification_arn(): ) stack = conn.describe_stacks()[0] - [n.value for n in stack.notification_arns].should.contain('arn:aws:sns:us-east-1:123456789012:fake-queue') + [n.value for n in stack.notification_arns].should.contain( + 'arn:aws:sns:us-east-1:123456789012:fake-queue') @mock_cloudformation_deprecated @@ -111,16 +112,16 @@ def test_create_stack_from_s3_url(): stack.stack_name.should.equal('new-stack') stack.get_template().should.equal( { - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } } } - } - }) + }) @mock_cloudformation_deprecated @@ -271,7 +272,8 @@ def test_cloudformation_params(): } dummy_template_json = json.dumps(dummy_template) cfn = boto.connect_cloudformation() - cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[('APPNAME', 'testing123')]) + 
cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[ + ('APPNAME', 'testing123')]) stack = cfn.describe_stacks('test_stack1')[0] stack.parameters.should.have.length_of(1) param = stack.parameters[0] @@ -342,23 +344,28 @@ def test_update_stack(): @mock_cloudformation_deprecated def test_update_stack_when_rolled_back(): conn = boto.connect_cloudformation() - stack_id = conn.create_stack("test_stack", template_body=dummy_template_json) + stack_id = conn.create_stack( + "test_stack", template_body=dummy_template_json) - cloudformation_backends[conn.region.name].stacks[stack_id].status = 'ROLLBACK_COMPLETE' + cloudformation_backends[conn.region.name].stacks[ + stack_id].status = 'ROLLBACK_COMPLETE' with assert_raises(BotoServerError) as err: conn.update_stack("test_stack", dummy_template_json) ex = err.exception - ex.body.should.match(r'is in ROLLBACK_COMPLETE state and can not be updated') + ex.body.should.match( + r'is in ROLLBACK_COMPLETE state and can not be updated') ex.error_code.should.equal('ValidationError') ex.reason.should.equal('Bad Request') ex.status.should.equal(400) + @mock_cloudformation_deprecated def test_describe_stack_events_shows_create_update_and_delete(): conn = boto.connect_cloudformation() - stack_id = conn.create_stack("test_stack", template_body=dummy_template_json) + stack_id = conn.create_stack( + "test_stack", template_body=dummy_template_json) conn.update_stack(stack_id, template_body=dummy_template_json2) conn.delete_stack(stack_id) @@ -367,7 +374,8 @@ def test_describe_stack_events_shows_create_update_and_delete(): events[0].resource_type.should.equal("AWS::CloudFormation::Stack") events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") - # testing ordering of stack events without assuming resource events will not exist + # testing ordering of stack events without assuming resource events will + # not exist stack_events_to_look_for = iter([ ("CREATE_IN_PROGRESS", "User Initiated"), 
("CREATE_COMPLETE", None), ("UPDATE_IN_PROGRESS", "User Initiated"), ("UPDATE_COMPLETE", None), @@ -381,12 +389,13 @@ def test_describe_stack_events_shows_create_update_and_delete(): event.logical_resource_id.should.equal("test_stack") event.physical_resource_id.should.equal(stack_id) - status_to_look_for, reason_to_look_for = next(stack_events_to_look_for) + status_to_look_for, reason_to_look_for = next( + stack_events_to_look_for) event.resource_status.should.equal(status_to_look_for) if reason_to_look_for is not None: - event.resource_status_reason.should.equal(reason_to_look_for) + event.resource_status_reason.should.equal( + reason_to_look_for) except StopIteration: assert False, "Too many stack events" list(stack_events_to_look_for).should.be.empty - diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 2ee74f886..29e2dfa10 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -124,7 +124,8 @@ def test_create_stack_from_s3_url(): s3_conn = boto3.resource('s3') bucket = s3_conn.create_bucket(Bucket="foobar") - key = s3_conn.Object('foobar', 'template-key').put(Body=dummy_template_json) + key = s3_conn.Object( + 'foobar', 'template-key').put(Body=dummy_template_json) key_url = s3.generate_presigned_url( ClientMethod='get_object', Params={ @@ -160,6 +161,7 @@ def test_describe_stack_resources(): resource['ResourceType'].should.equal('AWS::EC2::Instance') resource['StackId'].should.equal(stack['StackId']) + @mock_cloudformation def test_describe_stack_by_name(): cf_conn = boto3.client('cloudformation', region_name='us-east-1') @@ -249,6 +251,7 @@ def test_describe_deleted_stack(): stack_by_id['StackName'].should.equal("test_stack") stack_by_id['StackStatus'].should.equal("DELETE_COMPLETE") + @mock_cloudformation def test_describe_updated_stack(): cf_conn = 
boto3.client('cloudformation', region_name='us-east-1') @@ -299,9 +302,9 @@ def test_cloudformation_params(): StackName='test_stack', TemplateBody=dummy_template_with_params_json, Parameters=[{ - "ParameterKey": "APPNAME", - "ParameterValue": "testing123", - }], + "ParameterKey": "APPNAME", + "ParameterValue": "testing123", + }], ) stack.parameters.should.have.length_of(1) @@ -334,6 +337,7 @@ def test_stack_tags(): item for items in [tag.items() for tag in tags] for item in items) observed_tag_items.should.equal(expected_tag_items) + @mock_cloudformation def test_stack_events(): cf = boto3.resource('cloudformation', region_name='us-east-1') @@ -350,7 +354,8 @@ def test_stack_events(): events[0].resource_type.should.equal("AWS::CloudFormation::Stack") events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") - # testing ordering of stack events without assuming resource events will not exist + # testing ordering of stack events without assuming resource events will + # not exist stack_events_to_look_for = iter([ ("CREATE_IN_PROGRESS", "User Initiated"), ("CREATE_COMPLETE", None), ("UPDATE_IN_PROGRESS", "User Initiated"), ("UPDATE_COMPLETE", None), @@ -364,10 +369,12 @@ def test_stack_events(): event.logical_resource_id.should.equal("test_stack") event.physical_resource_id.should.equal(stack.stack_id) - status_to_look_for, reason_to_look_for = next(stack_events_to_look_for) + status_to_look_for, reason_to_look_for = next( + stack_events_to_look_for) event.resource_status.should.equal(status_to_look_for) if reason_to_look_for is not None: - event.resource_status_reason.should.equal(reason_to_look_for) + event.resource_status_reason.should.equal( + reason_to_look_for) except StopIteration: assert False, "Too many stack events" diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 609a0b46d..e2304f840 100644 --- 
a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -143,15 +143,18 @@ def test_update_stack(): sqs_conn = boto.sqs.connect_to_region("us-west-1") queues = sqs_conn.get_all_queues() queues.should.have.length_of(1) - queues[0].get_attributes('VisibilityTimeout')['VisibilityTimeout'].should.equal('60') + queues[0].get_attributes('VisibilityTimeout')[ + 'VisibilityTimeout'].should.equal('60') - sqs_template['Resources']['QueueGroup']['Properties']['VisibilityTimeout'] = 100 + sqs_template['Resources']['QueueGroup'][ + 'Properties']['VisibilityTimeout'] = 100 sqs_template_json = json.dumps(sqs_template) conn.update_stack("test_stack", sqs_template_json) queues = sqs_conn.get_all_queues() queues.should.have.length_of(1) - queues[0].get_attributes('VisibilityTimeout')['VisibilityTimeout'].should.equal('100') + queues[0].get_attributes('VisibilityTimeout')[ + 'VisibilityTimeout'].should.equal('100') @mock_cloudformation_deprecated() @@ -395,7 +398,8 @@ def test_stack_elb_integration_with_update(): load_balancer = elb_conn.get_all_load_balancers()[0] load_balancer.availability_zones[0].should.equal('us-west-1a') - elb_template['Resources']['MyELB']['Properties']['AvailabilityZones'] = ['us-west-1b'] + elb_template['Resources']['MyELB']['Properties'][ + 'AvailabilityZones'] = ['us-west-1b'] elb_template_json = json.dumps(elb_template) conn.update_stack( "elb_stack", @@ -431,7 +435,8 @@ def test_redshift_stack(): redshift_conn = boto.redshift.connect_to_region("us-west-2") cluster_res = redshift_conn.describe_clusters() - clusters = cluster_res['DescribeClustersResponse']['DescribeClustersResult']['Clusters'] + clusters = cluster_res['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] clusters.should.have.length_of(1) cluster = clusters[0] cluster['DBName'].should.equal("mydb") @@ -499,12 +504,14 @@ def test_stack_security_groups(): conn.create_stack( 
"security_group_stack", template_body=security_group_template_json, - tags={"foo":"bar"} + tags={"foo": "bar"} ) ec2_conn = boto.ec2.connect_to_region("us-west-1") - instance_group = ec2_conn.get_all_security_groups(filters={'description': ['My security group']})[0] - other_group = ec2_conn.get_all_security_groups(filters={'description': ['My other group']})[0] + instance_group = ec2_conn.get_all_security_groups( + filters={'description': ['My security group']})[0] + other_group = ec2_conn.get_all_security_groups( + filters={'description': ['My other group']})[0] reservation = ec2_conn.get_all_instances()[0] ec2_instance = reservation.instances[0] @@ -597,13 +604,17 @@ def test_autoscaling_group_with_elb(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() - as_group_resource = [resource for resource in resources if resource.resource_type == 'AWS::AutoScaling::AutoScalingGroup'][0] + as_group_resource = [resource for resource in resources if resource.resource_type == + 'AWS::AutoScaling::AutoScalingGroup'][0] as_group_resource.physical_resource_id.should.contain("my-as-group") - launch_config_resource = [resource for resource in resources if resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] - launch_config_resource.physical_resource_id.should.contain("my-launch-config") + launch_config_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] + launch_config_resource.physical_resource_id.should.contain( + "my-launch-config") - elb_resource = [resource for resource in resources if resource.resource_type == 'AWS::ElasticLoadBalancing::LoadBalancer'][0] + elb_resource = [resource for resource in resources if resource.resource_type == + 'AWS::ElasticLoadBalancing::LoadBalancer'][0] elb_resource.physical_resource_id.should.contain("my-elb") @@ -687,26 +698,32 @@ def test_vpc_single_instance_in_subnet(): eip.domain.should.equal('vpc') 
eip.instance_id.should.equal(instance.id) - security_group = ec2_conn.get_all_security_groups(filters={'vpc_id': [vpc.id]})[0] + security_group = ec2_conn.get_all_security_groups( + filters={'vpc_id': [vpc.id]})[0] security_group.vpc_id.should.equal(vpc.id) stack = conn.describe_stacks()[0] resources = stack.describe_resources() - vpc_resource = [resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] + vpc_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] vpc_resource.physical_resource_id.should.equal(vpc.id) - subnet_resource = [resource for resource in resources if resource.resource_type == 'AWS::EC2::Subnet'][0] + subnet_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::Subnet'][0] subnet_resource.physical_resource_id.should.equal(subnet.id) - eip_resource = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + eip_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] eip_resource.physical_resource_id.should.equal(eip.allocation_id) + @mock_cloudformation() @mock_ec2() @mock_rds2() def test_rds_db_parameter_groups(): ec2_conn = boto3.client("ec2", region_name="us-west-1") - ec2_conn.create_security_group(GroupName='application', Description='Our Application Group') + ec2_conn.create_security_group( + GroupName='application', Description='Our Application Group') template_json = json.dumps(rds_mysql_with_db_parameter_group.template) cf_conn = boto3.client('cloudformation', 'us-west-1') @@ -714,16 +731,16 @@ def test_rds_db_parameter_groups(): StackName="test_stack", TemplateBody=template_json, Parameters=[{'ParameterKey': key, 'ParameterValue': value} for - key, value in [ - ("DBInstanceIdentifier", "master_db"), - ("DBName", "my_db"), - ("DBUser", "my_user"), - ("DBPassword", "my_password"), - ("DBAllocatedStorage", "20"), - ("DBInstanceClass", "db.m1.medium"), - 
("EC2SecurityGroup", "application"), - ("MultiAZ", "true"), - ] + key, value in [ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", "db.m1.medium"), + ("EC2SecurityGroup", "application"), + ("MultiAZ", "true"), + ] ], ) @@ -731,7 +748,8 @@ def test_rds_db_parameter_groups(): db_parameter_groups = rds_conn.describe_db_parameter_groups() len(db_parameter_groups['DBParameterGroups']).should.equal(1) - db_parameter_group_name = db_parameter_groups['DBParameterGroups'][0]['DBParameterGroupName'] + db_parameter_group_name = db_parameter_groups[ + 'DBParameterGroups'][0]['DBParameterGroupName'] found_cloudformation_set_parameter = False for db_parameter in rds_conn.describe_db_parameters(DBParameterGroupName=db_parameter_group_name)['Parameters']: @@ -741,7 +759,6 @@ def test_rds_db_parameter_groups(): found_cloudformation_set_parameter.should.equal(True) - @mock_cloudformation_deprecated() @mock_ec2_deprecated() @mock_rds_deprecated() @@ -906,15 +923,20 @@ def test_iam_roles(): iam_conn = boto.iam.connect_to_region("us-west-1") - role_result = iam_conn.list_roles()['list_roles_response']['list_roles_result']['roles'][0] + role_result = iam_conn.list_roles()['list_roles_response'][ + 'list_roles_result']['roles'][0] role = iam_conn.get_role(role_result.role_name) role.role_name.should.contain("my-role") role.path.should.equal("my-path") - instance_profile_response = iam_conn.list_instance_profiles()['list_instance_profiles_response'] - cfn_instance_profile = instance_profile_response['list_instance_profiles_result']['instance_profiles'][0] - instance_profile = iam_conn.get_instance_profile(cfn_instance_profile.instance_profile_name) - instance_profile.instance_profile_name.should.contain("my-instance-profile") + instance_profile_response = iam_conn.list_instance_profiles()[ + 'list_instance_profiles_response'] + cfn_instance_profile = 
instance_profile_response[ + 'list_instance_profiles_result']['instance_profiles'][0] + instance_profile = iam_conn.get_instance_profile( + cfn_instance_profile.instance_profile_name) + instance_profile.instance_profile_name.should.contain( + "my-instance-profile") instance_profile.path.should.equal("my-path") instance_profile.role_id.should.equal(role.role_id) @@ -924,10 +946,13 @@ def test_iam_roles(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() - instance_profile_resource = [resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'][0] - instance_profile_resource.physical_resource_id.should.equal(instance_profile.instance_profile_name) + instance_profile_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'][0] + instance_profile_resource.physical_resource_id.should.equal( + instance_profile.instance_profile_name) - role_resource = [resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'][0] + role_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'][0] role_resource.physical_resource_id.should.equal(role.role_id) @@ -949,13 +974,15 @@ def test_single_instance_with_ebs_volume(): volumes = ec2_conn.get_all_volumes() # Grab the mounted drive - volume = [volume for volume in volumes if volume.attach_data.device == '/dev/sdh'][0] + volume = [ + volume for volume in volumes if volume.attach_data.device == '/dev/sdh'][0] volume.volume_state().should.equal('in-use') volume.attach_data.instance_id.should.equal(ec2_instance.id) stack = conn.describe_stacks()[0] resources = stack.describe_resources() - ebs_volumes = [resource for resource in resources if resource.resource_type == 'AWS::EC2::Volume'] + ebs_volumes = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::Volume'] ebs_volumes[0].physical_resource_id.should.equal(volume.id) @@ -981,7 +1008,8 @@ def 
test_classic_eip(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() - cfn_eip = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + cfn_eip = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] cfn_eip.physical_resource_id.should.equal(eip.public_ip) @@ -997,7 +1025,8 @@ def test_vpc_eip(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() - cfn_eip = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + cfn_eip = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] cfn_eip.physical_resource_id.should.equal(eip.allocation_id) @@ -1111,7 +1140,8 @@ def test_conditional_if_handling(): ec2_instance.terminate() conn = boto.cloudformation.connect_to_region("us-west-2") - conn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[("ENV", "prd")]) + conn.create_stack( + 'test_stack1', template_body=dummy_template_json, parameters=[("ENV", "prd")]) ec2_conn = boto.ec2.connect_to_region("us-west-2") reservation = ec2_conn.get_all_instances()[0] ec2_instance = reservation.instances[0] @@ -1175,7 +1205,8 @@ def test_route53_roundrobin(): template_body=template_json, ) - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) zone_id = zones[0]['Id'] zone_id = zone_id.split('/') @@ -1203,7 +1234,8 @@ def test_route53_roundrobin(): stack = conn.describe_stacks()[0] output = stack.outputs[0] output.key.should.equal('DomainName') - output.value.should.equal('arn:aws:route53:::hostedzone/{0}'.format(zone_id)) + output.value.should.equal( + 'arn:aws:route53:::hostedzone/{0}'.format(zone_id)) @mock_cloudformation_deprecated() @@ -1222,13 +1254,13 @@ def test_route53_ec2_instance_with_public_ip(): instance_id = 
ec2_conn.get_all_reservations()[0].instances[0].id - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) zone_id = zones[0]['Id'] zone_id = zone_id.split('/') zone_id = zone_id[2] - rrsets = route53_conn.get_all_rrsets(zone_id) rrsets.should.have.length_of(1) @@ -1253,7 +1285,8 @@ def test_route53_associate_health_check(): template_body=template_json, ) - checks = route53_conn.get_list_health_checks()['ListHealthChecksResponse']['HealthChecks'] + checks = route53_conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] list(checks).should.have.length_of(1) check = checks[0] health_check_id = check['Id'] @@ -1265,7 +1298,8 @@ def test_route53_associate_health_check(): config["ResourcePath"].should.equal("/") config["Type"].should.equal("HTTP") - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) zone_id = zones[0]['Id'] zone_id = zone_id.split('/') @@ -1290,7 +1324,8 @@ def test_route53_with_update(): template_body=template_json, ) - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) zone_id = zones[0]['Id'] zone_id = zone_id.split('/') @@ -1302,14 +1337,16 @@ def test_route53_with_update(): record_set = rrsets[0] record_set.resource_records.should.equal(["my.example.com"]) - route53_health_check.template['Resources']['myDNSRecord']['Properties']['ResourceRecords'] = ["my_other.example.com"] + route53_health_check.template['Resources']['myDNSRecord'][ + 'Properties']['ResourceRecords'] = ["my_other.example.com"] template_json = 
json.dumps(route53_health_check.template) cf_conn.update_stack( "test_stack", template_body=template_json, ) - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones'] + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] list(zones).should.have.length_of(1) zone_id = zones[0]['Id'] zone_id = zone_id.split('/') @@ -1355,12 +1392,14 @@ def test_sns_topic(): ) sns_conn = boto.sns.connect_to_region("us-west-1") - topics = sns_conn.get_all_topics()["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topics = sns_conn.get_all_topics()["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] topics.should.have.length_of(1) topic_arn = topics[0]['TopicArn'] topic_arn.should.contain("my_topics") - subscriptions = sns_conn.get_all_subscriptions()["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"] + subscriptions = sns_conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] subscriptions.should.have.length_of(1) subscription = subscriptions[0] subscription["TopicArn"].should.equal(topic_arn) @@ -1504,12 +1543,15 @@ def test_multiple_security_group_ingress_separate_from_security_group_by_id(): ) ec2_conn = boto.ec2.connect_to_region("us-west-1") - security_group1 = ec2_conn.get_all_security_groups(filters={"tag:sg-name": "sg1"})[0] - security_group2 = ec2_conn.get_all_security_groups(filters={"tag:sg-name": "sg2"})[0] + security_group1 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg1"})[0] + security_group2 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] security_group1.rules.should.have.length_of(1) security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[0].group_id.should.equal(security_group2.id) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) security_group1.rules[0].ip_protocol.should.equal('tcp') 
security_group1.rules[0].from_port.should.equal('80') security_group1.rules[0].to_port.should.equal('8080') @@ -1519,7 +1561,8 @@ def test_multiple_security_group_ingress_separate_from_security_group_by_id(): @mock_ec2_deprecated def test_security_group_ingress_separate_from_security_group_by_id(): ec2_conn = boto.ec2.connect_to_region("us-west-1") - ec2_conn.create_security_group("test-security-group1", "test security group") + ec2_conn.create_security_group( + "test-security-group1", "test security group") template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -1555,12 +1598,15 @@ def test_security_group_ingress_separate_from_security_group_by_id(): "test_stack", template_body=template_json, ) - security_group1 = ec2_conn.get_all_security_groups(groupnames=["test-security-group1"])[0] - security_group2 = ec2_conn.get_all_security_groups(filters={"tag:sg-name": "sg2"})[0] + security_group1 = ec2_conn.get_all_security_groups( + groupnames=["test-security-group1"])[0] + security_group2 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] security_group1.rules.should.have.length_of(1) security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[0].group_id.should.equal(security_group2.id) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) security_group1.rules[0].ip_protocol.should.equal('tcp') security_group1.rules[0].from_port.should.equal('80') security_group1.rules[0].to_port.should.equal('8080') @@ -1621,12 +1667,15 @@ def test_security_group_ingress_separate_from_security_group_by_id_using_vpc(): "test_stack", template_body=template_json, ) - security_group1 = vpc_conn.get_all_security_groups(filters={"tag:sg-name": "sg1"})[0] - security_group2 = vpc_conn.get_all_security_groups(filters={"tag:sg-name": "sg2"})[0] + security_group1 = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg1"})[0] + security_group2 = vpc_conn.get_all_security_groups( + 
filters={"tag:sg-name": "sg2"})[0] security_group1.rules.should.have.length_of(1) security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[0].group_id.should.equal(security_group2.id) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) security_group1.rules[0].ip_protocol.should.equal('tcp') security_group1.rules[0].from_port.should.equal('80') security_group1.rules[0].to_port.should.equal('8080') @@ -1663,17 +1712,20 @@ def test_security_group_with_update(): "test_stack", template_body=template_json, ) - security_group = vpc_conn.get_all_security_groups(filters={"tag:sg-name": "sg"})[0] + security_group = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg"})[0] security_group.vpc_id.should.equal(vpc1.id) vpc2 = vpc_conn.create_vpc("10.1.0.0/16") - template['Resources']['test-security-group']['Properties']['VpcId'] = vpc2.id + template['Resources'][ + 'test-security-group']['Properties']['VpcId'] = vpc2.id template_json = json.dumps(template) cf_conn.update_stack( "test_stack", template_body=template_json, ) - security_group = vpc_conn.get_all_security_groups(filters={"tag:sg-name": "sg"})[0] + security_group = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg"})[0] security_group.vpc_id.should.equal(vpc2.id) @@ -1779,11 +1831,14 @@ def test_datapipeline(): data_pipelines = dp_conn.list_pipelines() data_pipelines['pipelineIdList'].should.have.length_of(1) - data_pipelines['pipelineIdList'][0]['name'].should.equal('testDataPipeline') + data_pipelines['pipelineIdList'][0][ + 'name'].should.equal('testDataPipeline') stack_resources = cf_conn.list_stack_resources(stack_id) stack_resources.should.have.length_of(1) - stack_resources[0].physical_resource_id.should.equal(data_pipelines['pipelineIdList'][0]['id']) + stack_resources[0].physical_resource_id.should.equal( + data_pipelines['pipelineIdList'][0]['id']) + def _process_lamda(pfunc): import io @@ -1849,33 +1904,35 @@ def 
test_lambda_function(): def test_nat_gateway(): ec2_conn = boto3.client('ec2', 'us-east-1') vpc_id = ec2_conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']['VpcId'] - subnet_id = ec2_conn.create_subnet(CidrBlock='10.0.1.0/24', VpcId=vpc_id)['Subnet']['SubnetId'] - route_table_id = ec2_conn.create_route_table(VpcId=vpc_id)['RouteTable']['RouteTableId'] + subnet_id = ec2_conn.create_subnet( + CidrBlock='10.0.1.0/24', VpcId=vpc_id)['Subnet']['SubnetId'] + route_table_id = ec2_conn.create_route_table( + VpcId=vpc_id)['RouteTable']['RouteTableId'] template = { "AWSTemplateFormatVersion": "2010-09-09", "Resources": { - "NAT" : { - "DependsOn" : "vpcgatewayattachment", - "Type" : "AWS::EC2::NatGateway", - "Properties" : { - "AllocationId" : { "Fn::GetAtt" : ["EIP", "AllocationId"]}, - "SubnetId" : subnet_id - } - }, - "EIP" : { - "Type" : "AWS::EC2::EIP", - "Properties" : { - "Domain" : "vpc" + "NAT": { + "DependsOn": "vpcgatewayattachment", + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": {"Fn::GetAtt": ["EIP", "AllocationId"]}, + "SubnetId": subnet_id } }, - "Route" : { - "Type" : "AWS::EC2::Route", - "Properties" : { - "RouteTableId" : route_table_id, - "DestinationCidrBlock" : "0.0.0.0/0", - "NatGatewayId" : { "Ref" : "NAT" } - } + "EIP": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + }, + "Route": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": route_table_id, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": {"Ref": "NAT"} + } }, "internetgateway": { "Type": "AWS::EC2::InternetGateway" @@ -1905,6 +1962,7 @@ def test_nat_gateway(): result['NatGateways'][0]['SubnetId'].should.equal(subnet_id) result['NatGateways'][0]['State'].should.equal('available') + @mock_cloudformation() @mock_kms() def test_stack_kms(): @@ -1944,42 +2002,43 @@ def test_stack_spot_fleet(): conn = boto3.client('ec2', 'us-east-1') vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet(VpcId=vpc['VpcId'], 
CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] spot_fleet_template = { 'Resources': { "SpotFleet": { - "Type": "AWS::EC2::SpotFleet", - "Properties": { - "SpotFleetRequestConfigData": { - "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", - "SpotPrice": "0.12", - "TargetCapacity": 6, - "AllocationStrategy": "diversified", - "LaunchSpecifications": [ - { - "EbsOptimized": "false", - "InstanceType": 't2.small', - "ImageId": "ami-1234", - "SubnetId": subnet_id, - "WeightedCapacity": "2", - "SpotPrice": "0.13", - }, - { - "EbsOptimized": "true", - "InstanceType": 't2.large', - "ImageId": "ami-1234", - "Monitoring": { "Enabled": "true" }, - "SecurityGroups": [{"GroupId": "sg-123"}], - "SubnetId": subnet_id, - "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, - "WeightedCapacity": "4", - "SpotPrice": "10.00", - } - ] + "Type": "AWS::EC2::SpotFleet", + "Properties": { + "SpotFleetRequestConfigData": { + "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", + "SpotPrice": "0.12", + "TargetCapacity": 6, + "AllocationStrategy": "diversified", + "LaunchSpecifications": [ + { + "EbsOptimized": "false", + "InstanceType": 't2.small', + "ImageId": "ami-1234", + "SubnetId": subnet_id, + "WeightedCapacity": "2", + "SpotPrice": "0.13", + }, + { + "EbsOptimized": "true", + "InstanceType": 't2.large', + "ImageId": "ami-1234", + "Monitoring": {"Enabled": "true"}, + "SecurityGroups": [{"GroupId": "sg-123"}], + "SubnetId": subnet_id, + "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, + "WeightedCapacity": "4", + "SpotPrice": "10.00", + } + ] + } } - } } } } @@ -1993,9 +2052,11 @@ def test_stack_spot_fleet(): stack_resources = cf_conn.list_stack_resources(StackName=stack_id) stack_resources['StackResourceSummaries'].should.have.length_of(1) - spot_fleet_id = 
stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'] + spot_fleet_id = stack_resources[ + 'StackResourceSummaries'][0]['PhysicalResourceId'] - spot_fleet_requests = conn.describe_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] len(spot_fleet_requests).should.equal(1) spot_fleet_request = spot_fleet_requests[0] spot_fleet_request['SpotFleetRequestState'].should.equal("active") @@ -2003,7 +2064,8 @@ def test_stack_spot_fleet(): spot_fleet_config['SpotPrice'].should.equal('0.12') spot_fleet_config['TargetCapacity'].should.equal(6) - spot_fleet_config['IamFleetRole'].should.equal('arn:aws:iam::123456789012:role/fleet') + spot_fleet_config['IamFleetRole'].should.equal( + 'arn:aws:iam::123456789012:role/fleet') spot_fleet_config['AllocationStrategy'].should.equal('diversified') spot_fleet_config['FulfilledCapacity'].should.equal(6.0) diff --git a/tests/test_cloudformation/test_server.py b/tests/test_cloudformation/test_server.py index b4f50024b..de3ab77b5 100644 --- a/tests/test_cloudformation/test_server.py +++ b/tests/test_cloudformation/test_server.py @@ -20,11 +20,14 @@ def test_cloudformation_server_get(): "Resources": {}, } create_stack_resp = test_client.action_data("CreateStack", StackName=stack_name, - TemplateBody=json.dumps(template_body)) - create_stack_resp.should.match(r".*.*.*.*.*", re.DOTALL) - stack_id_from_create_response = re.search("(.*)", create_stack_resp).groups()[0] + TemplateBody=json.dumps(template_body)) + create_stack_resp.should.match( + r".*.*.*.*.*", re.DOTALL) + stack_id_from_create_response = re.search( + "(.*)", create_stack_resp).groups()[0] list_stacks_resp = test_client.action_data("ListStacks") - stack_id_from_list_response = re.search("(.*)", list_stacks_resp).groups()[0] + stack_id_from_list_response = re.search( + "(.*)", list_stacks_resp).groups()[0] 
stack_id_from_create_response.should.equal(stack_id_from_list_response) diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index 42208810f..be459eff1 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -25,8 +25,8 @@ dummy_template = { } }, "S3Bucket": { - "Type": "AWS::S3::Bucket", - "DeletionPolicy": "Retain" + "Type": "AWS::S3::Bucket", + "DeletionPolicy": "Retain" }, }, } @@ -71,15 +71,19 @@ get_attribute_output = { } } -outputs_template = dict(list(dummy_template.items()) + list(output_dict.items())) -bad_outputs_template = dict(list(dummy_template.items()) + list(bad_output.items())) -get_attribute_outputs_template = dict(list(dummy_template.items()) + list(get_attribute_output.items())) +outputs_template = dict(list(dummy_template.items()) + + list(output_dict.items())) +bad_outputs_template = dict( + list(dummy_template.items()) + list(bad_output.items())) +get_attribute_outputs_template = dict( + list(dummy_template.items()) + list(get_attribute_output.items())) dummy_template_json = json.dumps(dummy_template) name_type_template_json = json.dumps(name_type_template) output_type_template_json = json.dumps(outputs_template) bad_output_template_json = json.dumps(bad_outputs_template) -get_attribute_outputs_template_json = json.dumps(get_attribute_outputs_template) +get_attribute_outputs_template_json = json.dumps( + get_attribute_outputs_template) def test_parse_stack_resources(): @@ -104,7 +108,8 @@ def test_parse_stack_resources(): @patch("moto.cloudformation.parsing.logger") def test_missing_resource_logs(logger): resource_class_from_type("foobar") - logger.warning.assert_called_with('No Moto CloudFormation support for %s', 'foobar') + logger.warning.assert_called_with( + 'No Moto CloudFormation support for %s', 'foobar') def test_parse_stack_with_name_type_resource(): diff --git a/tests/test_cloudwatch/test_cloudwatch.py 
b/tests/test_cloudwatch/test_cloudwatch.py index 88a3190c6..9b3f76c36 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -4,6 +4,7 @@ import sure # noqa from moto import mock_cloudwatch_deprecated + def alarm_fixture(name="tester", action=None): action = action or ['arn:alarm'] return MetricAlarm( @@ -23,6 +24,7 @@ def alarm_fixture(name="tester", action=None): unit='Seconds', ) + @mock_cloudwatch_deprecated def test_create_alarm(): conn = boto.connect_cloudwatch() @@ -42,7 +44,8 @@ def test_create_alarm(): alarm.evaluation_periods.should.equal(5) alarm.statistic.should.equal('Average') alarm.description.should.equal('A test') - dict(alarm.dimensions).should.equal({'InstanceId': ['i-0123456,i-0123457']}) + dict(alarm.dimensions).should.equal( + {'InstanceId': ['i-0123456,i-0123457']}) list(alarm.alarm_actions).should.equal(['arn:alarm']) list(alarm.ok_actions).should.equal(['arn:ok']) list(alarm.insufficient_data_actions).should.equal(['arn:insufficient']) @@ -84,7 +87,8 @@ def test_put_metric_data(): metric = metrics[0] metric.namespace.should.equal('tester') metric.name.should.equal('metric') - dict(metric.dimensions).should.equal({'InstanceId': ['i-0123456,i-0123457']}) + dict(metric.dimensions).should.equal( + {'InstanceId': ['i-0123456,i-0123457']}) @mock_cloudwatch_deprecated @@ -103,7 +107,8 @@ def test_describe_alarms(): alarms.should.have.length_of(4) alarms = conn.describe_alarms(alarm_name_prefix="nfoo") alarms.should.have.length_of(2) - alarms = conn.describe_alarms(alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) + alarms = conn.describe_alarms( + alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) alarms.should.have.length_of(3) alarms = conn.describe_alarms(action_prefix="afoo") alarms.should.have.length_of(2) @@ -114,10 +119,11 @@ def test_describe_alarms(): alarms = conn.describe_alarms() alarms.should.have.length_of(0) + @mock_cloudwatch_deprecated def test_describe_state_value_unimplemented(): conn = 
boto.connect_cloudwatch() conn.describe_alarms() - conn.describe_alarms.when.called_with(state_value="foo").should.throw(NotImplementedError) - + conn.describe_alarms.when.called_with( + state_value="foo").should.throw(NotImplementedError) diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 81dc0639a..9e3638cc2 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -59,11 +59,13 @@ def test_decorater_wrapped_gets_set(): """ Moto decorator's __wrapped__ should get set to the tests function """ - test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal('test_decorater_wrapped_gets_set') + test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal( + 'test_decorater_wrapped_gets_set') @mock_ec2_deprecated class Tester(object): + def test_the_class(self): conn = boto.connect_ec2() list(conn.get_all_instances()).should.have.length_of(0) @@ -75,6 +77,7 @@ class Tester(object): @mock_s3_deprecated class TesterWithSetup(unittest.TestCase): + def setUp(self): self.conn = boto.connect_s3() self.conn.create_bucket('mybucket') diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py index 80dd501e7..69b9052e9 100644 --- a/tests/test_core/test_instance_metadata.py +++ b/tests/test_core/test_instance_metadata.py @@ -30,13 +30,15 @@ def test_meta_data_iam(): @mock_ec2 def test_meta_data_security_credentials(): - res = requests.get("{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL)) + res = requests.get( + "{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL)) res.content.should.equal(b"default-role") @mock_ec2 def test_meta_data_default_role(): - res = requests.get("{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL)) + res = requests.get( + "{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL)) json_response = res.json() 
json_response.should.contain('AccessKeyId') json_response.should.contain('SecretAccessKey') diff --git a/tests/test_core/test_responses.py b/tests/test_core/test_responses.py index aa89ac840..c3cc27aef 100644 --- a/tests/test_core/test_responses.py +++ b/tests/test_core/test_responses.py @@ -7,7 +7,8 @@ from moto.core.responses import flatten_json_request_body def test_flatten_json_request_body(): - spec = AWSServiceSpec('data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') + spec = AWSServiceSpec( + 'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') body = { 'Name': 'cluster', @@ -42,25 +43,32 @@ def test_flatten_json_request_body(): flat['Name'].should.equal(body['Name']) flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName']) for idx in range(2): - flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal(body['Instances']['InstanceGroups'][idx]['InstanceRole']) - flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal(body['Instances']['InstanceGroups'][idx]['InstanceType']) - flat['Instances.Placement.AvailabilityZone'].should.equal(body['Instances']['Placement']['AvailabilityZone']) + flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceRole']) + flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceType']) + flat['Instances.Placement.AvailabilityZone'].should.equal( + body['Instances']['Placement']['AvailabilityZone']) for idx in range(1): prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep' step = body['Steps'][idx]['HadoopJarStep'] i = 0 while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat: - flat[prefix + '.Properties.member.' + str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) - flat[prefix + '.Properties.member.' 
+ str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) i += 1 i = 0 while prefix + '.Args.member.' + str(i + 1) in flat: - flat[prefix + '.Args.member.' + str(i + 1)].should.equal(step['Args'][i]) + flat[prefix + '.Args.member.' + + str(i + 1)].should.equal(step['Args'][i]) i += 1 for idx in range(2): - flat['Configurations.member.' + str(idx + 1) + '.Classification'].should.equal(body['Configurations'][idx]['Classification']) + flat['Configurations.member.' + str(idx + 1) + '.Classification'].should.equal( + body['Configurations'][idx]['Classification']) props = {} i = 1 diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index a0fb328cf..b7290e351 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -32,19 +32,22 @@ def test_port_argument(run_simple): def test_domain_dispatched(): dispatcher = DomainDispatcherApplication(create_backend_app) - backend_app = dispatcher.get_application({"HTTP_HOST": "email.us-east1.amazonaws.com"}) + backend_app = dispatcher.get_application( + {"HTTP_HOST": "email.us-east1.amazonaws.com"}) keys = list(backend_app.view_functions.keys()) keys[0].should.equal('EmailResponse.dispatch') def test_domain_without_matches(): dispatcher = DomainDispatcherApplication(create_backend_app) - dispatcher.get_application.when.called_with({"HTTP_HOST": "not-matching-anything.com"}).should.throw(RuntimeError) + dispatcher.get_application.when.called_with( + {"HTTP_HOST": "not-matching-anything.com"}).should.throw(RuntimeError) def test_domain_dispatched_with_service(): # If we pass a particular service, always return that. 
dispatcher = DomainDispatcherApplication(create_backend_app, service="s3") - backend_app = dispatcher.get_application({"HTTP_HOST": "s3.us-east1.amazonaws.com"}) + backend_app = dispatcher.get_application( + {"HTTP_HOST": "s3.us-east1.amazonaws.com"}) keys = set(backend_app.view_functions.keys()) keys.should.contain('ResponseObject.key_response') diff --git a/tests/test_core/test_url_mapping.py b/tests/test_core/test_url_mapping.py index 4e4e19a3a..8f7921a5a 100644 --- a/tests/test_core/test_url_mapping.py +++ b/tests/test_core/test_url_mapping.py @@ -14,7 +14,8 @@ def test_flask_path_converting_simple(): def test_flask_path_converting_regex(): - convert_regex_to_flask_path("/(?P[a-zA-Z0-9\-_]+)").should.equal('/') + convert_regex_to_flask_path( + "/(?P[a-zA-Z0-9\-_]+)").should.equal('/') convert_regex_to_flask_path("(?P\d+)/(?P.*)$").should.equal( '/' diff --git a/tests/test_datapipeline/test_datapipeline.py b/tests/test_datapipeline/test_datapipeline.py index aaa9f7f77..520142c2e 100644 --- a/tests/test_datapipeline/test_datapipeline.py +++ b/tests/test_datapipeline/test_datapipeline.py @@ -20,7 +20,8 @@ def test_create_pipeline(): res = conn.create_pipeline("mypipeline", "some-unique-id") pipeline_id = res["pipelineId"] - pipeline_descriptions = conn.describe_pipelines([pipeline_id])["pipelineDescriptionList"] + pipeline_descriptions = conn.describe_pipelines( + [pipeline_id])["pipelineDescriptionList"] pipeline_descriptions.should.have.length_of(1) pipeline_description = pipeline_descriptions[0] @@ -105,7 +106,8 @@ def test_describing_pipeline_objects(): conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) - objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)['pipelineObjects'] + objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)[ + 'pipelineObjects'] objects.should.have.length_of(2) default_object = [x for x in objects if x['id'] == 'Default'][0] @@ -125,7 +127,8 @@ def test_activate_pipeline(): pipeline_id = 
res["pipelineId"] conn.activate_pipeline(pipeline_id) - pipeline_descriptions = conn.describe_pipelines([pipeline_id])["pipelineDescriptionList"] + pipeline_descriptions = conn.describe_pipelines( + [pipeline_id])["pipelineDescriptionList"] pipeline_descriptions.should.have.length_of(1) pipeline_description = pipeline_descriptions[0] fields = pipeline_description['fields'] diff --git a/tests/test_datapipeline/test_server.py b/tests/test_datapipeline/test_server.py index 012c5ad55..03c77b034 100644 --- a/tests/test_datapipeline/test_server.py +++ b/tests/test_datapipeline/test_server.py @@ -17,9 +17,10 @@ def test_list_streams(): test_client = backend.test_client() res = test_client.post('/', - data={"pipelineIds": ["ASdf"]}, - headers={"X-Amz-Target": "DataPipeline.DescribePipelines"}, - ) + data={"pipelineIds": ["ASdf"]}, + headers={ + "X-Amz-Target": "DataPipeline.DescribePipelines"}, + ) json_data = json.loads(res.data.decode("utf-8")) json_data.should.equal({ diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index f2df39a22..d48519755 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -16,15 +16,18 @@ from boto.exception import DynamoDBResponseError @mock_dynamodb_deprecated def test_list_tables(): name = 'TestTable' - dynamodb_backend.create_table(name, hash_key_attr="name", hash_key_type="S") + dynamodb_backend.create_table( + name, hash_key_attr="name", hash_key_type="S") conn = boto.connect_dynamodb('the_key', 'the_secret') assert conn.list_tables() == ['TestTable'] @mock_dynamodb_deprecated def test_list_tables_layer_1(): - dynamodb_backend.create_table("test_1", hash_key_attr="name", hash_key_type="S") - dynamodb_backend.create_table("test_2", hash_key_attr="name", hash_key_type="S") + dynamodb_backend.create_table( + "test_1", hash_key_attr="name", hash_key_type="S") + dynamodb_backend.create_table( + "test_2", hash_key_attr="name", hash_key_type="S") conn = 
boto.connect_dynamodb('the_key', 'the_secret') res = conn.layer1.list_tables(limit=1) expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index c7832b08f..2a482b31e 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -69,7 +69,8 @@ def test_delete_table(): conn.layer1.delete_table('messages') conn.list_tables().should.have.length_of(0) - conn.layer1.delete_table.when.called_with('messages').should.throw(DynamoDBResponseError) + conn.layer1.delete_table.when.called_with( + 'messages').should.throw(DynamoDBResponseError) @mock_dynamodb_deprecated @@ -192,7 +193,8 @@ def test_get_item_without_range_key(): new_item = table.new_item(hash_key=hash_key, range_key=range_key) new_item.put() - table.get_item.when.called_with(hash_key=hash_key).should.throw(DynamoDBValidationError) + table.get_item.when.called_with( + hash_key=hash_key).should.throw(DynamoDBValidationError) @mock_dynamodb_deprecated @@ -304,22 +306,28 @@ def test_query(): ) item.put() - results = table.query(hash_key='the-key', range_key_condition=condition.GT('1')) + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('1')) results.response['Items'].should.have.length_of(3) - results = table.query(hash_key='the-key', range_key_condition=condition.GT('234')) + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('234')) results.response['Items'].should.have.length_of(2) - results = table.query(hash_key='the-key', range_key_condition=condition.GT('9999')) + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('9999')) results.response['Items'].should.have.length_of(0) - results = table.query(hash_key='the-key', range_key_condition=condition.CONTAINS('12')) + results = table.query(hash_key='the-key', + 
range_key_condition=condition.CONTAINS('12')) results.response['Items'].should.have.length_of(1) - results = table.query(hash_key='the-key', range_key_condition=condition.BEGINS_WITH('7')) + results = table.query(hash_key='the-key', + range_key_condition=condition.BEGINS_WITH('7')) results.response['Items'].should.have.length_of(1) - results = table.query(hash_key='the-key', range_key_condition=condition.BETWEEN('567', '890')) + results = table.query(hash_key='the-key', + range_key_condition=condition.BETWEEN('567', '890')) results.response['Items'].should.have.length_of(1) diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index 18d353928..ebd0c2051 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -63,7 +63,8 @@ def test_delete_table(): conn.layer1.delete_table('messages') conn.list_tables().should.have.length_of(0) - conn.layer1.delete_table.when.called_with('messages').should.throw(DynamoDBResponseError) + conn.layer1.delete_table.when.called_with( + 'messages').should.throw(DynamoDBResponseError) @mock_dynamodb_deprecated diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 9e92e7985..860333e50 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -15,17 +15,18 @@ try: except ImportError: print("This boto version is not supported") + @requires_boto_gte("2.9") @mock_dynamodb2_deprecated def test_list_tables(): name = 'TestTable' #{'schema': } - dynamodb_backend2.create_table(name,schema=[ + dynamodb_backend2.create_table(name, schema=[ {u'KeyType': u'HASH', u'AttributeName': u'forum_name'}, {u'KeyType': u'RANGE', u'AttributeName': u'subject'} ]) - conn = boto.dynamodb2.connect_to_region( - 'us-west-2', + conn = boto.dynamodb2.connect_to_region( + 'us-west-2', aws_access_key_id="ak", 
aws_secret_access_key="sk") assert conn.list_tables()["TableNames"] == [name] @@ -34,13 +35,13 @@ def test_list_tables(): @requires_boto_gte("2.9") @mock_dynamodb2_deprecated def test_list_tables_layer_1(): - dynamodb_backend2.create_table("test_1",schema=[ + dynamodb_backend2.create_table("test_1", schema=[ {u'KeyType': u'HASH', u'AttributeName': u'name'} ]) - dynamodb_backend2.create_table("test_2",schema=[ + dynamodb_backend2.create_table("test_2", schema=[ {u'KeyType': u'HASH', u'AttributeName': u'name'} ]) - conn = boto.dynamodb2.connect_to_region( + conn = boto.dynamodb2.connect_to_region( 'us-west-2', aws_access_key_id="ak", aws_secret_access_key="sk") @@ -57,7 +58,7 @@ def test_list_tables_layer_1(): @requires_boto_gte("2.9") @mock_dynamodb2_deprecated def test_describe_missing_table(): - conn = boto.dynamodb2.connect_to_region( + conn = boto.dynamodb2.connect_to_region( 'us-west-2', aws_access_key_id="ak", aws_secret_access_key="sk") diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 029506378..58e0d66d1 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -140,7 +140,8 @@ def test_delete_table(): table.delete() conn.list_tables()["TableNames"].should.have.length_of(0) - conn.delete_table.when.called_with('messages').should.throw(JSONResponseError) + conn.delete_table.when.called_with( + 'messages').should.throw(JSONResponseError) @requires_boto_gte("2.9") @@ -181,7 +182,8 @@ def test_item_add_and_describe_and_update(): }) ok.should.equal(True) - table.get_item(forum_name="LOLCat Forum", subject='Check this out!').should_not.be.none + table.get_item(forum_name="LOLCat Forum", + subject='Check this out!').should_not.be.none returned_item = table.get_item( forum_name='LOLCat Forum', @@ -224,7 +226,8 @@ def test_item_partial_save(): } table.put_item(data=data) - returned_item = 
table.get_item(forum_name="LOLCat Forum", subject='The LOLz') + returned_item = table.get_item( + forum_name="LOLCat Forum", subject='The LOLz') returned_item['SentBy'] = 'User B' returned_item.partial_save() @@ -270,7 +273,8 @@ def test_get_missing_item(): @mock_dynamodb2_deprecated def test_get_item_with_undeclared_table(): table = Table('undeclared-table') - table.get_item.when.called_with(test_hash=3241526475).should.throw(JSONResponseError) + table.get_item.when.called_with( + test_hash=3241526475).should.throw(JSONResponseError) @requires_boto_gte("2.9") @@ -287,7 +291,8 @@ def test_get_item_without_range_key(): hash_key = 3241526475 range_key = 1234567890987 table.put_item(data={'test_hash': hash_key, 'test_range': range_key}) - table.get_item.when.called_with(test_hash=hash_key).should.throw(ValidationException) + table.get_item.when.called_with( + test_hash=hash_key).should.throw(ValidationException) @requires_boto_gte("2.30.0") @@ -355,19 +360,23 @@ def test_query(): table.count().should.equal(4) - results = table.query_2(forum_name__eq='the-key', subject__gt='1', consistent=True) + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) expected = ["123", "456", "789"] for index, item in enumerate(results): item["subject"].should.equal(expected[index]) - results = table.query_2(forum_name__eq="the-key", subject__gt='1', reverse=True) + results = table.query_2(forum_name__eq="the-key", + subject__gt='1', reverse=True) for index, item in enumerate(results): item["subject"].should.equal(expected[len(expected) - 1 - index]) - results = table.query_2(forum_name__eq='the-key', subject__gt='1', consistent=True) + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) sum(1 for _ in results).should.equal(3) - results = table.query_2(forum_name__eq='the-key', subject__gt='234', consistent=True) + results = table.query_2(forum_name__eq='the-key', + subject__gt='234', consistent=True) sum(1 for _ in 
results).should.equal(2) results = table.query_2(forum_name__eq='the-key', subject__gt='9999') @@ -379,7 +388,8 @@ def test_query(): results = table.query_2(forum_name__eq='the-key', subject__beginswith='7') sum(1 for _ in results).should.equal(1) - results = table.query_2(forum_name__eq='the-key', subject__between=['567', '890']) + results = table.query_2(forum_name__eq='the-key', + subject__between=['567', '890']) sum(1 for _ in results).should.equal(1) @@ -558,15 +568,15 @@ def test_create_with_global_indexes(): RangeKey('version'), ], global_indexes=[ GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), ]) table_description = conn.describe_table("messages") @@ -601,25 +611,25 @@ def test_query_with_global_indexes(): RangeKey('version'), ], global_indexes=[ GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), GlobalAllIndex('status-created_at-index', - parts=[ - HashKey('status'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 2, - 'write': 1 - } - ) + parts=[ + HashKey('status'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 2, + 'write': 1 + } + ) ]) item_data = { @@ -653,7 +663,8 @@ def test_query_with_local_indexes(): item['version'] = '2' item.save(overwrite=True) - results = table.query(forum_name__eq='Cool Forum', index='threads_index', threads__eq=1) + results = table.query(forum_name__eq='Cool Forum', + index='threads_index', threads__eq=1) list(results).should.have.length_of(1) @@ -888,7 +899,8 @@ def test_failed_overwrite(): 
table.put_item(data=data2, overwrite=True) data3 = {'id': '123', 'range': 'abc', 'data': '812'} - table.put_item.when.called_with(data=data3).should.throw(ConditionalCheckFailedException) + table.put_item.when.called_with(data=data3).should.throw( + ConditionalCheckFailedException) returned_item = table.lookup('123', 'abc') dict(returned_item).should.equal(data2) @@ -972,7 +984,8 @@ def test_boto3_conditions(): # Test a query returning all items results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('1'), + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), ScanIndexForward=True, ) expected = ["123", "456", "789"] @@ -981,7 +994,8 @@ def test_boto3_conditions(): # Return all items again, but in reverse results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('1'), + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), ScanIndexForward=False, ) for index, item in enumerate(reversed(results['Items'])): @@ -989,29 +1003,34 @@ def test_boto3_conditions(): # Filter the subjects to only return some of the results results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('234'), + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('234'), ConsistentRead=True, ) results['Count'].should.equal(2) # Filter to return no results results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('9999') + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('9999') ) results['Count'].should.equal(0) results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").begins_with('12') + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").begins_with('12') ) results['Count'].should.equal(1) results = table.query( - 
KeyConditionExpression=Key("subject").begins_with('7') & Key('forum_name').eq('the-key') + KeyConditionExpression=Key("subject").begins_with( + '7') & Key('forum_name').eq('the-key') ) results['Count'].should.equal(1) results = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").between('567', '890') + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").between('567', '890') ) results['Count'].should.equal(1) @@ -1337,7 +1356,8 @@ def test_boto3_query_gsi_range_comparison(): # Test a query returning all johndoe items results = table.query( - KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt(0), + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), ScanIndexForward=True, IndexName='TestGSI', ) @@ -1347,7 +1367,8 @@ def test_boto3_query_gsi_range_comparison(): # Return all johndoe items again, but in reverse results = table.query( - KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt(0), + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), ScanIndexForward=False, IndexName='TestGSI', ) @@ -1357,7 +1378,8 @@ def test_boto3_query_gsi_range_comparison(): # Filter the creation to only return some of the results # And reverse order of hash + range key results = table.query( - KeyConditionExpression=Key("created").gt(1) & Key('username').eq('johndoe'), + KeyConditionExpression=Key("created").gt( + 1) & Key('username').eq('johndoe'), ConsistentRead=True, IndexName='TestGSI', ) @@ -1365,20 +1387,23 @@ def test_boto3_query_gsi_range_comparison(): # Filter to return no results results = table.query( - KeyConditionExpression=Key('username').eq('janedoe') & Key("created").gt(9), + KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").gt(9), IndexName='TestGSI', ) results['Count'].should.equal(0) results = table.query( - KeyConditionExpression=Key('username').eq('janedoe') & Key("created").eq(5), + 
KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").eq(5), IndexName='TestGSI', ) results['Count'].should.equal(1) # Test range key sorting results = table.query( - KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt(0), + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), IndexName='TestGSI', ) expected = [Decimal('1'), Decimal('2'), Decimal('3')] @@ -1516,7 +1541,6 @@ def test_boto3_update_table_gsi_throughput(): gsi_throughput['WriteCapacityUnits'].should.equal(11) - @mock_dynamodb2 def test_update_table_gsi_create(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 83eff6519..36e1b6c61 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -71,7 +71,8 @@ def test_delete_table(): conn.delete_table('messages') conn.list_tables()["TableNames"].should.have.length_of(0) - conn.delete_table.when.called_with('messages').should.throw(JSONResponseError) + conn.delete_table.when.called_with( + 'messages').should.throw(JSONResponseError) @requires_boto_gte("2.9") @@ -239,7 +240,8 @@ def test_query_with_undeclared_table(): conn.query.when.called_with( table_name='undeclared-table', - key_conditions={"forum_name": {"ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}} + key_conditions={"forum_name": { + "ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}} ).should.throw(JSONResponseError) @@ -396,7 +398,8 @@ def test_get_key_fields(): @mock_dynamodb2_deprecated def test_get_missing_item(): table = create_table() - table.get_item.when.called_with(forum_name='missing').should.throw(ItemNotFound) + table.get_item.when.called_with( + forum_name='missing').should.throw(ItemNotFound) @requires_boto_gte("2.9") @@ -436,7 +439,8 @@ 
def test_update_item_remove(): } # Then remove the SentBy field - conn.update_item("messages", key_map, update_expression="REMOVE SentBy, SentTo") + conn.update_item("messages", key_map, + update_expression="REMOVE SentBy, SentTo") returned_item = table.get_item(username="steve") dict(returned_item).should.equal({ @@ -460,7 +464,8 @@ def test_update_item_set(): 'username': {"S": "steve"} } - conn.update_item("messages", key_map, update_expression="SET foo=bar, blah=baz REMOVE SentBy") + conn.update_item("messages", key_map, + update_expression="SET foo=bar, blah=baz REMOVE SentBy") returned_item = table.get_item(username="steve") dict(returned_item).should.equal({ @@ -470,7 +475,6 @@ def test_update_item_set(): }) - @mock_dynamodb2_deprecated def test_failed_overwrite(): table = Table.create('messages', schema=[ @@ -487,7 +491,8 @@ def test_failed_overwrite(): table.put_item(data=data2, overwrite=True) data3 = {'id': '123', 'data': '812'} - table.put_item.when.called_with(data=data3).should.throw(ConditionalCheckFailedException) + table.put_item.when.called_with(data=data3).should.throw( + ConditionalCheckFailedException) returned_item = table.lookup('123') dict(returned_item).should.equal(data2) @@ -521,6 +526,7 @@ def test_conflicting_writes(): boto3 """ + @mock_dynamodb2 def test_boto3_create_table(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ -617,7 +623,6 @@ def test_boto3_put_item_conditions_pass(): assert dict(returned_item)['Item']['foo'].should.equal("baz") - @mock_dynamodb2 def test_scan_pagination(): table = _create_user_table() diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 4c154ae84..40cc5fe24 100755 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -20,10 +20,12 @@ def test_ami_create_and_delete(): instance = reservation.instances[0] with assert_raises(EC2ResponseError) as ex: - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami", dry_run=True) + image_id 
= conn.create_image( + instance.id, "test-ami", "this is a test ami", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set') image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") @@ -47,8 +49,10 @@ def test_ami_create_and_delete(): snapshots.should.have.length_of(1) snapshot = snapshots[0] - image.block_device_mapping.current_value.snapshot_id.should.equal(snapshot.id) - snapshot.description.should.equal("Auto-created snapshot for AMI {0}".format(image.id)) + image.block_device_mapping.current_value.snapshot_id.should.equal( + snapshot.id) + snapshot.description.should.equal( + "Auto-created snapshot for AMI {0}".format(image.id)) snapshot.volume_id.should.equal(volume.id) # Deregister @@ -56,7 +60,8 @@ def test_ami_create_and_delete(): success = conn.deregister_image(image_id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set') success = conn.deregister_image(image_id) success.should.be.true @@ -75,23 +80,29 @@ def test_ami_copy(): reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] - source_image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + source_image_id = conn.create_image( + instance.id, "test-ami", 
"this is a test ami") instance.terminate() source_image = conn.get_all_images(image_ids=[source_image_id])[0] - # Boto returns a 'CopyImage' object with an image_id attribute here. Use the image_id to fetch the full info. + # Boto returns a 'CopyImage' object with an image_id attribute here. Use + # the image_id to fetch the full info. with assert_raises(EC2ResponseError) as ex: - copy_image_ref = conn.copy_image(source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", dry_run=True) + copy_image_ref = conn.copy_image( + source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set') - copy_image_ref = conn.copy_image(source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami") + copy_image_ref = conn.copy_image( + source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami") copy_image_id = copy_image_ref.image_id copy_image = conn.get_all_images(image_ids=[copy_image_id])[0] copy_image.id.should.equal(copy_image_id) - copy_image.virtualization_type.should.equal(source_image.virtualization_type) + copy_image.virtualization_type.should.equal( + source_image.virtualization_type) copy_image.architecture.should.equal(source_image.architecture) copy_image.kernel_id.should.equal(source_image.kernel_id) copy_image.platform.should.equal(source_image.platform) @@ -105,15 +116,18 @@ def test_ami_copy(): # Copy from non-existent source ID. 
with assert_raises(EC2ResponseError) as cm: - conn.copy_image(source_image.region.name, 'ami-abcd1234', "test-copy-ami", "this is a test copy ami") + conn.copy_image(source_image.region.name, 'ami-abcd1234', + "test-copy-ami", "this is a test copy ami") cm.exception.code.should.equal('InvalidAMIID.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none # Copy from non-existent source region. with assert_raises(EC2ResponseError) as cm: - invalid_region = 'us-east-1' if (source_image.region.name != 'us-east-1') else 'us-west-1' - conn.copy_image(invalid_region, source_image.id, "test-copy-ami", "this is a test copy ami") + invalid_region = 'us-east-1' if (source_image.region.name != + 'us-east-1') else 'us-west-1' + conn.copy_image(invalid_region, source_image.id, + "test-copy-ami", "this is a test copy ami") cm.exception.code.should.equal('InvalidAMIID.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @@ -131,7 +145,8 @@ def test_ami_tagging(): image.add_tag("a key", "some value", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') image.add_tag("a key", "some value") @@ -179,7 +194,8 @@ def test_ami_filters(): instanceA.modify_attribute("kernel", "k-1234abcd") instanceA.modify_attribute("platform", "windows") instanceA.modify_attribute("virtualization_type", "hvm") - imageA_id = conn.create_image(instanceA.id, "test-ami-A", "this is a test ami") + imageA_id = conn.create_image( + instanceA.id, "test-ami-A", "this is a test ami") imageA = conn.get_image(imageA_id) reservationB = 
conn.run_instances('ami-abcd1234') @@ -188,18 +204,22 @@ def test_ami_filters(): instanceB.modify_attribute("kernel", "k-abcd1234") instanceB.modify_attribute("platform", "linux") instanceB.modify_attribute("virtualization_type", "paravirtual") - imageB_id = conn.create_image(instanceB.id, "test-ami-B", "this is a test ami") + imageB_id = conn.create_image( + instanceB.id, "test-ami-B", "this is a test ami") imageB = conn.get_image(imageB_id) imageB.set_launch_permissions(group_names=("all")) - amis_by_architecture = conn.get_all_images(filters={'architecture': 'x86_64'}) + amis_by_architecture = conn.get_all_images( + filters={'architecture': 'x86_64'}) set([ami.id for ami in amis_by_architecture]).should.equal(set([imageB.id])) amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'}) set([ami.id for ami in amis_by_kernel]).should.equal(set([imageB.id])) - amis_by_virtualization = conn.get_all_images(filters={'virtualization-type': 'paravirtual'}) - set([ami.id for ami in amis_by_virtualization]).should.equal(set([imageB.id])) + amis_by_virtualization = conn.get_all_images( + filters={'virtualization-type': 'paravirtual'}) + set([ami.id for ami in amis_by_virtualization] + ).should.equal(set([imageB.id])) amis_by_platform = conn.get_all_images(filters={'platform': 'windows'}) set([ami.id for ami in amis_by_platform]).should.equal(set([imageA.id])) @@ -208,7 +228,8 @@ def test_ami_filters(): set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id])) amis_by_state = conn.get_all_images(filters={'state': 'available'}) - set([ami.id for ami in amis_by_state]).should.equal(set([imageA.id, imageB.id])) + set([ami.id for ami in amis_by_state]).should.equal( + set([imageA.id, imageB.id])) amis_by_name = conn.get_all_images(filters={'name': imageA.name}) set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id])) @@ -226,20 +247,23 @@ def test_ami_filtering_via_tag(): reservationA = conn.run_instances('ami-1234abcd') instanceA = 
reservationA.instances[0] - imageA_id = conn.create_image(instanceA.id, "test-ami-A", "this is a test ami") + imageA_id = conn.create_image( + instanceA.id, "test-ami-A", "this is a test ami") imageA = conn.get_image(imageA_id) imageA.add_tag("a key", "some value") reservationB = conn.run_instances('ami-abcd1234') instanceB = reservationB.instances[0] - imageB_id = conn.create_image(instanceB.id, "test-ami-B", "this is a test ami") + imageB_id = conn.create_image( + instanceB.id, "test-ami-B", "this is a test ami") imageB = conn.get_image(imageB_id) imageB.add_tag("another key", "some other value") amis_by_tagA = conn.get_all_images(filters={'tag:a key': 'some value'}) set([ami.id for ami in amis_by_tagA]).should.equal(set([imageA.id])) - amis_by_tagB = conn.get_all_images(filters={'tag:another key': 'some other value'}) + amis_by_tagB = conn.get_all_images( + filters={'tag:another key': 'some other value'}) set([ami.id for ami in amis_by_tagB]).should.equal(set([imageB.id])) @@ -274,7 +298,8 @@ def test_ami_attribute_group_permissions(): image = conn.get_image(image_id) # Baseline - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.name.should.equal('launch_permission') attributes.attrs.should.have.length_of(0) @@ -290,32 +315,38 @@ def test_ami_attribute_group_permissions(): # Add 'all' group and confirm with assert_raises(EC2ResponseError) as ex: - conn.modify_image_attribute(**dict(ADD_GROUP_ARGS, **{'dry_run': True})) + conn.modify_image_attribute( + **dict(ADD_GROUP_ARGS, **{'dry_run': True})) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when 
calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set') conn.modify_image_attribute(**ADD_GROUP_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs['groups'].should.have.length_of(1) attributes.attrs['groups'].should.equal(['all']) image = conn.get_image(image_id) image.is_public.should.equal(True) # Add is idempotent - conn.modify_image_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) + conn.modify_image_attribute.when.called_with( + **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) # Remove 'all' group and confirm conn.modify_image_attribute(**REMOVE_GROUP_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs.should.have.length_of(0) image = conn.get_image(image_id) image.is_public.should.equal(False) # Remove is idempotent - conn.modify_image_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) + conn.modify_image_attribute.when.called_with( + **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) @mock_emr_deprecated @@ -327,7 +358,8 @@ def test_ami_attribute_user_permissions(): image = conn.get_image(image_id) # Baseline - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.name.should.equal('launch_permission') attributes.attrs.should.have.length_of(0) @@ -353,19 +385,23 @@ def test_ami_attribute_user_permissions(): # Add multiple users and confirm conn.modify_image_attribute(**ADD_USERS_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') 
attributes.attrs['user_ids'].should.have.length_of(2) - set(attributes.attrs['user_ids']).should.equal(set([str(USER1), str(USER2)])) + set(attributes.attrs['user_ids']).should.equal( + set([str(USER1), str(USER2)])) image = conn.get_image(image_id) image.is_public.should.equal(False) # Add is idempotent - conn.modify_image_attribute.when.called_with(**ADD_USERS_ARGS).should_not.throw(EC2ResponseError) + conn.modify_image_attribute.when.called_with( + **ADD_USERS_ARGS).should_not.throw(EC2ResponseError) # Remove single user and confirm conn.modify_image_attribute(**REMOVE_SINGLE_USER_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs['user_ids'].should.have.length_of(1) set(attributes.attrs['user_ids']).should.equal(set([str(USER2)])) image = conn.get_image(image_id) @@ -374,13 +410,15 @@ def test_ami_attribute_user_permissions(): # Remove multiple users and confirm conn.modify_image_attribute(**REMOVE_USERS_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs.should.have.length_of(0) image = conn.get_image(image_id) image.is_public.should.equal(False) # Remove is idempotent - conn.modify_image_attribute.when.called_with(**REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) + conn.modify_image_attribute.when.called_with( + **REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) @mock_emr_deprecated @@ -397,7 +435,8 @@ def test_ami_attribute_user_and_group_permissions(): image = conn.get_image(image_id) # Baseline - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.name.should.equal('launch_permission') attributes.attrs.should.have.length_of(0) @@ -419,7 +458,8 @@ def 
test_ami_attribute_user_and_group_permissions(): # Add and confirm conn.modify_image_attribute(**ADD_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs['user_ids'].should.have.length_of(2) set(attributes.attrs['user_ids']).should.equal(set([USER1, USER2])) set(attributes.attrs['groups']).should.equal(set(['all'])) @@ -429,7 +469,8 @@ def test_ami_attribute_user_and_group_permissions(): # Remove and confirm conn.modify_image_attribute(**REMOVE_ARGS) - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs.should.have.length_of(0) image = conn.get_image(image_id) image.is_public.should.equal(False) @@ -483,7 +524,8 @@ def test_ami_attribute_error_cases(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - # Error: Add with one invalid user ID among other valid IDs, ensure no partial changes. + # Error: Add with one invalid user ID among other valid IDs, ensure no + # partial changes. 
with assert_raises(EC2ResponseError) as cm: conn.modify_image_attribute(image.id, attribute='launchPermission', @@ -493,7 +535,8 @@ def test_ami_attribute_error_cases(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - attributes = conn.get_image_attribute(image.id, attribute='launchPermission') + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') attributes.attrs.should.have.length_of(0) # Error: Add with invalid image ID diff --git a/tests/test_ec2/test_customer_gateways.py b/tests/test_ec2/test_customer_gateways.py index 93e35dc6a..589f887f6 100644 --- a/tests/test_ec2/test_customer_gateways.py +++ b/tests/test_ec2/test_customer_gateways.py @@ -12,26 +12,31 @@ from moto import mock_ec2_deprecated def test_create_customer_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') - customer_gateway = conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534) + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) customer_gateway.should_not.be.none customer_gateway.id.should.match(r'cgw-\w+') customer_gateway.type.should.equal('ipsec.1') customer_gateway.bgp_asn.should.equal(65534) customer_gateway.ip_address.should.equal('205.251.242.54') + @mock_ec2_deprecated def test_describe_customer_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') - customer_gateway = conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534) + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) cgws = conn.get_all_customer_gateways() cgws.should.have.length_of(1) cgws[0].id.should.match(customer_gateway.id) + @mock_ec2_deprecated def test_delete_customer_gateways(): conn = boto.connect_vpc('the_key', 'the_secret') - customer_gateway = conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534) + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) customer_gateway.should_not.be.none cgws = 
conn.get_all_customer_gateways() cgws[0].id.should.match(customer_gateway.id) @@ -39,6 +44,7 @@ def test_delete_customer_gateways(): cgws = conn.get_all_customer_gateways() cgws.should.have.length_of(0) + @mock_ec2_deprecated def test_delete_customer_gateways_bad_id(): conn = boto.connect_vpc('the_key', 'the_secret') diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py index 0279a3d54..4e2520241 100644 --- a/tests/test_ec2/test_dhcp_options.py +++ b/tests/test_ec2/test_dhcp_options.py @@ -19,7 +19,8 @@ SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] def test_dhcp_options_associate(): """ associate dhcp option """ conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) vpc = conn.create_vpc("10.0.0.0/16") rval = conn.associate_dhcp_options(dhcp_options.id, vpc.id) @@ -43,7 +44,8 @@ def test_dhcp_options_associate_invalid_dhcp_id(): def test_dhcp_options_associate_invalid_vpc_id(): """ associate dhcp option invalid vpc id """ conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) with assert_raises(EC2ResponseError) as cm: conn.associate_dhcp_options(dhcp_options.id, "foo") @@ -56,7 +58,8 @@ def test_dhcp_options_associate_invalid_vpc_id(): def test_dhcp_options_delete_with_vpc(): """Test deletion of dhcp options with vpc""" conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) dhcp_options_id = dhcp_options.id vpc = conn.create_vpc("10.0.0.0/16") @@ -83,10 +86,13 @@ def test_create_dhcp_options(): """Create most basic dhcp option""" conn = 
boto.connect_vpc('the_key', 'the_secret') - dhcp_option = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_option = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) dhcp_option.options[u'domain-name'][0].should.be.equal(SAMPLE_DOMAIN_NAME) - dhcp_option.options[u'domain-name-servers'][0].should.be.equal(SAMPLE_NAME_SERVERS[0]) - dhcp_option.options[u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1]) + dhcp_option.options[ + u'domain-name-servers'][0].should.be.equal(SAMPLE_NAME_SERVERS[0]) + dhcp_option.options[ + u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1]) @mock_ec2_deprecated @@ -210,8 +216,10 @@ def test_dhcp_options_get_by_tag(): dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options['domain-name'][0].should.be.equal('example.com') - dhcp_options_sets[0].options['domain-name-servers'][0].should.be.equal('10.0.10.2') + dhcp_options_sets[0].options[ + 'domain-name'][0].should.be.equal('example.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.10.2') dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions1') dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') @@ -219,8 +227,10 @@ def test_dhcp_options_get_by_tag(): dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options['domain-name'][0].should.be.equal('example.com') - dhcp_options_sets[0].options['domain-name-servers'][0].should.be.equal('10.0.20.2') + dhcp_options_sets[0].options[ + 'domain-name'][0].should.be.equal('example.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.20.2') dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions2') dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') @@ -247,17 +257,21 @@ def test_dhcp_options_get_by_id(): 
dhcp_options_sets = conn.get_all_dhcp_options() dhcp_options_sets.should.have.length_of(2) - dhcp_options_sets = conn.get_all_dhcp_options(filters={'dhcp-options-id': dhcp1_id}) + dhcp_options_sets = conn.get_all_dhcp_options( + filters={'dhcp-options-id': dhcp1_id}) dhcp_options_sets.should.have.length_of(1) dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test1.com') - dhcp_options_sets[0].options['domain-name-servers'][0].should.be.equal('10.0.10.2') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.10.2') - dhcp_options_sets = conn.get_all_dhcp_options(filters={'dhcp-options-id': dhcp2_id}) + dhcp_options_sets = conn.get_all_dhcp_options( + filters={'dhcp-options-id': dhcp2_id}) dhcp_options_sets.should.have.length_of(1) dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test2.com') - dhcp_options_sets[0].options['domain-name-servers'][0].should.be.equal('10.0.20.2') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.20.2') @mock_ec2 @@ -315,4 +329,5 @@ def test_dhcp_options_get_by_invalid_filter(): conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) filters = {'invalid-filter': 'invalid-value'} - conn.get_all_dhcp_options.when.called_with(filters=filters).should.throw(NotImplementedError) + conn.get_all_dhcp_options.when.called_with( + filters=filters).should.throw(NotImplementedError) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 6491412e3..83c89d129 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -28,7 +28,8 @@ def test_create_and_delete_volume(): volume.delete(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteVolume operation: Request would have succeeded, but DryRun flag is set') + 
ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteVolume operation: Request would have succeeded, but DryRun flag is set') volume.delete() @@ -42,7 +43,6 @@ def test_create_and_delete_volume(): cm.exception.request_id.should_not.be.none - @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -50,7 +50,8 @@ def test_create_encrypted_volume_dryrun(): conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') @mock_ec2_deprecated @@ -62,7 +63,8 @@ def test_create_encrypted_volume(): conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') all_volumes = conn.get_all_volumes() all_volumes[0].encrypted.should.be(True) @@ -108,29 +110,42 @@ def test_volume_filters(): block_mapping = instance.block_device_mapping['/dev/sda1'] - volumes_by_attach_time = conn.get_all_volumes(filters={'attachment.attach-time': block_mapping.attach_time}) - set([vol.id for vol in volumes_by_attach_time]).should.equal(set([block_mapping.volume_id])) + volumes_by_attach_time = conn.get_all_volumes( + 
filters={'attachment.attach-time': block_mapping.attach_time}) + set([vol.id for vol in volumes_by_attach_time] + ).should.equal(set([block_mapping.volume_id])) - volumes_by_attach_device = conn.get_all_volumes(filters={'attachment.device': '/dev/sda1'}) - set([vol.id for vol in volumes_by_attach_device]).should.equal(set([block_mapping.volume_id])) + volumes_by_attach_device = conn.get_all_volumes( + filters={'attachment.device': '/dev/sda1'}) + set([vol.id for vol in volumes_by_attach_device] + ).should.equal(set([block_mapping.volume_id])) - volumes_by_attach_instance_id = conn.get_all_volumes(filters={'attachment.instance-id': instance.id}) - set([vol.id for vol in volumes_by_attach_instance_id]).should.equal(set([block_mapping.volume_id])) + volumes_by_attach_instance_id = conn.get_all_volumes( + filters={'attachment.instance-id': instance.id}) + set([vol.id for vol in volumes_by_attach_instance_id] + ).should.equal(set([block_mapping.volume_id])) - volumes_by_attach_status = conn.get_all_volumes(filters={'attachment.status': 'attached'}) - set([vol.id for vol in volumes_by_attach_status]).should.equal(set([block_mapping.volume_id])) + volumes_by_attach_status = conn.get_all_volumes( + filters={'attachment.status': 'attached'}) + set([vol.id for vol in volumes_by_attach_status] + ).should.equal(set([block_mapping.volume_id])) - volumes_by_create_time = conn.get_all_volumes(filters={'create-time': volume4.create_time}) - set([vol.create_time for vol in volumes_by_create_time]).should.equal(set([volume4.create_time])) + volumes_by_create_time = conn.get_all_volumes( + filters={'create-time': volume4.create_time}) + set([vol.create_time for vol in volumes_by_create_time] + ).should.equal(set([volume4.create_time])) volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size}) set([vol.id for vol in volumes_by_size]).should.equal(set([volume2.id])) - volumes_by_snapshot_id = conn.get_all_volumes(filters={'snapshot-id': snapshot.id}) - set([vol.id for vol 
in volumes_by_snapshot_id]).should.equal(set([volume4.id])) + volumes_by_snapshot_id = conn.get_all_volumes( + filters={'snapshot-id': snapshot.id}) + set([vol.id for vol in volumes_by_snapshot_id] + ).should.equal(set([volume4.id])) volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'}) - set([vol.id for vol in volumes_by_status]).should.equal(set([block_mapping.volume_id])) + set([vol.id for vol in volumes_by_status]).should.equal( + set([block_mapping.volume_id])) volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id}) set([vol.id for vol in volumes_by_id]).should.equal(set([volume1.id])) @@ -138,13 +153,17 @@ def test_volume_filters(): volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'}) set([vol.id for vol in volumes_by_tag_key]).should.equal(set([volume1.id])) - volumes_by_tag_value = conn.get_all_volumes(filters={'tag-value': 'testvalue1'}) - set([vol.id for vol in volumes_by_tag_value]).should.equal(set([volume1.id])) + volumes_by_tag_value = conn.get_all_volumes( + filters={'tag-value': 'testvalue1'}) + set([vol.id for vol in volumes_by_tag_value] + ).should.equal(set([volume1.id])) - volumes_by_tag = conn.get_all_volumes(filters={'tag:testkey1': 'testvalue1'}) + volumes_by_tag = conn.get_all_volumes( + filters={'tag:testkey1': 'testvalue1'}) set([vol.id for vol in volumes_by_tag]).should.equal(set([volume1.id])) - volumes_by_unencrypted = conn.get_all_volumes(filters={'encrypted': 'false'}) + volumes_by_unencrypted = conn.get_all_volumes( + filters={'encrypted': 'false'}) set([vol.id for vol in volumes_by_unencrypted]).should.equal( set([block_mapping.volume_id, volume2.id]) ) @@ -169,7 +188,8 @@ def test_volume_attach_and_detach(): volume.attach(instance.id, "/dev/sdh", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would 
have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would have succeeded, but DryRun flag is set') volume.attach(instance.id, "/dev/sdh") @@ -183,7 +203,8 @@ def test_volume_attach_and_detach(): volume.detach(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set') volume.detach() @@ -218,7 +239,8 @@ def test_create_snapshot(): snapshot = volume.create_snapshot('a dryrun snapshot', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') snapshot = volume.create_snapshot('a test snapshot') snapshot.update() @@ -294,32 +316,50 @@ def test_snapshot_filters(): conn.create_tags([snapshot1.id], {'testkey1': 'testvalue1'}) conn.create_tags([snapshot2.id], {'testkey2': 'testvalue2'}) - snapshots_by_description = conn.get_all_snapshots(filters={'description': 'testsnapshot1'}) - set([snap.id for snap in snapshots_by_description]).should.equal(set([snapshot1.id])) + snapshots_by_description = conn.get_all_snapshots( + filters={'description': 'testsnapshot1'}) + set([snap.id for snap in snapshots_by_description] + ).should.equal(set([snapshot1.id])) - snapshots_by_id = 
conn.get_all_snapshots(filters={'snapshot-id': snapshot1.id}) - set([snap.id for snap in snapshots_by_id]).should.equal(set([snapshot1.id])) + snapshots_by_id = conn.get_all_snapshots( + filters={'snapshot-id': snapshot1.id}) + set([snap.id for snap in snapshots_by_id] + ).should.equal(set([snapshot1.id])) - snapshots_by_start_time = conn.get_all_snapshots(filters={'start-time': snapshot1.start_time}) - set([snap.start_time for snap in snapshots_by_start_time]).should.equal(set([snapshot1.start_time])) + snapshots_by_start_time = conn.get_all_snapshots( + filters={'start-time': snapshot1.start_time}) + set([snap.start_time for snap in snapshots_by_start_time] + ).should.equal(set([snapshot1.start_time])) - snapshots_by_volume_id = conn.get_all_snapshots(filters={'volume-id': volume1.id}) - set([snap.id for snap in snapshots_by_volume_id]).should.equal(set([snapshot1.id, snapshot2.id])) + snapshots_by_volume_id = conn.get_all_snapshots( + filters={'volume-id': volume1.id}) + set([snap.id for snap in snapshots_by_volume_id] + ).should.equal(set([snapshot1.id, snapshot2.id])) - snapshots_by_volume_size = conn.get_all_snapshots(filters={'volume-size': volume1.size}) - set([snap.id for snap in snapshots_by_volume_size]).should.equal(set([snapshot1.id, snapshot2.id])) + snapshots_by_volume_size = conn.get_all_snapshots( + filters={'volume-size': volume1.size}) + set([snap.id for snap in snapshots_by_volume_size] + ).should.equal(set([snapshot1.id, snapshot2.id])) - snapshots_by_tag_key = conn.get_all_snapshots(filters={'tag-key': 'testkey1'}) - set([snap.id for snap in snapshots_by_tag_key]).should.equal(set([snapshot1.id])) + snapshots_by_tag_key = conn.get_all_snapshots( + filters={'tag-key': 'testkey1'}) + set([snap.id for snap in snapshots_by_tag_key] + ).should.equal(set([snapshot1.id])) - snapshots_by_tag_value = conn.get_all_snapshots(filters={'tag-value': 'testvalue1'}) - set([snap.id for snap in snapshots_by_tag_value]).should.equal(set([snapshot1.id])) + 
snapshots_by_tag_value = conn.get_all_snapshots( + filters={'tag-value': 'testvalue1'}) + set([snap.id for snap in snapshots_by_tag_value] + ).should.equal(set([snapshot1.id])) - snapshots_by_tag = conn.get_all_snapshots(filters={'tag:testkey1': 'testvalue1'}) - set([snap.id for snap in snapshots_by_tag]).should.equal(set([snapshot1.id])) + snapshots_by_tag = conn.get_all_snapshots( + filters={'tag:testkey1': 'testvalue1'}) + set([snap.id for snap in snapshots_by_tag] + ).should.equal(set([snapshot1.id])) - snapshots_by_encrypted = conn.get_all_snapshots(filters={'encrypted': 'true'}) - set([snap.id for snap in snapshots_by_encrypted]).should.equal(set([snapshot3.id])) + snapshots_by_encrypted = conn.get_all_snapshots( + filters={'encrypted': 'true'}) + set([snap.id for snap in snapshots_by_encrypted] + ).should.equal(set([snapshot3.id])) @mock_ec2_deprecated @@ -331,7 +371,8 @@ def test_snapshot_attribute(): snapshot = volume.create_snapshot() # Baseline - attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission') + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') attributes.name.should.equal('create_volume_permission') attributes.attrs.should.have.length_of(0) @@ -348,34 +389,42 @@ def test_snapshot_attribute(): # Add 'all' group and confirm with assert_raises(EC2ResponseError) as ex: - conn.modify_snapshot_attribute(**dict(ADD_GROUP_ARGS, **{'dry_run': True})) + conn.modify_snapshot_attribute( + **dict(ADD_GROUP_ARGS, **{'dry_run': True})) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') 
conn.modify_snapshot_attribute(**ADD_GROUP_ARGS) - attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission') + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') attributes.attrs['groups'].should.have.length_of(1) attributes.attrs['groups'].should.equal(['all']) # Add is idempotent - conn.modify_snapshot_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) + conn.modify_snapshot_attribute.when.called_with( + **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) # Remove 'all' group and confirm with assert_raises(EC2ResponseError) as ex: - conn.modify_snapshot_attribute(**dict(REMOVE_GROUP_ARGS, **{'dry_run': True})) + conn.modify_snapshot_attribute( + **dict(REMOVE_GROUP_ARGS, **{'dry_run': True})) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') conn.modify_snapshot_attribute(**REMOVE_GROUP_ARGS) - attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission') + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') attributes.attrs.should.have.length_of(0) # Remove is idempotent - conn.modify_snapshot_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) + conn.modify_snapshot_attribute.when.called_with( + **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) # Error: Add with group != 'all' with assert_raises(EC2ResponseError) as cm: @@ -428,7 +477,8 @@ def test_create_volume_from_snapshot(): snapshot = volume.create_snapshot('a test snapshot', dry_run=True) 
ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') snapshot = volume.create_snapshot('a test snapshot') snapshot.update() @@ -469,16 +519,19 @@ def test_modify_attribute_blockDeviceMapping(): instance = reservation.instances[0] with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}, dry_run=True) + instance.modify_attribute('blockDeviceMapping', { + '/dev/sda1': True}, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set') instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}) instance = ec2_backends[conn.region.name].get_instance(instance.id) instance.block_device_mapping.should.have.key('/dev/sda1') - instance.block_device_mapping['/dev/sda1'].delete_on_termination.should.be(True) + instance.block_device_mapping[ + '/dev/sda1'].delete_on_termination.should.be(True) @mock_ec2_deprecated @@ -491,8 +544,10 @@ def test_volume_tag_escaping(): snapshot.add_tags({'key': ''}, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have 
succeeded, but DryRun flag is set') - dict(conn.get_all_snapshots()[0].tags).should_not.be.equal({'key': ''}) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + dict(conn.get_all_snapshots()[0].tags).should_not.be.equal( + {'key': ''}) snapshot.add_tags({'key': ''}) diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index f92c4df8b..2e1ae189a 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -24,7 +24,8 @@ def test_eip_allocate_classic(): standard = conn.allocate_address(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') standard = conn.allocate_address() standard.should.be.a(boto.ec2.address.Address) @@ -36,7 +37,8 @@ def test_eip_allocate_classic(): standard.release(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') standard.release() standard.should_not.be.within(conn.get_all_addresses()) @@ -51,7 +53,8 @@ def test_eip_allocate_vpc(): vpc = conn.allocate_address(domain="vpc", dry_run=True) 
ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') vpc = conn.allocate_address(domain="vpc") vpc.should.be.a(boto.ec2.address.Address) @@ -90,23 +93,28 @@ def test_eip_associate_classic(): cm.exception.request_id.should_not.be.none with assert_raises(EC2ResponseError) as ex: - conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip, dry_run=True) + conn.associate_address(instance_id=instance.id, + public_ip=eip.public_ip, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set') conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(instance.id) with assert_raises(EC2ResponseError) as ex: conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set') + 
ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set') conn.disassociate_address(public_ip=eip.public_ip) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(u'') eip.release() eip.should_not.be.within(conn.get_all_addresses()) @@ -114,6 +122,7 @@ def test_eip_associate_classic(): instance.terminate() + @mock_ec2_deprecated def test_eip_associate_vpc(): """Associate/Disassociate EIP to VPC instance""" @@ -131,11 +140,14 @@ def test_eip_associate_vpc(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - conn.associate_address(instance_id=instance.id, allocation_id=eip.allocation_id) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + conn.associate_address(instance_id=instance.id, + allocation_id=eip.allocation_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(instance.id) conn.disassociate_address(association_id=eip.association_id) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(u'') eip.association_id.should.be.none @@ -143,13 +155,15 @@ def test_eip_associate_vpc(): eip.release(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: 
Request would have succeeded, but DryRun flag is set') eip.release() eip = None instance.terminate() + @mock_ec2 def test_eip_boto3_vpc_association(): """Associate EIP to VPC instance in a new subnet with boto3""" @@ -157,7 +171,7 @@ def test_eip_boto3_vpc_association(): client = boto3.client('ec2', region_name='us-west-1') vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') subnet_res = client.create_subnet( - VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') + VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') instance = service.create_instances(**{ 'InstanceType': 't2.micro', 'ImageId': 'ami-test', @@ -192,17 +206,21 @@ def test_eip_associate_network_interface(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - conn.associate_address(network_interface_id=eni.id, allocation_id=eip.allocation_id) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + conn.associate_address(network_interface_id=eni.id, + allocation_id=eip.allocation_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.network_interface_id.should.be.equal(eni.id) conn.disassociate_address(association_id=eip.association_id) - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ): + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.network_interface_id.should.be.equal(u'') eip.association_id.should.be.none eip.release() eip = None + @mock_ec2_deprecated def test_eip_reassociate(): """reassociate EIP""" @@ -219,12 +237,14 @@ def test_eip_reassociate(): # Different ID detects resource association with assert_raises(EC2ResponseError) as cm: - conn.associate_address(instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False) + conn.associate_address( + instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False) 
cm.exception.code.should.equal('Resource.AlreadyAssociated') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - conn.associate_address.when.called_with(instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) + conn.associate_address.when.called_with( + instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) eip.release() eip = None @@ -232,6 +252,7 @@ def test_eip_reassociate(): instance1.terminate() instance2.terminate() + @mock_ec2_deprecated def test_eip_reassociate_nic(): """reassociate EIP""" @@ -243,23 +264,28 @@ def test_eip_reassociate_nic(): eni2 = conn.create_network_interface(subnet.id) eip = conn.allocate_address() - conn.associate_address(network_interface_id=eni1.id, public_ip=eip.public_ip) + conn.associate_address(network_interface_id=eni1.id, + public_ip=eip.public_ip) # Same ID is idempotent - conn.associate_address(network_interface_id=eni1.id, public_ip=eip.public_ip) + conn.associate_address(network_interface_id=eni1.id, + public_ip=eip.public_ip) # Different ID detects resource association with assert_raises(EC2ResponseError) as cm: - conn.associate_address(network_interface_id=eni2.id, public_ip=eip.public_ip) + conn.associate_address( + network_interface_id=eni2.id, public_ip=eip.public_ip) cm.exception.code.should.equal('Resource.AlreadyAssociated') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - conn.associate_address.when.called_with(network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) + conn.associate_address.when.called_with( + network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) eip.release() eip = None + @mock_ec2_deprecated def test_eip_associate_invalid_args(): """Associate EIP, invalid args """ @@ -290,6 +316,7 @@ def 
test_eip_disassociate_bogus_association(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_eip_release_bogus_eip(): """Release bogus EIP""" @@ -334,7 +361,7 @@ def test_eip_describe(): number_of_classic_ips = 2 number_of_vpc_ips = 2 - #allocate some IPs + # allocate some IPs for _ in range(number_of_classic_ips): eips.append(conn.allocate_address()) for _ in range(number_of_vpc_ips): @@ -344,19 +371,22 @@ def test_eip_describe(): # Can we find each one individually? for eip in eips: if eip.allocation_id: - lookup_addresses = conn.get_all_addresses(allocation_ids=[eip.allocation_id]) + lookup_addresses = conn.get_all_addresses( + allocation_ids=[eip.allocation_id]) else: - lookup_addresses = conn.get_all_addresses(addresses=[eip.public_ip]) + lookup_addresses = conn.get_all_addresses( + addresses=[eip.public_ip]) len(lookup_addresses).should.be.equal(1) lookup_addresses[0].public_ip.should.be.equal(eip.public_ip) # Can we find first two when we search for them? 
- lookup_addresses = conn.get_all_addresses(addresses=[eips[0].public_ip, eips[1].public_ip]) + lookup_addresses = conn.get_all_addresses( + addresses=[eips[0].public_ip, eips[1].public_ip]) len(lookup_addresses).should.be.equal(2) lookup_addresses[0].public_ip.should.be.equal(eips[0].public_ip) lookup_addresses[1].public_ip.should.be.equal(eips[1].public_ip) - #Release all IPs + # Release all IPs for eip in eips: eip.release() len(conn.get_all_addresses()).should.be.equal(0) @@ -372,4 +402,3 @@ def test_eip_describe_none(): cm.exception.code.should.equal('InvalidAddress.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 9027e0448..4ec23b919 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -27,7 +27,8 @@ def test_elastic_network_interfaces(): eni = conn.create_network_interface(subnet.id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set') eni = conn.create_network_interface(subnet.id) @@ -41,7 +42,8 @@ def test_elastic_network_interfaces(): conn.delete_network_interface(eni.id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred 
(DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set') conn.delete_network_interface(eni.id) @@ -89,16 +91,20 @@ def test_elastic_network_interfaces_with_groups(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group('test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group('test security group #2', 'this is a test security group') - conn.create_network_interface(subnet.id, groups=[security_group1.id, security_group2.id]) + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + conn.create_network_interface( + subnet.id, groups=[security_group1.id, security_group2.id]) all_enis = conn.get_all_network_interfaces() all_enis.should.have.length_of(1) eni = all_enis[0] eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal(set([security_group1.id, security_group2.id])) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) @requires_boto_gte("2.12.0") @@ -107,8 +113,10 @@ def test_elastic_network_interfaces_modify_attribute(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group('test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group('test security group #2', 'this is a test security group') + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') 
conn.create_network_interface(subnet.id, groups=[security_group1.id]) all_enis = conn.get_all_network_interfaces() @@ -119,12 +127,15 @@ def test_elastic_network_interfaces_modify_attribute(): eni.groups[0].id.should.equal(security_group1.id) with assert_raises(EC2ResponseError) as ex: - conn.modify_network_interface_attribute(eni.id, 'groupset', [security_group2.id], dry_run=True) + conn.modify_network_interface_attribute( + eni.id, 'groupset', [security_group2.id], dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - conn.modify_network_interface_attribute(eni.id, 'groupset', [security_group2.id]) + conn.modify_network_interface_attribute( + eni.id, 'groupset', [security_group2.id]) all_enis = conn.get_all_network_interfaces() all_enis.should.have.length_of(1) @@ -140,11 +151,15 @@ def test_elastic_network_interfaces_filtering(): vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group('test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group('test security group #2', 'this is a test security group') + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') - eni1 = conn.create_network_interface(subnet.id, groups=[security_group1.id, security_group2.id]) - eni2 = conn.create_network_interface(subnet.id, groups=[security_group1.id]) + eni1 = conn.create_network_interface( + 
subnet.id, groups=[security_group1.id, security_group2.id]) + eni2 = conn.create_network_interface( + subnet.id, groups=[security_group1.id]) eni3 = conn.create_network_interface(subnet.id) all_enis = conn.get_all_network_interfaces() @@ -156,22 +171,26 @@ def test_elastic_network_interfaces_filtering(): set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) # Filter by ENI ID - enis_by_id = conn.get_all_network_interfaces(filters={'network-interface-id': eni1.id}) + enis_by_id = conn.get_all_network_interfaces( + filters={'network-interface-id': eni1.id}) enis_by_id.should.have.length_of(1) set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) # Filter by Security Group - enis_by_group = conn.get_all_network_interfaces(filters={'group-id': security_group1.id}) + enis_by_group = conn.get_all_network_interfaces( + filters={'group-id': security_group1.id}) enis_by_group.should.have.length_of(2) set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id, eni2.id])) # Filter by ENI ID and Security Group - enis_by_group = conn.get_all_network_interfaces(filters={'network-interface-id': eni1.id, 'group-id': security_group1.id}) + enis_by_group = conn.get_all_network_interfaces( + filters={'network-interface-id': eni1.id, 'group-id': security_group1.id}) enis_by_group.should.have.length_of(1) set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id])) # Unsupported filter - conn.get_all_network_interfaces.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + conn.get_all_network_interfaces.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) @mock_ec2 @@ -180,15 +199,19 @@ def test_elastic_network_interfaces_get_by_tag_name(): ec2_client = boto3.client('ec2', region_name='us-west-2') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + subnet = 
ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - eni1 = ec2.create_network_interface(SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') with assert_raises(ClientError) as ex: eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}], DryRun=True) ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}]) @@ -211,9 +234,11 @@ def test_elastic_network_interfaces_get_by_private_ip(): ec2_client = boto3.client('ec2', region_name='us-west-2') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - eni1 = ec2.create_network_interface(SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') # The status of the new interface should be 'available' waiter = ec2_client.get_waiter('network_interface_available') @@ -242,9 +267,11 @@ def test_elastic_network_interfaces_get_by_vpc_id(): ec2_client = boto3.client('ec2', region_name='us-west-2') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', 
AvailabilityZone='us-west-2a') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - eni1 = ec2.create_network_interface(SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') # The status of the new interface should be 'available' waiter = ec2_client.get_waiter('network_interface_available') @@ -265,9 +292,11 @@ def test_elastic_network_interfaces_get_by_subnet_id(): ec2_client = boto3.client('ec2', region_name='us-west-2') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - eni1 = ec2.create_network_interface(SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') # The status of the new interface should be 'available' waiter = ec2_client.get_waiter('network_interface_available') @@ -297,5 +326,6 @@ def test_elastic_network_interfaces_cloudformation(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() - cfn_eni = [resource for resource in resources if resource.resource_type == 'AWS::EC2::NetworkInterface'][0] + cfn_eni = [resource for resource in resources if resource.resource_type == + 'AWS::EC2::NetworkInterface'][0] cfn_eni.physical_resource_id.should.equal(eni.id) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index b6601e87f..49020555b 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -45,7 +45,8 @@ def test_instance_launch_and_terminate(): reservation = conn.run_instances('ami-1234abcd', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred 
(DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set') reservation = conn.run_instances('ami-1234abcd') reservation.should.be.a(Reservation) @@ -66,7 +67,8 @@ def test_instance_launch_and_terminate(): instance.placement.should.equal('us-east-1a') root_device_name = instance.root_device_name - instance.block_device_mapping[root_device_name].status.should.equal('in-use') + instance.block_device_mapping[ + root_device_name].status.should.equal('in-use') volume_id = instance.block_device_mapping[root_device_name].volume_id volume_id.should.match(r'vol-\w+') @@ -78,7 +80,8 @@ def test_instance_launch_and_terminate(): conn.terminate_instances([instance.id], dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set') conn.terminate_instances([instance.id]) @@ -90,7 +93,8 @@ def test_instance_launch_and_terminate(): @mock_ec2_deprecated def test_terminate_empty_instances(): conn = boto.connect_ec2('the_key', 'the_secret') - conn.terminate_instances.when.called_with([]).should.throw(EC2ResponseError) + conn.terminate_instances.when.called_with( + []).should.throw(EC2ResponseError) @freeze_time("2014-01-01 05:00:00") @@ -117,8 +121,10 @@ def test_instance_attach_volume(): for v in conn.get_all_volumes(volume_ids=[instance.block_device_mapping['/dev/sdc1'].volume_id]): v.attach_data.instance_id.should.equal(instance.id) - 
v.attach_data.attach_time.should.equal(instance.launch_time) # can do due to freeze_time decorator. - v.create_time.should.equal(instance.launch_time) # can do due to freeze_time decorator. + # can do due to freeze_time decorator. + v.attach_data.attach_time.should.equal(instance.launch_time) + # can do due to freeze_time decorator. + v.create_time.should.equal(instance.launch_time) v.region.name.should.equal(instance.region.name) v.status.should.equal('in-use') @@ -135,7 +141,8 @@ def test_get_instances_by_id(): reservation.instances.should.have.length_of(1) reservation.instances[0].id.should.equal(instance1.id) - reservations = conn.get_all_instances(instance_ids=[instance1.id, instance2.id]) + reservations = conn.get_all_instances( + instance_ids=[instance1.id, instance2.id]) reservations.should.have.length_of(1) reservation = reservations[0] reservation.instances.should.have.length_of(2) @@ -158,25 +165,31 @@ def test_get_instances_filtering_by_state(): conn.terminate_instances([instance1.id]) - reservations = conn.get_all_instances(filters={'instance-state-name': 'running'}) + reservations = conn.get_all_instances( + filters={'instance-state-name': 'running'}) reservations.should.have.length_of(1) - # Since we terminated instance1, only instance2 and instance3 should be returned + # Since we terminated instance1, only instance2 and instance3 should be + # returned instance_ids = [instance.id for instance in reservations[0].instances] set(instance_ids).should.equal(set([instance2.id, instance3.id])) - reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'running'}) + reservations = conn.get_all_instances( + [instance2.id], filters={'instance-state-name': 'running'}) reservations.should.have.length_of(1) instance_ids = [instance.id for instance in reservations[0].instances] instance_ids.should.equal([instance2.id]) - reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'terminated'}) + 
reservations = conn.get_all_instances( + [instance2.id], filters={'instance-state-name': 'terminated'}) list(reservations).should.equal([]) # get_all_instances should still return all 3 reservations = conn.get_all_instances() reservations[0].instances.should.have.length_of(3) - conn.get_all_instances.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + conn.get_all_instances.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + @mock_ec2_deprecated def test_get_instances_filtering_by_instance_id(): @@ -184,16 +197,19 @@ def test_get_instances_filtering_by_instance_id(): reservation = conn.run_instances('ami-1234abcd', min_count=3) instance1, instance2, instance3 = reservation.instances - reservations = conn.get_all_instances(filters={'instance-id': instance1.id}) + reservations = conn.get_all_instances( + filters={'instance-id': instance1.id}) # get_all_instances should return just instance1 reservations[0].instances.should.have.length_of(1) reservations[0].instances[0].id.should.equal(instance1.id) - reservations = conn.get_all_instances(filters={'instance-id': [instance1.id, instance2.id]}) + reservations = conn.get_all_instances( + filters={'instance-id': [instance1.id, instance2.id]}) # get_all_instances should return two reservations[0].instances.should.have.length_of(2) - reservations = conn.get_all_instances(filters={'instance-id': 'non-existing-id'}) + reservations = conn.get_all_instances( + filters={'instance-id': 'non-existing-id'}) reservations.should.have.length_of(0) @@ -207,22 +223,25 @@ def test_get_instances_filtering_by_instance_type(): reservation3 = conn.run_instances('ami-1234abcd', instance_type='t1.micro') instance3 = reservation3.instances[0] - reservations = conn.get_all_instances(filters={'instance-type': 'm1.small'}) + reservations = conn.get_all_instances( + filters={'instance-type': 'm1.small'}) # get_all_instances should return instance1,2 
reservations.should.have.length_of(2) reservations[0].instances.should.have.length_of(1) reservations[1].instances.should.have.length_of(1) - instance_ids = [ reservations[0].instances[0].id, - reservations[1].instances[0].id ] + instance_ids = [reservations[0].instances[0].id, + reservations[1].instances[0].id] set(instance_ids).should.equal(set([instance1.id, instance2.id])) - reservations = conn.get_all_instances(filters={'instance-type': 't1.micro'}) + reservations = conn.get_all_instances( + filters={'instance-type': 't1.micro'}) # get_all_instances should return one reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(1) reservations[0].instances[0].id.should.equal(instance3.id) - reservations = conn.get_all_instances(filters={'instance-type': ['t1.micro', 'm1.small']}) + reservations = conn.get_all_instances( + filters={'instance-type': ['t1.micro', 'm1.small']}) reservations.should.have.length_of(3) reservations[0].instances.should.have.length_of(1) reservations[1].instances.should.have.length_of(1) @@ -231,13 +250,15 @@ def test_get_instances_filtering_by_instance_type(): reservations[0].instances[0].id, reservations[1].instances[0].id, reservations[2].instances[0].id, - ] - set(instance_ids).should.equal(set([instance1.id, instance2.id, instance3.id])) + ] + set(instance_ids).should.equal( + set([instance1.id, instance2.id, instance3.id])) reservations = conn.get_all_instances(filters={'instance-type': 'bogus'}) - #bogus instance-type should return none + # bogus instance-type should return none reservations.should.have.length_of(0) + @mock_ec2_deprecated def test_get_instances_filtering_by_reason_code(): conn = boto.connect_ec2() @@ -246,10 +267,12 @@ def test_get_instances_filtering_by_reason_code(): instance1.stop() instance2.terminate() - reservations = conn.get_all_instances(filters={'state-reason-code': 'Client.UserInitiatedShutdown'}) + reservations = conn.get_all_instances( + filters={'state-reason-code': 
'Client.UserInitiatedShutdown'}) # get_all_instances should return instance1 and instance2 reservations[0].instances.should.have.length_of(2) - set([instance1.id, instance2.id]).should.equal(set([i.id for i in reservations[0].instances])) + set([instance1.id, instance2.id]).should.equal( + set([i.id for i in reservations[0].instances])) reservations = conn.get_all_instances(filters={'state-reason-code': ''}) # get_all_instances should return instance 3 @@ -262,10 +285,13 @@ def test_get_instances_filtering_by_source_dest_check(): conn = boto.connect_ec2() reservation = conn.run_instances('ami-1234abcd', min_count=2) instance1, instance2 = reservation.instances - conn.modify_instance_attribute(instance1.id, attribute='sourceDestCheck', value=False) + conn.modify_instance_attribute( + instance1.id, attribute='sourceDestCheck', value=False) - source_dest_check_false = conn.get_all_instances(filters={'source-dest-check': 'false'}) - source_dest_check_true = conn.get_all_instances(filters={'source-dest-check': 'true'}) + source_dest_check_false = conn.get_all_instances( + filters={'source-dest-check': 'false'}) + source_dest_check_true = conn.get_all_instances( + filters={'source-dest-check': 'true'}) source_dest_check_false[0].instances.should.have.length_of(1) source_dest_check_false[0].instances[0].id.should.equal(instance1.id) @@ -279,12 +305,14 @@ def test_get_instances_filtering_by_vpc_id(): conn = boto.connect_vpc('the_key', 'the_secret') vpc1 = conn.create_vpc("10.0.0.0/16") subnet1 = conn.create_subnet(vpc1.id, "10.0.0.0/27") - reservation1 = conn.run_instances('ami-1234abcd', min_count=1, subnet_id=subnet1.id) + reservation1 = conn.run_instances( + 'ami-1234abcd', min_count=1, subnet_id=subnet1.id) instance1 = reservation1.instances[0] vpc2 = conn.create_vpc("10.1.0.0/16") subnet2 = conn.create_subnet(vpc2.id, "10.1.0.0/27") - reservation2 = conn.run_instances('ami-1234abcd', min_count=1, subnet_id=subnet2.id) + reservation2 = conn.run_instances( + 
'ami-1234abcd', min_count=1, subnet_id=subnet2.id) instance2 = reservation2.instances[0] reservations1 = conn.get_all_instances(filters={'vpc-id': vpc1.id}) @@ -320,31 +348,35 @@ def test_get_instances_filtering_by_tag(): instance2.add_tag('tag2', 'wrong value') instance3.add_tag('tag2', 'value2') - reservations = conn.get_all_instances(filters={'tag:tag0' : 'value0'}) + reservations = conn.get_all_instances(filters={'tag:tag0': 'value0'}) # get_all_instances should return no instances reservations.should.have.length_of(0) - reservations = conn.get_all_instances(filters={'tag:tag1' : 'value1'}) + reservations = conn.get_all_instances(filters={'tag:tag1': 'value1'}) # get_all_instances should return both instances with this tag value reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(2) reservations[0].instances[0].id.should.equal(instance1.id) reservations[0].instances[1].id.should.equal(instance2.id) - reservations = conn.get_all_instances(filters={'tag:tag1' : 'value1', 'tag:tag2' : 'value2'}) + reservations = conn.get_all_instances( + filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) # get_all_instances should return the instance with both tag values reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(1) reservations[0].instances[0].id.should.equal(instance1.id) - reservations = conn.get_all_instances(filters={'tag:tag1' : 'value1', 'tag:tag2' : 'value2'}) + reservations = conn.get_all_instances( + filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) # get_all_instances should return the instance with both tag values reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(1) reservations[0].instances[0].id.should.equal(instance1.id) - reservations = conn.get_all_instances(filters={'tag:tag2' : ['value2', 'bogus']}) - # get_all_instances should return both instances with one of the acceptable tag values + reservations = conn.get_all_instances( + 
filters={'tag:tag2': ['value2', 'bogus']}) + # get_all_instances should return both instances with one of the + # acceptable tag values reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(2) reservations[0].instances[0].id.should.equal(instance1.id) @@ -362,32 +394,37 @@ def test_get_instances_filtering_by_tag_value(): instance2.add_tag('tag2', 'wrong value') instance3.add_tag('tag2', 'value2') - reservations = conn.get_all_instances(filters={'tag-value' : 'value0'}) + reservations = conn.get_all_instances(filters={'tag-value': 'value0'}) # get_all_instances should return no instances reservations.should.have.length_of(0) - reservations = conn.get_all_instances(filters={'tag-value' : 'value1'}) + reservations = conn.get_all_instances(filters={'tag-value': 'value1'}) # get_all_instances should return both instances with this tag value reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(2) reservations[0].instances[0].id.should.equal(instance1.id) reservations[0].instances[1].id.should.equal(instance2.id) - reservations = conn.get_all_instances(filters={'tag-value' : ['value2', 'value1']}) - # get_all_instances should return both instances with one of the acceptable tag values + reservations = conn.get_all_instances( + filters={'tag-value': ['value2', 'value1']}) + # get_all_instances should return both instances with one of the + # acceptable tag values reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(3) reservations[0].instances[0].id.should.equal(instance1.id) reservations[0].instances[1].id.should.equal(instance2.id) reservations[0].instances[2].id.should.equal(instance3.id) - reservations = conn.get_all_instances(filters={'tag-value' : ['value2', 'bogus']}) - # get_all_instances should return both instances with one of the acceptable tag values + reservations = conn.get_all_instances( + filters={'tag-value': ['value2', 'bogus']}) + # get_all_instances should 
return both instances with one of the + # acceptable tag values reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(2) reservations[0].instances[0].id.should.equal(instance1.id) reservations[0].instances[1].id.should.equal(instance3.id) + @mock_ec2_deprecated def test_get_instances_filtering_by_tag_name(): conn = boto.connect_ec2() @@ -399,25 +436,28 @@ def test_get_instances_filtering_by_tag_name(): instance2.add_tag('tag2X') instance3.add_tag('tag3') - reservations = conn.get_all_instances(filters={'tag-key' : 'tagX'}) + reservations = conn.get_all_instances(filters={'tag-key': 'tagX'}) # get_all_instances should return no instances reservations.should.have.length_of(0) - reservations = conn.get_all_instances(filters={'tag-key' : 'tag1'}) + reservations = conn.get_all_instances(filters={'tag-key': 'tag1'}) # get_all_instances should return both instances with this tag value reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(2) reservations[0].instances[0].id.should.equal(instance1.id) reservations[0].instances[1].id.should.equal(instance2.id) - reservations = conn.get_all_instances(filters={'tag-key' : ['tag1', 'tag3']}) - # get_all_instances should return both instances with one of the acceptable tag values + reservations = conn.get_all_instances( + filters={'tag-key': ['tag1', 'tag3']}) + # get_all_instances should return both instances with one of the + # acceptable tag values reservations.should.have.length_of(1) reservations[0].instances.should.have.length_of(3) reservations[0].instances[0].id.should.equal(instance1.id) reservations[0].instances[1].id.should.equal(instance2.id) reservations[0].instances[2].id.should.equal(instance3.id) + @mock_ec2_deprecated def test_instance_start_and_stop(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -431,7 +471,8 @@ def test_instance_start_and_stop(): stopped_instances = conn.stop_instances(instance_ids, dry_run=True) 
ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set') stopped_instances = conn.stop_instances(instance_ids) @@ -439,10 +480,12 @@ def test_instance_start_and_stop(): instance.state.should.equal('stopping') with assert_raises(EC2ResponseError) as ex: - started_instances = conn.start_instances([instances[0].id], dry_run=True) + started_instances = conn.start_instances( + [instances[0].id], dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set') started_instances = conn.start_instances([instances[0].id]) started_instances[0].state.should.equal('pending') @@ -458,7 +501,8 @@ def test_instance_reboot(): instance.reboot(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set') instance.reboot() instance.state.should.equal('pending') @@ -474,7 +518,8 @@ def test_instance_attribute_instance_type(): 
instance.modify_attribute("instanceType", "m1.small", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set') instance.modify_attribute("instanceType", "m1.small") @@ -482,6 +527,7 @@ def test_instance_attribute_instance_type(): instance_attribute.should.be.a(InstanceAttribute) instance_attribute.get('instanceType').should.equal("m1.small") + @mock_ec2_deprecated def test_modify_instance_attribute_security_groups(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -495,7 +541,8 @@ def test_modify_instance_attribute_security_groups(): instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') instance.modify_attribute("groupSet", [sg_id, sg_id2]) @@ -513,10 +560,12 @@ def test_instance_attribute_user_data(): instance = reservation.instances[0] with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute("userData", "this is my user data", dry_run=True) + instance.modify_attribute( + "userData", "this is my user data", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred 
(DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set') instance.modify_attribute("userData", "this is my user data") @@ -544,7 +593,8 @@ def test_instance_attribute_source_dest_check(): instance.modify_attribute("sourceDestCheck", False, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set') instance.modify_attribute("sourceDestCheck", False) @@ -585,10 +635,12 @@ def test_run_instance_with_security_group_name(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(EC2ResponseError) as ex: - group = conn.create_security_group('group1', "some description", dry_run=True) + group = conn.create_security_group( + 'group1', "some description", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') group = conn.create_security_group('group1', "some description") @@ -658,14 +710,16 @@ def test_run_instance_with_nic_autocreated(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") subnet = 
conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group('test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group('test security group #2', 'this is a test security group') + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') private_ip = "54.0.0.1" reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id, - security_groups=[security_group1.name], - security_group_ids=[security_group2.id], - private_ip_address=private_ip) + security_groups=[security_group1.name], + security_group_ids=[security_group2.id], + private_ip_address=private_ip) instance = reservation.instances[0] all_enis = conn.get_all_network_interfaces() @@ -677,11 +731,13 @@ def test_run_instance_with_nic_autocreated(): instance.subnet_id.should.equal(subnet.id) instance.groups.should.have.length_of(2) - set([group.id for group in instance.groups]).should.equal(set([security_group1.id,security_group2.id])) + set([group.id for group in instance.groups]).should.equal( + set([security_group1.id, security_group2.id])) eni.subnet_id.should.equal(subnet.id) eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id])) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) eni.private_ip_addresses.should.have.length_of(1) eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) @@ -691,20 +747,24 @@ def test_run_instance_with_nic_preexisting(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group('test security group #1', 'this is a test security group') - security_group2 = 
conn.create_security_group('test security group #2', 'this is a test security group') + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') private_ip = "54.0.0.1" - eni = conn.create_network_interface(subnet.id, private_ip, groups=[security_group1.id]) + eni = conn.create_network_interface( + subnet.id, private_ip, groups=[security_group1.id]) # Boto requires NetworkInterfaceCollection of NetworkInterfaceSpecifications... # annoying, but generates the desired querystring. from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection - interface = NetworkInterfaceSpecification(network_interface_id=eni.id, device_index=0) + interface = NetworkInterfaceSpecification( + network_interface_id=eni.id, device_index=0) interfaces = NetworkInterfaceCollection(interface) # end Boto objects reservation = conn.run_instances('ami-1234abcd', network_interfaces=interfaces, - security_group_ids=[security_group2.id]) + security_group_ids=[security_group2.id]) instance = reservation.instances[0] instance.subnet_id.should.equal(subnet.id) @@ -718,9 +778,11 @@ def test_run_instance_with_nic_preexisting(): instance_eni.subnet_id.should.equal(subnet.id) instance_eni.groups.should.have.length_of(2) - set([group.id for group in instance_eni.groups]).should.equal(set([security_group1.id,security_group2.id])) + set([group.id for group in instance_eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) instance_eni.private_ip_addresses.should.have.length_of(1) - instance_eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) + instance_eni.private_ip_addresses[ + 0].private_ip_address.should.equal(private_ip) @requires_boto_gte("2.32.0") @@ -730,10 +792,13 @@ def test_instance_with_nic_attach_detach(): vpc = conn.create_vpc("10.0.0.0/16") subnet = 
conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group('test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group('test security group #2', 'this is a test security group') + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') - reservation = conn.run_instances('ami-1234abcd', security_group_ids=[security_group1.id]) + reservation = conn.run_instances( + 'ami-1234abcd', security_group_ids=[security_group1.id]) instance = reservation.instances[0] eni = conn.create_network_interface(subnet.id, groups=[security_group2.id]) @@ -742,14 +807,17 @@ def test_instance_with_nic_attach_detach(): instance.interfaces.should.have.length_of(1) eni.groups.should.have.length_of(1) - set([group.id for group in eni.groups]).should.equal(set([security_group2.id])) + set([group.id for group in eni.groups]).should.equal( + set([security_group2.id])) # Attach with assert_raises(EC2ResponseError) as ex: - conn.attach_network_interface(eni.id, instance.id, device_index=1, dry_run=True) + conn.attach_network_interface( + eni.id, instance.id, device_index=1, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') conn.attach_network_interface(eni.id, instance.id, device_index=1) @@ -759,18 +827,22 @@ def test_instance_with_nic_attach_detach(): instance_eni = instance.interfaces[1] instance_eni.id.should.equal(eni.id) 
instance_eni.groups.should.have.length_of(2) - set([group.id for group in instance_eni.groups]).should.equal(set([security_group1.id,security_group2.id])) + set([group.id for group in instance_eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) - eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0] + eni = conn.get_all_network_interfaces( + filters={'network-interface-id': eni.id})[0] eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id])) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) # Detach with assert_raises(EC2ResponseError) as ex: conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') conn.detach_network_interface(instance_eni.attachment.id) @@ -778,9 +850,11 @@ def test_instance_with_nic_attach_detach(): instance.update() instance.interfaces.should.have.length_of(1) - eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0] + eni = conn.get_all_network_interfaces( + filters={'network-interface-id': eni.id})[0] eni.groups.should.have.length_of(1) - set([group.id for group in eni.groups]).should.equal(set([security_group2.id])) + set([group.id for group in eni.groups]).should.equal( + set([security_group2.id])) # Detach with invalid attachment ID with assert_raises(EC2ResponseError) as cm: @@ -851,6 +925,7 @@ def test_describe_instance_status_with_instance_filter(): 
cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @requires_boto_gte("2.32.0") @mock_ec2_deprecated def test_describe_instance_status_with_non_running_instances(): @@ -877,6 +952,7 @@ def test_describe_instance_status_with_non_running_instances(): status3 = next((s for s in all_status if s.id == instance3.id), None) status3.state_name.should.equal('running') + @mock_ec2_deprecated def test_get_instance_by_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -887,12 +963,15 @@ def test_get_instance_by_security_group(): security_group = conn.create_security_group('test', 'test') with assert_raises(EC2ResponseError) as ex: - conn.modify_instance_attribute(instance.id, "groupSet", [security_group.id], dry_run=True) + conn.modify_instance_attribute(instance.id, "groupSet", [ + security_group.id], dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') - conn.modify_instance_attribute(instance.id, "groupSet", [security_group.id]) + conn.modify_instance_attribute( + instance.id, "groupSet", [security_group.id]) security_group_instances = security_group.instances() diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index fe5e4945d..5842621cd 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -13,9 +13,10 @@ import sure # noqa from moto import mock_ec2_deprecated -VPC_CIDR="10.0.0.0/16" -BAD_VPC="vpc-deadbeef" -BAD_IGW="igw-deadbeef" +VPC_CIDR = "10.0.0.0/16" +BAD_VPC = "vpc-deadbeef" +BAD_IGW = "igw-deadbeef" 
+ @mock_ec2_deprecated def test_igw_create(): @@ -28,7 +29,8 @@ def test_igw_create(): igw = conn.create_internet_gateway(dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set') igw = conn.create_internet_gateway() conn.get_all_internet_gateways().should.have.length_of(1) @@ -37,6 +39,7 @@ def test_igw_create(): igw = conn.get_all_internet_gateways()[0] igw.attachments.should.have.length_of(0) + @mock_ec2_deprecated def test_igw_attach(): """ internet gateway attach """ @@ -48,13 +51,15 @@ def test_igw_attach(): conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') conn.attach_internet_gateway(igw.id, vpc.id) igw = conn.get_all_internet_gateways()[0] igw.attachments[0].vpc_id.should.be.equal(vpc.id) + @mock_ec2_deprecated def test_igw_attach_bad_vpc(): """ internet gateway fail to attach w/ bad vpc """ @@ -67,6 +72,7 @@ def test_igw_attach_bad_vpc(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_igw_attach_twice(): """ internet gateway fail to attach twice """ @@ -82,6 +88,7 @@ def test_igw_attach_twice(): cm.exception.status.should.equal(400) 
cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_igw_detach(): """ internet gateway detach""" @@ -94,12 +101,14 @@ def test_igw_detach(): conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set') conn.detach_internet_gateway(igw.id, vpc.id) igw = conn.get_all_internet_gateways()[0] igw.attachments.should.have.length_of(0) + @mock_ec2_deprecated def test_igw_detach_wrong_vpc(): """ internet gateway fail to detach w/ wrong vpc """ @@ -115,6 +124,7 @@ def test_igw_detach_wrong_vpc(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_igw_detach_invalid_vpc(): """ internet gateway fail to detach w/ invalid vpc """ @@ -129,6 +139,7 @@ def test_igw_detach_invalid_vpc(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_igw_detach_unattached(): """ internet gateway fail to detach unattached """ @@ -142,6 +153,7 @@ def test_igw_detach_unattached(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_igw_delete(): """ internet gateway delete""" @@ -155,11 +167,13 @@ def test_igw_delete(): conn.delete_internet_gateway(igw.id, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set') + 
ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set') conn.delete_internet_gateway(igw.id) conn.get_all_internet_gateways().should.have.length_of(0) + @mock_ec2_deprecated def test_igw_delete_attached(): """ internet gateway fail to delete attached """ @@ -174,6 +188,7 @@ def test_igw_delete_attached(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2_deprecated def test_igw_desribe(): """ internet gateway fetch by id """ @@ -182,6 +197,7 @@ def test_igw_desribe(): igw_by_search = conn.get_all_internet_gateways([igw.id])[0] igw.id.should.equal(igw_by_search.id) + @mock_ec2_deprecated def test_igw_desribe_bad_id(): """ internet gateway fail to fetch by bad id """ @@ -203,7 +219,8 @@ def test_igw_filter_by_vpc_id(): vpc = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw1.id, vpc.id) - result = conn.get_all_internet_gateways(filters={"attachment.vpc-id": vpc.id}) + result = conn.get_all_internet_gateways( + filters={"attachment.vpc-id": vpc.id}) result.should.have.length_of(1) result[0].id.should.equal(igw1.id) @@ -230,7 +247,8 @@ def test_igw_filter_by_internet_gateway_id(): igw1 = conn.create_internet_gateway() igw2 = conn.create_internet_gateway() - result = conn.get_all_internet_gateways(filters={"internet-gateway-id": igw1.id}) + result = conn.get_all_internet_gateways( + filters={"internet-gateway-id": igw1.id}) result.should.have.length_of(1) result[0].id.should.equal(igw1.id) @@ -245,6 +263,7 @@ def test_igw_filter_by_attachment_state(): vpc = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw1.id, vpc.id) - result = conn.get_all_internet_gateways(filters={"attachment.state": "available"}) + result = conn.get_all_internet_gateways( + filters={"attachment.state": "available"}) result.should.have.length_of(1) result[0].id.should.equal(igw1.id) diff --git 
a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 6c4773200..ec979a871 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -36,7 +36,8 @@ def test_key_pairs_create(): kp = conn.create_key_pair('foo', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') kp = conn.create_key_pair('foo') assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') @@ -91,7 +92,8 @@ def test_key_pairs_delete_exist(): r = conn.delete_key_pair('foo', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set') r = conn.delete_key_pair('foo') r.should.be.ok @@ -106,7 +108,8 @@ def test_key_pairs_import(): kp = conn.import_key_pair('foo', b'content', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') kp = conn.import_key_pair('foo', b'content') assert kp.name 
== 'foo' diff --git a/tests/test_ec2/test_nat_gateway.py b/tests/test_ec2/test_nat_gateway.py index b9c95f7c3..27e8753be 100644 --- a/tests/test_ec2/test_nat_gateway.py +++ b/tests/test_ec2/test_nat_gateway.py @@ -56,7 +56,8 @@ def test_delete_nat_gateway(): nat_gateway_id = nat_gateway['NatGateway']['NatGatewayId'] response = conn.delete_nat_gateway(NatGatewayId=nat_gateway_id) - response['ResponseMetadata'].pop('HTTPHeaders', None) # this is hard to match against, so remove it + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) response['ResponseMetadata'].pop('RetryAttempts', None) response.should.equal({ 'NatGatewayId': nat_gateway_id, @@ -89,14 +90,20 @@ def test_create_and_describe_nat_gateway(): enis = conn.describe_network_interfaces()['NetworkInterfaces'] eni_id = enis[0]['NetworkInterfaceId'] - public_ip = conn.describe_addresses(AllocationIds=[allocation_id])['Addresses'][0]['PublicIp'] + public_ip = conn.describe_addresses(AllocationIds=[allocation_id])[ + 'Addresses'][0]['PublicIp'] describe_response['NatGateways'].should.have.length_of(1) - describe_response['NatGateways'][0]['NatGatewayId'].should.equal(nat_gateway_id) + describe_response['NatGateways'][0][ + 'NatGatewayId'].should.equal(nat_gateway_id) describe_response['NatGateways'][0]['State'].should.equal('available') describe_response['NatGateways'][0]['SubnetId'].should.equal(subnet_id) describe_response['NatGateways'][0]['VpcId'].should.equal(vpc_id) - describe_response['NatGateways'][0]['NatGatewayAddresses'][0]['AllocationId'].should.equal(allocation_id) - describe_response['NatGateways'][0]['NatGatewayAddresses'][0]['NetworkInterfaceId'].should.equal(eni_id) - assert describe_response['NatGateways'][0]['NatGatewayAddresses'][0]['PrivateIp'].startswith('10.') - describe_response['NatGateways'][0]['NatGatewayAddresses'][0]['PublicIp'].should.equal(public_ip) + describe_response['NatGateways'][0]['NatGatewayAddresses'][ + 
0]['AllocationId'].should.equal(allocation_id) + describe_response['NatGateways'][0]['NatGatewayAddresses'][ + 0]['NetworkInterfaceId'].should.equal(eni_id) + assert describe_response['NatGateways'][0][ + 'NatGatewayAddresses'][0]['PrivateIp'].startswith('10.') + describe_response['NatGateways'][0]['NatGatewayAddresses'][ + 0]['PublicIp'].should.equal(public_ip) diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py index 07e02c526..4beca7c67 100644 --- a/tests/test_ec2/test_regions.py +++ b/tests/test_ec2/test_regions.py @@ -50,9 +50,11 @@ def test_add_servers_to_multiple_regions(): @mock_elb_deprecated def test_create_autoscaling_group(): elb_conn = boto.ec2.elb.connect_to_region('us-east-1') - elb_conn.create_load_balancer('us_test_lb', zones=[], listeners=[(80, 8080, 'http')]) + elb_conn.create_load_balancer( + 'us_test_lb', zones=[], listeners=[(80, 8080, 'http')]) elb_conn = boto.ec2.elb.connect_to_region('ap-northeast-1') - elb_conn.create_load_balancer('ap_test_lb', zones=[], listeners=[(80, 8080, 'http')]) + elb_conn.create_load_balancer( + 'ap_test_lb', zones=[], listeners=[(80, 8080, 'http')]) us_conn = boto.ec2.autoscale.connect_to_region('us-east-1') config = boto.ec2.autoscale.LaunchConfiguration( @@ -79,7 +81,6 @@ def test_create_autoscaling_group(): ) us_conn.create_auto_scaling_group(group) - ap_conn = boto.ec2.autoscale.connect_to_region('ap-northeast-1') config = boto.ec2.autoscale.LaunchConfiguration( name='ap_tester', @@ -105,7 +106,6 @@ def test_create_autoscaling_group(): ) ap_conn.create_auto_scaling_group(group) - len(us_conn.get_all_groups()).should.equal(1) len(ap_conn.get_all_groups()).should.equal(1) @@ -122,7 +122,8 @@ def test_create_autoscaling_group(): us_group.health_check_type.should.equal("EC2") list(us_group.load_balancers).should.equal(["us_test_lb"]) us_group.placement_group.should.equal("us_test_placement") - list(us_group.termination_policies).should.equal(["OldestInstance", "NewestInstance"]) + 
list(us_group.termination_policies).should.equal( + ["OldestInstance", "NewestInstance"]) ap_group = ap_conn.get_all_groups()[0] ap_group.name.should.equal('ap_tester_group') @@ -137,4 +138,5 @@ def test_create_autoscaling_group(): ap_group.health_check_type.should.equal("EC2") list(ap_group.load_balancers).should.equal(["ap_test_lb"]) ap_group.placement_group.should.equal("ap_test_placement") - list(ap_group.termination_policies).should.equal(["OldestInstance", "NewestInstance"]) + list(ap_group.termination_policies).should.equal( + ["OldestInstance", "NewestInstance"]) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 3aa4b460a..6e6c62741 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -91,28 +91,34 @@ def test_route_tables_filters_standard(): all_route_tables.should.have.length_of(5) # Filter by main route table - main_route_tables = conn.get_all_route_tables(filters={'association.main':'true'}) + main_route_tables = conn.get_all_route_tables( + filters={'association.main': 'true'}) main_route_tables.should.have.length_of(3) - main_route_table_ids = [route_table.id for route_table in main_route_tables] + main_route_table_ids = [ + route_table.id for route_table in main_route_tables] main_route_table_ids.should_not.contain(route_table1.id) main_route_table_ids.should_not.contain(route_table2.id) # Filter by VPC - vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id':vpc1.id}) + vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc1.id}) vpc1_route_tables.should.have.length_of(2) - vpc1_route_table_ids = [route_table.id for route_table in vpc1_route_tables] + vpc1_route_table_ids = [ + route_table.id for route_table in vpc1_route_tables] vpc1_route_table_ids.should.contain(route_table1.id) vpc1_route_table_ids.should_not.contain(route_table2.id) # Filter by VPC and main route table - vpc2_main_route_tables = 
conn.get_all_route_tables(filters={'association.main':'true', 'vpc-id':vpc2.id}) + vpc2_main_route_tables = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc2.id}) vpc2_main_route_tables.should.have.length_of(1) - vpc2_main_route_table_ids = [route_table.id for route_table in vpc2_main_route_tables] + vpc2_main_route_table_ids = [ + route_table.id for route_table in vpc2_main_route_tables] vpc2_main_route_table_ids.should_not.contain(route_table1.id) vpc2_main_route_table_ids.should_not.contain(route_table2.id) # Unsupported filter - conn.get_all_route_tables.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + conn.get_all_route_tables.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) @mock_ec2_deprecated @@ -134,19 +140,22 @@ def test_route_tables_filters_associations(): all_route_tables.should.have.length_of(4) # Filter by association ID - association1_route_tables = conn.get_all_route_tables(filters={'association.route-table-association-id':association_id1}) + association1_route_tables = conn.get_all_route_tables( + filters={'association.route-table-association-id': association_id1}) association1_route_tables.should.have.length_of(1) association1_route_tables[0].id.should.equal(route_table1.id) association1_route_tables[0].associations.should.have.length_of(2) # Filter by route table ID - route_table2_route_tables = conn.get_all_route_tables(filters={'association.route-table-id':route_table2.id}) + route_table2_route_tables = conn.get_all_route_tables( + filters={'association.route-table-id': route_table2.id}) route_table2_route_tables.should.have.length_of(1) route_table2_route_tables[0].id.should.equal(route_table2.id) route_table2_route_tables[0].associations.should.have.length_of(1) # Filter by subnet ID - subnet_route_tables = conn.get_all_route_tables(filters={'association.subnet-id':subnet1.id}) + subnet_route_tables = 
conn.get_all_route_tables( + filters={'association.subnet-id': subnet1.id}) subnet_route_tables.should.have.length_of(1) subnet_route_tables[0].id.should.equal(route_table1.id) association1_route_tables[0].associations.should.have.length_of(2) @@ -179,7 +188,8 @@ def test_route_table_associations(): route_table.associations[0].subnet_id.should.equal(subnet.id) # Associate is idempotent - association_id_idempotent = conn.associate_route_table(route_table.id, subnet.id) + association_id_idempotent = conn.associate_route_table( + route_table.id, subnet.id) association_id_idempotent.should.equal(association_id) # Error: Attempt delete associated route table. @@ -255,7 +265,8 @@ def test_route_table_replace_route_table_association(): route_table1.associations[0].subnet_id.should.equal(subnet.id) # Replace Association - association_id2 = conn.replace_route_table_association_with_assoc(association_id1, route_table2.id) + association_id2 = conn.replace_route_table_association_with_assoc( + association_id1, route_table2.id) # Refresh route_table1 = conn.get_all_route_tables(route_table1.id)[0] @@ -271,19 +282,22 @@ def test_route_table_replace_route_table_association(): route_table2.associations[0].subnet_id.should.equal(subnet.id) # Replace Association is idempotent - association_id_idempotent = conn.replace_route_table_association_with_assoc(association_id2, route_table2.id) + association_id_idempotent = conn.replace_route_table_association_with_assoc( + association_id2, route_table2.id) association_id_idempotent.should.equal(association_id2) # Error: Replace association with invalid association ID with assert_raises(EC2ResponseError) as cm: - conn.replace_route_table_association_with_assoc("rtbassoc-1234abcd", route_table1.id) + conn.replace_route_table_association_with_assoc( + "rtbassoc-1234abcd", route_table1.id) cm.exception.code.should.equal('InvalidAssociationID.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none # Error: 
Replace association with invalid route table ID with assert_raises(EC2ResponseError) as cm: - conn.replace_route_table_association_with_assoc(association_id2, "rtb-1234abcd") + conn.replace_route_table_association_with_assoc( + association_id2, "rtb-1234abcd") cm.exception.code.should.equal('InvalidRouteTableID.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @@ -298,7 +312,8 @@ def test_route_table_get_by_tag(): route_table = conn.create_route_table(vpc.id) route_table.add_tag('Name', 'TestRouteTable') - route_tables = conn.get_all_route_tables(filters={'tag:Name': 'TestRouteTable'}) + route_tables = conn.get_all_route_tables( + filters={'tag:Name': 'TestRouteTable'}) route_tables.should.have.length_of(1) route_tables[0].vpc_id.should.equal(vpc.id) @@ -323,7 +338,8 @@ def test_route_table_get_by_tag_boto3(): route_tables[0].vpc_id.should.equal(vpc.id) route_tables[0].id.should.equal(route_table.id) route_tables[0].tags.should.have.length_of(1) - route_tables[0].tags[0].should.equal({'Key': 'Name', 'Value': 'TestRouteTable'}) + route_tables[0].tags[0].should.equal( + {'Key': 'Name', 'Value': 'TestRouteTable'}) @mock_ec2_deprecated @@ -337,10 +353,12 @@ def test_routes_additional(): conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] # Refresh route table + main_route_table = conn.get_all_route_tables( + filters={'vpc-id': vpc.id})[0] # Refresh route table main_route_table.routes.should.have.length_of(2) - new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] new_routes.should.have.length_of(1) new_route = new_routes[0] @@ -351,10 +369,12 @@ def test_routes_additional(): conn.delete_route(main_route_table.id, ROUTE_CIDR) - main_route_table = 
conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] # Refresh route table + main_route_table = conn.get_all_route_tables( + filters={'vpc-id': vpc.id})[0] # Refresh route table main_route_table.routes.should.have.length_of(1) - new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] new_routes.should.have.length_of(0) with assert_raises(EC2ResponseError) as cm: @@ -368,7 +388,8 @@ def test_routes_additional(): def test_routes_replace(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc.id})[0] + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] local_route = main_route_table.routes[0] ROUTE_CIDR = "10.0.0.4/24" @@ -384,11 +405,13 @@ def test_routes_replace(): # Replace... 
def get_target_route(): route_table = conn.get_all_route_tables(main_route_table.id)[0] - routes = [route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block] + routes = [ + route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block] routes.should.have.length_of(1) return routes[0] - conn.replace_route(main_route_table.id, ROUTE_CIDR, instance_id=instance.id) + conn.replace_route(main_route_table.id, ROUTE_CIDR, + instance_id=instance.id) target_route = get_target_route() target_route.gateway_id.should.be.none @@ -422,12 +445,14 @@ def test_routes_not_supported(): ROUTE_CIDR = "10.0.0.4/24" # Create - conn.create_route.when.called_with(main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) + conn.create_route.when.called_with( + main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) # Replace igw = conn.create_internet_gateway() conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - conn.replace_route.when.called_with(main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) + conn.replace_route.when.called_with( + main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) @requires_boto_gte("2.34.0") @@ -435,18 +460,21 @@ def test_routes_not_supported(): def test_routes_vpc_peering_connection(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc.id})[0] + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] local_route = main_route_table.routes[0] ROUTE_CIDR = "10.0.0.4/24" peer_vpc = conn.create_vpc("11.0.0.0/16") vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) - conn.create_route(main_route_table.id, ROUTE_CIDR, 
vpc_peering_connection_id=vpc_pcx.id) + conn.create_route(main_route_table.id, ROUTE_CIDR, + vpc_peering_connection_id=vpc_pcx.id) # Refresh route table main_route_table = conn.get_all_route_tables(main_route_table.id)[0] - new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] new_routes.should.have.length_of(1) new_route = new_routes[0] @@ -463,7 +491,8 @@ def test_routes_vpn_gateway(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc.id})[0] + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] ROUTE_CIDR = "10.0.0.4/24" vpn_gw = conn.create_vpn_gateway(type="ipsec.1") @@ -471,7 +500,8 @@ def test_routes_vpn_gateway(): conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=vpn_gw.id) main_route_table = conn.get_all_route_tables(main_route_table.id)[0] - new_routes = [route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] new_routes.should.have.length_of(1) new_route = new_routes[0] diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 3056331be..21ecad11e 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -20,25 +20,30 @@ def test_create_and_describe_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(EC2ResponseError) as ex: - security_group = conn.create_security_group('test security group', 'this is a test security group', dry_run=True) + security_group = conn.create_security_group( + 'test security group', 'this is a test security 
group', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') - security_group = conn.create_security_group('test security group', 'this is a test security group') + security_group = conn.create_security_group( + 'test security group', 'this is a test security group') security_group.name.should.equal('test security group') security_group.description.should.equal('this is a test security group') # Trying to create another group with the same name should throw an error with assert_raises(EC2ResponseError) as cm: - conn.create_security_group('test security group', 'this is a test security group') + conn.create_security_group( + 'test security group', 'this is a test security group') cm.exception.code.should.equal('InvalidGroup.Duplicate') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none all_groups = conn.get_all_security_groups() - all_groups.should.have.length_of(3) # The default group gets created automatically + # The default group gets created automatically + all_groups.should.have.length_of(3) group_names = [group.name for group in all_groups] set(group_names).should.equal(set(["default", "test security group"])) @@ -66,16 +71,19 @@ def test_default_security_group(): def test_create_and_describe_vpc_security_group(): conn = boto.connect_ec2('the_key', 'the_secret') vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group('test security group', 'this is a test security group', vpc_id=vpc_id) + security_group = conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id=vpc_id) 
security_group.vpc_id.should.equal(vpc_id) security_group.name.should.equal('test security group') security_group.description.should.equal('this is a test security group') - # Trying to create another group with the same name in the same VPC should throw an error + # Trying to create another group with the same name in the same VPC should + # throw an error with assert_raises(EC2ResponseError) as cm: - conn.create_security_group('test security group', 'this is a test security group', vpc_id) + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) cm.exception.code.should.equal('InvalidGroup.Duplicate') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @@ -94,8 +102,10 @@ def test_create_two_security_groups_with_same_name_in_different_vpc(): vpc_id = 'vpc-5300000c' vpc_id2 = 'vpc-5300000d' - conn.create_security_group('test security group', 'this is a test security group', vpc_id) - conn.create_security_group('test security group', 'this is a test security group', vpc_id2) + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id2) all_groups = conn.get_all_security_groups() @@ -125,7 +135,8 @@ def test_deleting_security_groups(): conn.delete_security_group('test2', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') conn.delete_security_group('test2') conn.get_all_security_groups().should.have.length_of(3) @@ -151,65 +162,83 @@ def 
test_authorize_ip_range_and_revoke(): security_group = conn.create_security_group('test', 'test') with assert_raises(EC2ResponseError) as ex: - success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") assert success.should.be.true security_group = conn.get_all_security_groups(groupnames=['test'])[0] int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[0].cidr_ip.should.equal("123.123.123.123/32") + security_group.rules[0].grants[ + 0].cidr_ip.should.equal("123.123.123.123/32") # Wrong Cidr should throw error with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32") + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.122/32") cm.exception.code.should.equal('InvalidPermission.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none # Actually revoke with assert_raises(EC2ResponseError) as ex: - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", 
cidr_ip="123.123.123.123/32", dry_run=True) + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32") security_group = conn.get_all_security_groups()[0] security_group.rules.should.have.length_of(0) # Test for egress as well - egress_security_group = conn.create_security_group('testegress', 'testegress', vpc_id='vpc-3432589') + egress_security_group = conn.create_security_group( + 'testegress', 'testegress', vpc_id='vpc-3432589') with assert_raises(EC2ResponseError) as ex: - success = conn.authorize_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') - 
success = conn.authorize_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") assert success.should.be.true - egress_security_group = conn.get_all_security_groups(groupnames='testegress')[0] + egress_security_group = conn.get_all_security_groups( + groupnames='testegress')[0] # There are two egress rules associated with the security group: # the default outbound rule and the new one int(egress_security_group.rules_egress[1].to_port).should.equal(2222) - egress_security_group.rules_egress[1].grants[0].cidr_ip.should.equal("123.123.123.123/32") + egress_security_group.rules_egress[1].grants[ + 0].cidr_ip.should.equal("123.123.123.123/32") # Wrong Cidr should throw error - egress_security_group.revoke.when.called_with(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) + egress_security_group.revoke.when.called_with( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) # Actually revoke with assert_raises(EC2ResponseError) as ex: - conn.revoke_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have 
succeeded, but DryRun flag is set') - conn.revoke_security_group_egress(egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") egress_security_group = conn.get_all_security_groups()[0] # There is still the default outbound rule @@ -223,24 +252,30 @@ def test_authorize_other_group_and_revoke(): other_security_group = conn.create_security_group('other', 'other') wrong_group = conn.create_security_group('wrong', 'wrong') - success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) assert success.should.be.true - security_group = [group for group in conn.get_all_security_groups() if group.name == 'test'][0] + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[0].group_id.should.equal(other_security_group.id) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) # Wrong source group should throw error with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", src_group=wrong_group) + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=wrong_group) cm.exception.code.should.equal('InvalidPermission.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none # Actually revoke - security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=other_security_group) - security_group = [group for group in 
conn.get_all_security_groups() if group.name == 'test'][0] + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] security_group.rules.should.have.length_of(0) @@ -250,8 +285,10 @@ def test_authorize_other_group_egress_and_revoke(): vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - sg01 = ec2.create_security_group(GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group(GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) + sg01 = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + sg02 = ec2.create_security_group( + GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) ip_permission = { 'IpProtocol': 'tcp', @@ -278,27 +315,33 @@ def test_authorize_group_in_vpc(): security_group = conn.create_security_group('test1', 'test1', vpc_id) other_security_group = conn.create_security_group('test2', 'test2', vpc_id) - success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) success.should.be.true # Check that the rule is accurate - security_group = [group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[0].group_id.should.equal(other_security_group.id) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) # Now remove the rule - success = security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success = security_group.revoke( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) 
success.should.be.true # And check that it gets revoked - security_group = [group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] security_group.rules.should.have.length_of(0) @mock_ec2_deprecated def test_get_all_security_groups(): conn = boto.connect_ec2() - sg1 = conn.create_security_group(name='test1', description='test1', vpc_id='vpc-mjm05d27') + sg1 = conn.create_security_group( + name='test1', description='test1', vpc_id='vpc-mjm05d27') conn.create_security_group(name='test2', description='test2') resp = conn.get_all_security_groups(groupnames=['test1']) @@ -326,7 +369,8 @@ def test_authorize_bad_cidr_throws_invalid_parameter_value(): conn = boto.connect_ec2('the_key', 'the_secret') security_group = conn.create_security_group('test', 'test') with assert_raises(EC2ResponseError) as cm: - security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123") + security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123") cm.exception.code.should.equal('InvalidParameterValue') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @@ -343,7 +387,8 @@ def test_security_group_tagging(): sg.add_tag("Test", "Tag", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') sg.add_tag("Test", "Tag") @@ -362,7 +407,8 @@ def test_security_group_tag_filtering(): sg = conn.create_security_group("test-sg", "Test SG") sg.add_tag("test-tag", "test-value") - groups = 
conn.get_all_security_groups(filters={"tag:test-tag": "test-value"}) + groups = conn.get_all_security_groups( + filters={"tag:test-tag": "test-value"}) groups.should.have.length_of(1) @@ -507,18 +553,18 @@ def test_sec_group_rule_limit_vpc(): cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - ''' Boto3 ''' + @mock_ec2 def test_add_same_rule_twice_throws_error(): ec2 = boto3.resource('ec2', region_name='us-west-1') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - sg = ec2.create_security_group(GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id) + sg = ec2.create_security_group( + GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id) ip_permissions = [ { @@ -541,13 +587,18 @@ def test_security_group_tagging_boto3(): sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") with assert_raises(ClientError) as ex: - conn.create_tags(Resources=[sg['GroupId']], Tags=[{'Key': 'Test', 'Value': 'Tag'}], DryRun=True) + conn.create_tags(Resources=[sg['GroupId']], Tags=[ + {'Key': 'Test', 'Value': 'Tag'}], DryRun=True) ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - conn.create_tags(Resources=[sg['GroupId']], Tags=[{'Key': 'Test', 'Value': 'Tag'}]) - describe = conn.describe_security_groups(Filters=[{'Name': 'tag-value', 'Values': ['Tag']}]) + conn.create_tags(Resources=[sg['GroupId']], Tags=[ + {'Key': 'Test', 'Value': 'Tag'}]) + describe = 
conn.describe_security_groups( + Filters=[{'Name': 'tag-value', 'Values': ['Tag']}]) tag = describe["SecurityGroups"][0]['Tags'][0] tag['Value'].should.equal("Tag") tag['Key'].should.equal("Test") @@ -559,9 +610,12 @@ def test_authorize_and_revoke_in_bulk(): vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - sg01 = ec2.create_security_group(GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group(GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) - sg03 = ec2.create_security_group(GroupName='sg03', Description='Test security group sg03') + sg01 = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + sg02 = ec2.create_security_group( + GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) + sg03 = ec2.create_security_group( + GroupName='sg03', Description='Test security group sg03') ip_permissions = [ { @@ -611,15 +665,19 @@ def test_authorize_and_revoke_in_bulk(): for ip_permission in expected_ip_permissions: sg01.ip_permissions_egress.shouldnt.contain(ip_permission) + @mock_ec2_deprecated def test_get_all_security_groups_filter_with_same_vpc_id(): conn = boto.connect_ec2('the_key', 'the_secret') vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group('test1', 'test1', vpc_id=vpc_id) - security_group2 = conn.create_security_group('test2', 'test2', vpc_id=vpc_id) + security_group = conn.create_security_group( + 'test1', 'test1', vpc_id=vpc_id) + security_group2 = conn.create_security_group( + 'test2', 'test2', vpc_id=vpc_id) security_group.vpc_id.should.equal(vpc_id) security_group2.vpc_id.should.equal(vpc_id) - security_groups = conn.get_all_security_groups(group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) + security_groups = conn.get_all_security_groups( + group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) security_groups.should.have.length_of(1) diff --git a/tests/test_ec2/test_server.py 
b/tests/test_ec2/test_server.py index e6e9998ba..00be62593 100644 --- a/tests/test_ec2/test_server.py +++ b/tests/test_ec2/test_server.py @@ -18,7 +18,8 @@ def test_ec2_server_get(): headers={"Host": "ec2.us-east-1.amazonaws.com"} ) - groups = re.search("(.*)", res.data.decode('utf-8')) + groups = re.search("(.*)", + res.data.decode('utf-8')) instance_id = groups.groups()[0] res = test_client.get('/?Action=DescribeInstances') diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index 5b51ae68a..8ac91c57b 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -5,9 +5,11 @@ import sure # noqa from moto import mock_ec2 + def get_subnet_id(conn): vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] return subnet_id @@ -19,60 +21,60 @@ def spot_config(subnet_id, allocation_strategy="lowestPrice"): 'TargetCapacity': 6, 'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet', 'LaunchSpecifications': [{ - 'ImageId': 'ami-123', - 'KeyName': 'my-key', - 'SecurityGroups': [ - { + 'ImageId': 'ami-123', + 'KeyName': 'my-key', + 'SecurityGroups': [ + { 'GroupId': 'sg-123' - }, - ], - 'UserData': 'some user data', - 'InstanceType': 't2.small', - 'BlockDeviceMappings': [ - { + }, + ], + 'UserData': 'some user data', + 'InstanceType': 't2.small', + 'BlockDeviceMappings': [ + { 'VirtualName': 'string', 'DeviceName': 'string', 'Ebs': { 'SnapshotId': 'string', 'VolumeSize': 123, - 'DeleteOnTermination': True|False, + 'DeleteOnTermination': True | False, 'VolumeType': 'standard', 'Iops': 123, - 'Encrypted': True|False + 'Encrypted': True | False }, - 'NoDevice': 'string' - }, - ], - 'Monitoring': { - 'Enabled': True + 'NoDevice': 'string' }, - 'SubnetId': 
subnet_id, - 'IamInstanceProfile': { - 'Arn': 'arn:aws:iam::123456789012:role/fleet' - }, - 'EbsOptimized': False, - 'WeightedCapacity': 2.0, - 'SpotPrice': '0.13' + ], + 'Monitoring': { + 'Enabled': True + }, + 'SubnetId': subnet_id, + 'IamInstanceProfile': { + 'Arn': 'arn:aws:iam::123456789012:role/fleet' + }, + 'EbsOptimized': False, + 'WeightedCapacity': 2.0, + 'SpotPrice': '0.13' }, { - 'ImageId': 'ami-123', - 'KeyName': 'my-key', - 'SecurityGroups': [ - { - 'GroupId': 'sg-123' - }, - ], - 'UserData': 'some user data', - 'InstanceType': 't2.large', - 'Monitoring': { - 'Enabled': True + 'ImageId': 'ami-123', + 'KeyName': 'my-key', + 'SecurityGroups': [ + { + 'GroupId': 'sg-123' }, - 'SubnetId': subnet_id, - 'IamInstanceProfile': { - 'Arn': 'arn:aws:iam::123456789012:role/fleet' - }, - 'EbsOptimized': False, - 'WeightedCapacity': 4.0, - 'SpotPrice': '10.00', + ], + 'UserData': 'some user data', + 'InstanceType': 't2.large', + 'Monitoring': { + 'Enabled': True + }, + 'SubnetId': subnet_id, + 'IamInstanceProfile': { + 'Arn': 'arn:aws:iam::123456789012:role/fleet' + }, + 'EbsOptimized': False, + 'WeightedCapacity': 4.0, + 'SpotPrice': '10.00', }], 'AllocationStrategy': allocation_strategy, 'FulfilledCapacity': 6, @@ -89,7 +91,8 @@ def test_create_spot_fleet_with_lowest_price(): ) spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - spot_fleet_requests = conn.describe_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] len(spot_fleet_requests).should.equal(1) spot_fleet_request = spot_fleet_requests[0] spot_fleet_request['SpotFleetRequestState'].should.equal("active") @@ -97,7 +100,8 @@ def test_create_spot_fleet_with_lowest_price(): spot_fleet_config['SpotPrice'].should.equal('0.12') spot_fleet_config['TargetCapacity'].should.equal(6) - 
spot_fleet_config['IamFleetRole'].should.equal('arn:aws:iam::123456789012:role/fleet') + spot_fleet_config['IamFleetRole'].should.equal( + 'arn:aws:iam::123456789012:role/fleet') spot_fleet_config['AllocationStrategy'].should.equal('lowestPrice') spot_fleet_config['FulfilledCapacity'].should.equal(6.0) @@ -106,7 +110,8 @@ def test_create_spot_fleet_with_lowest_price(): launch_spec['EbsOptimized'].should.equal(False) launch_spec['SecurityGroups'].should.equal([{"GroupId": "sg-123"}]) - launch_spec['IamInstanceProfile'].should.equal({"Arn": "arn:aws:iam::123456789012:role/fleet"}) + launch_spec['IamInstanceProfile'].should.equal( + {"Arn": "arn:aws:iam::123456789012:role/fleet"}) launch_spec['ImageId'].should.equal("ami-123") launch_spec['InstanceType'].should.equal("t2.small") launch_spec['KeyName'].should.equal("my-key") @@ -116,7 +121,8 @@ def test_create_spot_fleet_with_lowest_price(): launch_spec['UserData'].should.equal("some user data") launch_spec['WeightedCapacity'].should.equal(2.0) - instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id) + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) instances = instance_res['ActiveInstances'] len(instances).should.equal(3) @@ -125,14 +131,16 @@ def test_create_spot_fleet_with_lowest_price(): def test_create_diversified_spot_fleet(): conn = boto3.client("ec2", region_name='us-west-2') subnet_id = get_subnet_id(conn) - diversified_config = spot_config(subnet_id, allocation_strategy='diversified') + diversified_config = spot_config( + subnet_id, allocation_strategy='diversified') spot_fleet_res = conn.request_spot_fleet( SpotFleetRequestConfig=diversified_config ) spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id) + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) instances = instance_res['ActiveInstances'] 
len(instances).should.equal(2) instance_types = set([instance['InstanceType'] for instance in instances]) @@ -150,7 +158,9 @@ def test_cancel_spot_fleet_request(): ) spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - conn.cancel_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_id], TerminateInstances=True) + conn.cancel_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id], TerminateInstances=True) - spot_fleet_requests = conn.describe_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] len(spot_fleet_requests).should.equal(0) diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 2d3cb3036..5c3bdff12 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -18,7 +18,8 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds def test_request_spot_instances(): conn = boto3.client('ec2', 'us-east-1') vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] conn.create_security_group(GroupName='group1', Description='description') @@ -53,29 +54,31 @@ def test_request_spot_instances(): DryRun=True, ) ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal('An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + 
ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') request = conn.request_spot_instances( - SpotPrice="0.5", InstanceCount=1, Type='one-time', - ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", - AvailabilityZoneGroup='my-group', - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - "KeyName": "test", - "SecurityGroups": ['group1', 'group2'], - "UserData": b"some test data", - "InstanceType": 'm1.small', - "Placement": { - "AvailabilityZone": 'us-east-1c', - }, - "KernelId": "test-kernel", - "RamdiskId": "test-ramdisk", - "Monitoring": { - "Enabled": True, - }, - "SubnetId": subnet_id, + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": "test", + "SecurityGroups": ['group1', 'group2'], + "UserData": b"some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, ) requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] @@ -91,7 +94,8 @@ def test_request_spot_instances(): request['AvailabilityZoneGroup'].should.equal('my-group') launch_spec = request['LaunchSpecification'] - security_group_names = [group['GroupName'] for group in launch_spec['SecurityGroups']] + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] set(security_group_names).should.equal(set(['group1', 'group2'])) launch_spec['ImageId'].should.equal('ami-abcd1234') @@ -112,7 +116,7 @@ def test_request_spot_instances_default_arguments(): request = conn.request_spot_instances( SpotPrice="0.5", LaunchSpecification={ - "ImageId": 'ami-abcd1234', + "ImageId": 
'ami-abcd1234', } ) @@ -130,7 +134,8 @@ def test_request_spot_instances_default_arguments(): launch_spec = request['LaunchSpecification'] - security_group_names = [group['GroupName'] for group in launch_spec['SecurityGroups']] + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] security_group_names.should.equal(["default"]) launch_spec['ImageId'].should.equal('ami-abcd1234') @@ -152,12 +157,12 @@ def test_cancel_spot_instance_request(): requests = conn.get_all_spot_instance_requests() requests.should.have.length_of(1) - with assert_raises(EC2ResponseError) as ex: conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') conn.cancel_spot_instance_requests([requests[0].id]) @@ -239,10 +244,12 @@ def test_get_all_spot_instance_requests_filtering(): requests = conn.get_all_spot_instance_requests(filters={'state': 'open'}) requests.should.have.length_of(3) - requests = conn.get_all_spot_instance_requests(filters={'tag:tag1': 'value1'}) + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1'}) requests.should.have.length_of(2) - requests = conn.get_all_spot_instance_requests(filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) requests.should.have.length_of(1) @@ -259,4 +266,3 @@ def test_request_spot_instances_setting_instance_id(): request = conn.get_all_spot_instance_requests()[0] assert request.state == 'active' assert request.instance_id == 
'i-12345678' - diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 0a9b41b8e..38565a28f 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -69,7 +69,8 @@ def test_subnet_tagging(): def test_subnet_should_have_proper_availability_zone_set(): conn = boto.vpc.connect_to_region('us-west-1') vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet(vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b') + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b') subnetA.availability_zone.should.equal('us-west-1b') @@ -82,7 +83,8 @@ def test_default_subnet(): default_vpc.reload() default_vpc.is_default.should.be.ok - subnet = ec2.create_subnet(VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') + subnet = ec2.create_subnet( + VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') subnet.reload() subnet.map_public_ip_on_launch.shouldnt.be.ok @@ -109,7 +111,8 @@ def test_boto3_non_default_subnet(): vpc.reload() vpc.is_default.shouldnt.be.ok - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') subnet.reload() subnet.map_public_ip_on_launch.shouldnt.be.ok @@ -122,7 +125,8 @@ def test_modify_subnet_attribute(): # Get the default VPC vpc = list(ec2.vpcs.all())[0] - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action subnet.reload() @@ -130,11 +134,13 @@ def test_modify_subnet_attribute(): # For non default subnet, attribute value should be 'False' subnet.map_public_ip_on_launch.shouldnt.be.ok - client.modify_subnet_attribute(SubnetId=subnet.id, 
MapPublicIpOnLaunch={'Value': False}) + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False}) subnet.reload() subnet.map_public_ip_on_launch.shouldnt.be.ok - client.modify_subnet_attribute(SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True}) + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True}) subnet.reload() subnet.map_public_ip_on_launch.should.be.ok @@ -144,10 +150,12 @@ def test_modify_subnet_attribute_validation(): ec2 = boto3.resource('ec2', region_name='us-west-1') client = boto3.client('ec2', region_name='us-west-1') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') with assert_raises(ParamValidationError): - client.modify_subnet_attribute(SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) @mock_ec2_deprecated @@ -155,10 +163,13 @@ def test_get_subnets_filtering(): ec2 = boto.ec2.connect_to_region('us-west-1') conn = boto.vpc.connect_to_region('us-west-1') vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet(vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') vpcB = conn.create_vpc("10.0.0.0/16") - subnetB1 = conn.create_subnet(vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') - subnetB2 = conn.create_subnet(vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') + subnetB1 = conn.create_subnet( + vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetB2 = conn.create_subnet( + vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') all_subnets = conn.get_all_subnets() all_subnets.should.have.length_of(3 + len(ec2.get_all_zones())) @@ -166,25 +177,33 @@ 
def test_get_subnets_filtering(): # Filter by VPC ID subnets_by_vpc = conn.get_all_subnets(filters={'vpc-id': vpcB.id}) subnets_by_vpc.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_vpc]).should.equal(set([subnetB1.id, subnetB2.id])) + set([subnet.id for subnet in subnets_by_vpc]).should.equal( + set([subnetB1.id, subnetB2.id])) # Filter by CIDR variations subnets_by_cidr1 = conn.get_all_subnets(filters={'cidr': "10.0.0.0/24"}) subnets_by_cidr1.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr1]).should.equal(set([subnetA.id, subnetB1.id])) + set([subnet.id for subnet in subnets_by_cidr1] + ).should.equal(set([subnetA.id, subnetB1.id])) - subnets_by_cidr2 = conn.get_all_subnets(filters={'cidr-block': "10.0.0.0/24"}) + subnets_by_cidr2 = conn.get_all_subnets( + filters={'cidr-block': "10.0.0.0/24"}) subnets_by_cidr2.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr2]).should.equal(set([subnetA.id, subnetB1.id])) + set([subnet.id for subnet in subnets_by_cidr2] + ).should.equal(set([subnetA.id, subnetB1.id])) - subnets_by_cidr3 = conn.get_all_subnets(filters={'cidrBlock': "10.0.0.0/24"}) + subnets_by_cidr3 = conn.get_all_subnets( + filters={'cidrBlock': "10.0.0.0/24"}) subnets_by_cidr3.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr3]).should.equal(set([subnetA.id, subnetB1.id])) + set([subnet.id for subnet in subnets_by_cidr3] + ).should.equal(set([subnetA.id, subnetB1.id])) # Filter by VPC ID and CIDR - subnets_by_vpc_and_cidr = conn.get_all_subnets(filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) + subnets_by_vpc_and_cidr = conn.get_all_subnets( + filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) subnets_by_vpc_and_cidr.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_vpc_and_cidr]).should.equal(set([subnetB1.id])) + set([subnet.id for subnet in subnets_by_vpc_and_cidr] + ).should.equal(set([subnetB1.id])) # Filter by subnet ID subnets_by_id = 
conn.get_all_subnets(filters={'subnet-id': subnetA.id}) @@ -192,9 +211,11 @@ def test_get_subnets_filtering(): set([subnet.id for subnet in subnets_by_id]).should.equal(set([subnetA.id])) # Filter by availabilityZone - subnets_by_az = conn.get_all_subnets(filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id}) + subnets_by_az = conn.get_all_subnets( + filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id}) subnets_by_az.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_az]).should.equal(set([subnetB1.id])) + set([subnet.id for subnet in subnets_by_az] + ).should.equal(set([subnetB1.id])) # Filter by defaultForAz @@ -202,7 +223,8 @@ def test_get_subnets_filtering(): subnets_by_az.should.have.length_of(len(conn.get_all_zones())) # Unsupported filter - conn.get_all_subnets.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + conn.get_all_subnets.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) @mock_ec2_deprecated diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 23b7d0bd4..bb3a8d36b 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -22,11 +22,13 @@ def test_add_tag(): instance.add_tag("a key", "some value", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') instance.add_tag("a key", "some value") chain = itertools.chain.from_iterable - existing_instances = list(chain([res.instances for res in conn.get_all_instances()])) + existing_instances = list( + chain([res.instances for res in 
conn.get_all_instances()])) existing_instances.should.have.length_of(1) existing_instance = existing_instances[0] existing_instance.tags["a key"].should.equal("some value") @@ -49,7 +51,8 @@ def test_remove_tag(): instance.remove_tag("a key", dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set') instance.remove_tag("a key") conn.get_all_tags().should.have.length_of(0) @@ -100,12 +103,15 @@ def test_create_tags(): conn.create_tags(instance.id, tag_dict, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) - ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') conn.create_tags(instance.id, tag_dict) tags = conn.get_all_tags() - set([key for key in tag_dict]).should.equal(set([tag.name for tag in tags])) - set([tag_dict[key] for key in tag_dict]).should.equal(set([tag.value for tag in tags])) + set([key for key in tag_dict]).should.equal( + set([tag.name for tag in tags])) + set([tag_dict[key] for key in tag_dict]).should.equal( + set([tag.value for tag in tags])) @mock_ec2_deprecated @@ -115,7 +121,7 @@ def test_tag_limit_exceeded(): instance = reservation.instances[0] tag_dict = {} for i in range(51): - tag_dict['{0:02d}'.format(i+1)] = '' + tag_dict['{0:02d}'.format(i + 1)] = '' with assert_raises(EC2ResponseError) as cm: conn.create_tags(instance.id, 
tag_dict) @@ -342,7 +348,8 @@ def test_retrieved_snapshots_must_contain_their_tags(): tag_key = 'Tag name' tag_value = 'Tag value' tags_to_be_set = {tag_key: tag_value} - conn = boto.connect_ec2(aws_access_key_id='the_key', aws_secret_access_key='the_secret') + conn = boto.connect_ec2(aws_access_key_id='the_key', + aws_secret_access_key='the_secret') volume = conn.create_volume(80, "eu-west-1a") snapshot = conn.create_snapshot(volume.id) conn.create_tags([snapshot.id], tags_to_be_set) @@ -361,7 +368,8 @@ def test_retrieved_snapshots_must_contain_their_tags(): @mock_ec2_deprecated def test_filter_instances_by_wildcard_tags(): - conn = boto.connect_ec2(aws_access_key_id='the_key', aws_secret_access_key='the_secret') + conn = boto.connect_ec2(aws_access_key_id='the_key', + aws_secret_access_key='the_secret') reservation = conn.run_instances('ami-1234abcd') instance_a = reservation.instances[0] instance_a.add_tag("Key1", "Value1") diff --git a/tests/test_ec2/test_virtual_private_gateways.py b/tests/test_ec2/test_virtual_private_gateways.py index 0a7e34ea5..d90e97b45 100644 --- a/tests/test_ec2/test_virtual_private_gateways.py +++ b/tests/test_ec2/test_virtual_private_gateways.py @@ -16,6 +16,7 @@ def test_virtual_private_gateways(): vpn_gateway.state.should.equal('available') vpn_gateway.availability_zone.should.equal('us-east-1a') + @mock_ec2_deprecated def test_describe_vpn_gateway(): conn = boto.connect_vpc('the_key', 'the_secret') diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index c6a2feffb..6722eed60 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -93,4 +93,3 @@ def test_vpc_peering_connections_delete(): cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index c4dbf788e..904603f6d 100644 --- 
a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -42,13 +42,16 @@ def test_vpc_defaults(): conn.get_all_vpcs().should.have.length_of(2) conn.get_all_route_tables().should.have.length_of(2) - conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(1) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(1) vpc.delete() conn.get_all_vpcs().should.have.length_of(1) conn.get_all_route_tables().should.have.length_of(1) - conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(0) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(0) + @mock_ec2_deprecated def test_vpc_isdefault_filter(): @@ -80,6 +83,7 @@ def test_vpc_state_available_filter(): vpc.delete() conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) + @mock_ec2_deprecated def test_vpc_tagging(): conn = boto.connect_vpc() @@ -127,7 +131,8 @@ def test_vpc_get_by_cidr_block(): @mock_ec2_deprecated def test_vpc_get_by_dhcp_options_id(): conn = boto.connect_vpc() - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") conn.create_vpc("10.0.0.0/24") @@ -284,6 +289,7 @@ def test_non_default_vpc(): attr = response.get('EnableDnsHostnames') attr.get('Value').shouldnt.be.ok + @mock_ec2 def test_vpc_dedicated_tenancy(): ec2 = boto3.resource('ec2', region_name='us-west-1') @@ -298,6 +304,7 @@ def test_vpc_dedicated_tenancy(): vpc.instance_tenancy.should.equal('dedicated') + @mock_ec2 def test_vpc_modify_enable_dns_support(): ec2 = boto3.resource('ec2', region_name='us-west-1') @@ -339,10 +346,12 @@ def test_vpc_modify_enable_dns_hostnames(): attr = response.get('EnableDnsHostnames') attr.get('Value').should.be.ok + @mock_ec2_deprecated def test_vpc_associate_dhcp_options(): conn = 
boto.connect_vpc() - dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) vpc = conn.create_vpc("10.0.0.0/16") conn.associate_dhcp_options(dhcp_options.id, vpc.id) diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py index 864c1c3ee..e95aa76ee 100644 --- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -10,27 +10,32 @@ from moto import mock_ec2_deprecated @mock_ec2_deprecated def test_create_vpn_connections(): conn = boto.connect_vpc('the_key', 'the_secret') - vpn_connection = conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + vpn_connection = conn.create_vpn_connection( + 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') vpn_connection.should_not.be.none vpn_connection.id.should.match(r'vpn-\w+') vpn_connection.type.should.equal('ipsec.1') + @mock_ec2_deprecated def test_delete_vpn_connections(): conn = boto.connect_vpc('the_key', 'the_secret') - vpn_connection = conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + vpn_connection = conn.create_vpn_connection( + 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') list_of_vpn_connections = conn.get_all_vpn_connections() list_of_vpn_connections.should.have.length_of(1) conn.delete_vpn_connection(vpn_connection.id) list_of_vpn_connections = conn.get_all_vpn_connections() list_of_vpn_connections.should.have.length_of(0) + @mock_ec2_deprecated def test_delete_vpn_connections_bad_id(): conn = boto.connect_vpc('the_key', 'the_secret') with assert_raises(EC2ResponseError): conn.delete_vpn_connection('vpn-0123abcd') + @mock_ec2_deprecated def test_describe_vpn_connections(): conn = boto.connect_vpc('the_key', 'the_secret') diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index f073628a9..044d827c9 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ 
b/tests/test_ecs/test_ecs_boto3.py @@ -20,7 +20,8 @@ def test_create_cluster(): clusterName='test_ecs_cluster' ) response['cluster']['clusterName'].should.equal('test_ecs_cluster') - response['cluster']['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['cluster']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') response['cluster']['status'].should.equal('ACTIVE') response['cluster']['registeredContainerInstancesCount'].should.equal(0) response['cluster']['runningTasksCount'].should.equal(0) @@ -38,8 +39,10 @@ def test_list_clusters(): clusterName='test_cluster1' ) response = client.list_clusters() - response['clusterArns'].should.contain('arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0') - response['clusterArns'].should.contain('arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') + response['clusterArns'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0') + response['clusterArns'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') @mock_ecs @@ -50,7 +53,8 @@ def test_delete_cluster(): ) response = client.delete_cluster(cluster='test_ecs_cluster') response['cluster']['clusterName'].should.equal('test_ecs_cluster') - response['cluster']['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['cluster']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') response['cluster']['status'].should.equal('ACTIVE') response['cluster']['registeredContainerInstancesCount'].should.equal(0) response['cluster']['runningTasksCount'].should.equal(0) @@ -82,15 +86,24 @@ def test_register_task_definition(): ] ) type(response['taskDefinition']).should.be(dict) - response['taskDefinition']['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - 
response['taskDefinition']['containerDefinitions'][0]['name'].should.equal('hello_world') - response['taskDefinition']['containerDefinitions'][0]['image'].should.equal('docker/hello-world:latest') - response['taskDefinition']['containerDefinitions'][0]['cpu'].should.equal(1024) - response['taskDefinition']['containerDefinitions'][0]['memory'].should.equal(400) - response['taskDefinition']['containerDefinitions'][0]['essential'].should.equal(True) - response['taskDefinition']['containerDefinitions'][0]['environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') - response['taskDefinition']['containerDefinitions'][0]['environment'][0]['value'].should.equal('SOME_ACCESS_KEY') - response['taskDefinition']['containerDefinitions'][0]['logConfiguration']['logDriver'].should.equal('json-file') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinition']['containerDefinitions'][ + 0]['name'].should.equal('hello_world') + response['taskDefinition']['containerDefinitions'][0][ + 'image'].should.equal('docker/hello-world:latest') + response['taskDefinition']['containerDefinitions'][ + 0]['cpu'].should.equal(1024) + response['taskDefinition']['containerDefinitions'][ + 0]['memory'].should.equal(400) + response['taskDefinition']['containerDefinitions'][ + 0]['essential'].should.equal(True) + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') + response['taskDefinition']['containerDefinitions'][0][ + 'logConfiguration']['logDriver'].should.equal('json-file') @mock_ecs @@ -132,8 +145,10 @@ def test_list_task_definitions(): ) response = client.list_task_definitions() len(response['taskDefinitionArns']).should.equal(2) - 
response['taskDefinitionArns'][0].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinitionArns'][1].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') + response['taskDefinitionArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinitionArns'][1].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') @mock_ecs @@ -191,10 +206,13 @@ def test_describe_task_definition(): ] ) response = client.describe_task_definition(taskDefinition='test_ecs_task') - response['taskDefinition']['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:3') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:3') - response = client.describe_task_definition(taskDefinition='test_ecs_task:2') - response['taskDefinition']['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') + response = client.describe_task_definition( + taskDefinition='test_ecs_task:2') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') @mock_ecs @@ -221,15 +239,24 @@ def test_deregister_task_definition(): taskDefinition='test_ecs_task:1' ) type(response['taskDefinition']).should.be(dict) - response['taskDefinition']['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinition']['containerDefinitions'][0]['name'].should.equal('hello_world') - response['taskDefinition']['containerDefinitions'][0]['image'].should.equal('docker/hello-world:latest') - response['taskDefinition']['containerDefinitions'][0]['cpu'].should.equal(1024) - response['taskDefinition']['containerDefinitions'][0]['memory'].should.equal(400) - 
response['taskDefinition']['containerDefinitions'][0]['essential'].should.equal(True) - response['taskDefinition']['containerDefinitions'][0]['environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') - response['taskDefinition']['containerDefinitions'][0]['environment'][0]['value'].should.equal('SOME_ACCESS_KEY') - response['taskDefinition']['containerDefinitions'][0]['logConfiguration']['logDriver'].should.equal('json-file') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinition']['containerDefinitions'][ + 0]['name'].should.equal('hello_world') + response['taskDefinition']['containerDefinitions'][0][ + 'image'].should.equal('docker/hello-world:latest') + response['taskDefinition']['containerDefinitions'][ + 0]['cpu'].should.equal(1024) + response['taskDefinition']['containerDefinitions'][ + 0]['memory'].should.equal(400) + response['taskDefinition']['containerDefinitions'][ + 0]['essential'].should.equal(True) + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') + response['taskDefinition']['containerDefinitions'][0][ + 'logConfiguration']['logDriver'].should.equal('json-file') @mock_ecs @@ -261,16 +288,19 @@ def test_create_service(): taskDefinition='test_ecs_task', desiredCount=2 ) - response['service']['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') response['service']['desiredCount'].should.equal(2) len(response['service']['events']).should.equal(0) len(response['service']['loadBalancers']).should.equal(0) response['service']['pendingCount'].should.equal(0) response['service']['runningCount'].should.equal(0) - 
response['service']['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') response['service']['serviceName'].should.equal('test_ecs_service') response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') @mock_ecs @@ -312,8 +342,10 @@ def test_list_services(): cluster='test_ecs_cluster' ) len(response['serviceArns']).should.equal(2) - response['serviceArns'][0].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['serviceArns'][1].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['serviceArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['serviceArns'][1].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') @mock_ecs @@ -359,12 +391,15 @@ def test_describe_services(): ) response = client.describe_services( cluster='test_ecs_cluster', - services=['test_ecs_service1', 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2'] + services=['test_ecs_service1', + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2'] ) len(response['services']).should.equal(2) - response['services'][0]['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['services'][0]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') response['services'][0]['serviceName'].should.equal('test_ecs_service1') - response['services'][1]['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + 
response['services'][1]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') response['services'][1]['serviceName'].should.equal('test_ecs_service2') @@ -446,16 +481,20 @@ def test_delete_service(): cluster='test_ecs_cluster', service='test_ecs_service' ) - response['service']['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') response['service']['desiredCount'].should.equal(0) len(response['service']['events']).should.equal(0) len(response['service']['loadBalancers']).should.equal(0) response['service']['pendingCount'].should.equal(0) response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') response['service']['serviceName'].should.equal('test_ecs_service') response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + @mock_ec2 @mock_ecs @@ -484,18 +523,23 @@ def test_register_container_instance(): instanceIdentityDocument=instance_id_document ) - response['containerInstance']['ec2InstanceId'].should.equal(test_instance.id) + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) full_arn = response['containerInstance']['containerInstanceArn'] arn_part = full_arn.split('/') - arn_part[0].should.equal('arn:aws:ecs:us-east-1:012345678910:container-instance') + arn_part[0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:container-instance') arn_part[1].should.equal(str(UUID(arn_part[1]))) 
response['containerInstance']['status'].should.equal('ACTIVE') len(response['containerInstance']['registeredResources']).should.equal(0) len(response['containerInstance']['remainingResources']).should.equal(0) response['containerInstance']['agentConnected'].should.equal(True) - response['containerInstance']['versionInfo']['agentVersion'].should.equal('1.0.0') - response['containerInstance']['versionInfo']['agentHash'].should.equal('4023248') - response['containerInstance']['versionInfo']['dockerVersion'].should.equal('DockerVersion: 1.5.0') + response['containerInstance']['versionInfo'][ + 'agentVersion'].should.equal('1.0.0') + response['containerInstance']['versionInfo'][ + 'agentHash'].should.equal('4023248') + response['containerInstance']['versionInfo'][ + 'dockerVersion'].should.equal('DockerVersion: 1.5.0') @mock_ec2 @@ -526,7 +570,8 @@ def test_list_container_instances(): cluster=test_cluster_name, instanceIdentityDocument=instance_id_document) - test_instance_arns.append(response['containerInstance']['containerInstanceArn']) + test_instance_arns.append(response['containerInstance'][ + 'containerInstanceArn']) response = ecs_client.list_container_instances(cluster=test_cluster_name) @@ -563,13 +608,17 @@ def test_describe_container_instances(): cluster=test_cluster_name, instanceIdentityDocument=instance_id_document) - test_instance_arns.append(response['containerInstance']['containerInstanceArn']) + test_instance_arns.append(response['containerInstance'][ + 'containerInstanceArn']) - test_instance_ids = list(map((lambda x: x.split('/')[1]), test_instance_arns)) - response = ecs_client.describe_container_instances(cluster=test_cluster_name, containerInstances=test_instance_ids) + test_instance_ids = list( + map((lambda x: x.split('/')[1]), test_instance_arns)) + response = ecs_client.describe_container_instances( + cluster=test_cluster_name, containerInstances=test_instance_ids) len(response['failures']).should.equal(0) 
len(response['containerInstances']).should.equal(instance_to_create) - response_arns = [ci['containerInstanceArn'] for ci in response['containerInstances']] + response_arns = [ci['containerInstanceArn'] + for ci in response['containerInstances']] for arn in test_instance_arns: response_arns.should.contain(arn) @@ -626,10 +675,14 @@ def test_run_task(): startedBy='moto' ) len(response['tasks']).should.equal(2) - response['tasks'][0]['taskArn'].should.contain('arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['tasks'][0]['containerInstanceArn'].should.contain('arn:aws:ecs:us-east-1:012345678910:container-instance/') + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/') response['tasks'][0]['overrides'].should.equal({}) response['tasks'][0]['lastStatus'].should.equal("RUNNING") response['tasks'][0]['desiredStatus'].should.equal("RUNNING") @@ -664,8 +717,10 @@ def test_start_task(): instanceIdentityDocument=instance_id_document ) - container_instances = client.list_container_instances(cluster=test_cluster_name) - container_instance_id = container_instances['containerInstanceArns'][0].split('/')[-1] + container_instances = client.list_container_instances( + cluster=test_cluster_name) + container_instance_id = container_instances[ + 'containerInstanceArns'][0].split('/')[-1] _ = client.register_task_definition( 
family='test_ecs_task', @@ -694,10 +749,14 @@ def test_start_task(): ) len(response['tasks']).should.equal(1) - response['tasks'][0]['taskArn'].should.contain('arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['tasks'][0]['containerInstanceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:container-instance/{0}'.format(container_instance_id)) + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/{0}'.format(container_instance_id)) response['tasks'][0]['overrides'].should.equal({}) response['tasks'][0]['lastStatus'].should.equal("RUNNING") response['tasks'][0]['desiredStatus'].should.equal("RUNNING") @@ -732,8 +791,10 @@ def test_list_tasks(): instanceIdentityDocument=instance_id_document ) - container_instances = client.list_container_instances(cluster=test_cluster_name) - container_instance_id = container_instances['containerInstanceArns'][0].split('/')[-1] + container_instances = client.list_container_instances( + cluster=test_cluster_name) + container_instance_id = container_instances[ + 'containerInstanceArns'][0].split('/')[-1] _ = client.register_task_definition( family='test_ecs_task', @@ -770,7 +831,8 @@ def test_list_tasks(): ) assert len(client.list_tasks()['taskArns']).should.equal(2) - assert len(client.list_tasks(cluster='test_ecs_cluster')['taskArns']).should.equal(2) + assert 
len(client.list_tasks(cluster='test_ecs_cluster') + ['taskArns']).should.equal(2) assert len(client.list_tasks(startedBy='foo')['taskArns']).should.equal(1) @@ -819,7 +881,7 @@ def test_describe_tasks(): ] ) tasks_arns = [ - task['taskArn'] for task in client.run_task( + task['taskArn'] for task in client.run_task( cluster='test_ecs_cluster', overrides={}, taskDefinition='test_ecs_task', @@ -833,7 +895,8 @@ def test_describe_tasks(): ) len(response['tasks']).should.equal(2) - set([response['tasks'][0]['taskArn'], response['tasks'][1]['taskArn']]).should.equal(set(tasks_arns)) + set([response['tasks'][0]['taskArn'], response['tasks'] + [1]['taskArn']]).should.equal(set(tasks_arns)) @mock_ecs @@ -858,9 +921,11 @@ def describe_task_definition(): family = task_definition['family'] task = client.describe_task_definition(taskDefinition=family) task['containerDefinitions'][0].should.equal(container_definition) - task['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task2:1') + task['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task2:1') task['volumes'].should.equal([]) + @mock_ec2 @mock_ecs def test_stop_task(): @@ -918,7 +983,8 @@ def test_stop_task(): reason='moto testing' ) - stop_response['task']['taskArn'].should.equal(run_response['tasks'][0].get('taskArn')) + stop_response['task']['taskArn'].should.equal( + run_response['tasks'][0].get('taskArn')) stop_response['task']['lastStatus'].should.equal('STOPPED') stop_response['task']['desiredStatus'].should.equal('STOPPED') stop_response['task']['stoppedReason'].should.equal('moto testing') @@ -967,7 +1033,8 @@ def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement } } template2 = deepcopy(template1) - template2['Resources']['testCluster']['Properties']['ClusterName'] = 'testcluster2' + template2['Resources']['testCluster'][ + 'Properties']['ClusterName'] = 'testcluster2' template1_json = 
json.dumps(template1) cfn_conn = boto3.client('cloudformation', region_name='us-west-1') stack_resp = cfn_conn.create_stack( @@ -994,18 +1061,18 @@ def test_create_task_definition_through_cloudformation(): "Description": "ECS Cluster Test CloudFormation", "Resources": { "testTaskDefinition": { - "Type" : "AWS::ECS::TaskDefinition", - "Properties" : { - "ContainerDefinitions" : [ + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ { "Name": "ecs-sample", - "Image":"amazon/amazon-ecs-sample", + "Image": "amazon/amazon-ecs-sample", "Cpu": "200", "Memory": "500", "Essential": "true" } ], - "Volumes" : [], + "Volumes": [], } } } @@ -1030,19 +1097,19 @@ def test_update_task_definition_family_through_cloudformation_should_trigger_a_r "Description": "ECS Cluster Test CloudFormation", "Resources": { "testTaskDefinition": { - "Type" : "AWS::ECS::TaskDefinition", - "Properties" : { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { "Family": "testTaskDefinition1", - "ContainerDefinitions" : [ + "ContainerDefinitions": [ { "Name": "ecs-sample", - "Image":"amazon/amazon-ecs-sample", + "Image": "amazon/amazon-ecs-sample", "Cpu": "200", "Memory": "500", "Essential": "true" } ], - "Volumes" : [], + "Volumes": [], } } } @@ -1055,7 +1122,8 @@ def test_update_task_definition_family_through_cloudformation_should_trigger_a_r ) template2 = deepcopy(template1) - template2['Resources']['testTaskDefinition']['Properties']['Family'] = 'testTaskDefinition2' + template2['Resources']['testTaskDefinition'][ + 'Properties']['Family'] = 'testTaskDefinition2' template2_json = json.dumps(template2) cfn_conn.update_stack( StackName="test_stack", @@ -1065,7 +1133,8 @@ def test_update_task_definition_family_through_cloudformation_should_trigger_a_r ecs_conn = boto3.client('ecs', region_name='us-west-1') resp = ecs_conn.list_task_definitions(familyPrefix='testTaskDefinition') len(resp['taskDefinitionArns']).should.equal(1) - 
resp['taskDefinitionArns'][0].endswith('testTaskDefinition2:1').should.be.true + resp['taskDefinitionArns'][0].endswith( + 'testTaskDefinition2:1').should.be.true @mock_ecs @@ -1082,18 +1151,18 @@ def test_create_service_through_cloudformation(): } }, "testTaskDefinition": { - "Type" : "AWS::ECS::TaskDefinition", - "Properties" : { - "ContainerDefinitions" : [ + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ { "Name": "ecs-sample", - "Image":"amazon/amazon-ecs-sample", + "Image": "amazon/amazon-ecs-sample", "Cpu": "200", "Memory": "500", "Essential": "true" } ], - "Volumes" : [], + "Volumes": [], } }, "testService": { @@ -1132,18 +1201,18 @@ def test_update_service_through_cloudformation_should_trigger_replacement(): } }, "testTaskDefinition": { - "Type" : "AWS::ECS::TaskDefinition", - "Properties" : { - "ContainerDefinitions" : [ + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ { "Name": "ecs-sample", - "Image":"amazon/amazon-ecs-sample", + "Image": "amazon/amazon-ecs-sample", "Cpu": "200", "Memory": "500", "Essential": "true" } ], - "Volumes" : [], + "Volumes": [], } }, "testService": { diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index fa13fc23b..4b5d59d6d 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -20,6 +20,7 @@ import sure # noqa from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated + @mock_elb_deprecated def test_create_load_balancer(): conn = boto.connect_elb() @@ -32,7 +33,8 @@ def test_create_load_balancer(): balancer = balancers[0] balancer.name.should.equal("my-lb") balancer.scheme.should.equal("internal") - set(balancer.availability_zones).should.equal(set(['us-east-1a', 'us-east-1b'])) + set(balancer.availability_zones).should.equal( + set(['us-east-1a', 'us-east-1b'])) listener1 = balancer.listeners[0] listener1.load_balancer_port.should.equal(80) listener1.instance_port.should.equal(8080) @@ -46,7 
+48,8 @@ def test_create_load_balancer(): @mock_elb_deprecated def test_getting_missing_elb(): conn = boto.connect_elb() - conn.get_all_load_balancers.when.called_with(load_balancer_names='aaa').should.throw(BotoServerError) + conn.get_all_load_balancers.when.called_with( + load_balancer_names='aaa').should.throw(BotoServerError) @mock_elb_deprecated @@ -63,12 +66,14 @@ def test_create_elb_in_multiple_region(): list(west1_conn.get_all_load_balancers()).should.have.length_of(1) list(west2_conn.get_all_load_balancers()).should.have.length_of(1) + @mock_elb_deprecated def test_create_load_balancer_with_certificate(): conn = boto.connect_elb() zones = ['us-east-1a'] - ports = [(443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')] + ports = [ + (443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')] conn.create_load_balancer('my-lb', zones, ports) balancers = conn.get_all_load_balancers() @@ -80,7 +85,8 @@ def test_create_load_balancer_with_certificate(): listener.load_balancer_port.should.equal(443) listener.instance_port.should.equal(8443) listener.protocol.should.equal("HTTPS") - listener.ssl_certificate_id.should.equal('arn:aws:iam:123456789012:server-certificate/test-cert') + listener.ssl_certificate_id.should.equal( + 'arn:aws:iam:123456789012:server-certificate/test-cert') @mock_elb @@ -89,15 +95,19 @@ def test_create_and_delete_boto3_support(): client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) - list(client.describe_load_balancers()['LoadBalancerDescriptions']).should.have.length_of(1) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) client.delete_load_balancer( LoadBalancerName='my-lb' ) - 
list(client.describe_load_balancers()['LoadBalancerDescriptions']).should.have.length_of(0) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(0) + @mock_elb_deprecated def test_add_listener(): @@ -142,23 +152,32 @@ def test_create_and_delete_listener_boto3_support(): client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'http', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) - list(client.describe_load_balancers()['LoadBalancerDescriptions']).should.have.length_of(1) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) client.create_load_balancer_listeners( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':443, 'InstancePort':8443}] + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 8443}] ) balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] list(balancer['ListenerDescriptions']).should.have.length_of(2) - balancer['ListenerDescriptions'][0]['Listener']['Protocol'].should.equal('HTTP') - balancer['ListenerDescriptions'][0]['Listener']['LoadBalancerPort'].should.equal(80) - balancer['ListenerDescriptions'][0]['Listener']['InstancePort'].should.equal(8080) - balancer['ListenerDescriptions'][1]['Listener']['Protocol'].should.equal('TCP') - balancer['ListenerDescriptions'][1]['Listener']['LoadBalancerPort'].should.equal(443) - balancer['ListenerDescriptions'][1]['Listener']['InstancePort'].should.equal(8443) + balancer['ListenerDescriptions'][0][ + 'Listener']['Protocol'].should.equal('HTTP') + balancer['ListenerDescriptions'][0]['Listener'][ + 'LoadBalancerPort'].should.equal(80) + balancer['ListenerDescriptions'][0]['Listener'][ + 'InstancePort'].should.equal(8080) + balancer['ListenerDescriptions'][1][ + 'Listener']['Protocol'].should.equal('TCP') + 
balancer['ListenerDescriptions'][1]['Listener'][ + 'LoadBalancerPort'].should.equal(443) + balancer['ListenerDescriptions'][1]['Listener'][ + 'InstancePort'].should.equal(8443) @mock_elb_deprecated @@ -189,8 +208,10 @@ def test_get_load_balancers_by_name(): conn.create_load_balancer('my-lb3', zones, ports) conn.get_all_load_balancers().should.have.length_of(3) - conn.get_all_load_balancers(load_balancer_names=['my-lb1']).should.have.length_of(1) - conn.get_all_load_balancers(load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) + conn.get_all_load_balancers( + load_balancer_names=['my-lb1']).should.have.length_of(1) + conn.get_all_load_balancers( + load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) @mock_elb_deprecated @@ -240,7 +261,8 @@ def test_create_health_check_boto3(): client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'http', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) client.configure_health_check( @@ -285,14 +307,16 @@ def test_register_instances(): @mock_elb def test_register_instances_boto3(): ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances(ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) instance_id1 = response[0].id instance_id2 = response[1].id client = boto3.client('elb', region_name='us-east-1') client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'http', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) client.register_instances_with_load_balancer( @@ -303,7 +327,8 @@ def test_register_instances_boto3(): ] ) balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - 
instance_ids = [instance['InstanceId'] for instance in balancer['Instances']] + instance_ids = [instance['InstanceId'] + for instance in balancer['Instances']] set(instance_ids).should.equal(set([instance_id1, instance_id2])) @@ -328,18 +353,21 @@ def test_deregister_instances(): balancer.instances.should.have.length_of(1) balancer.instances[0].id.should.equal(instance_id2) + @mock_ec2 @mock_elb def test_deregister_instances_boto3(): ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances(ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) instance_id1 = response[0].id instance_id2 = response[1].id client = boto3.client('elb', region_name='us-east-1') client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'http', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) client.register_instances_with_load_balancer( @@ -403,18 +431,21 @@ def test_connection_draining_attribute(): connection_draining.enabled = True connection_draining.timeout = 60 - conn.modify_lb_attribute("my-lb", "ConnectionDraining", connection_draining) + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) attributes = lb.get_attributes(force=True) attributes.connection_draining.enabled.should.be.true attributes.connection_draining.timeout.should.equal(60) connection_draining.timeout = 30 - conn.modify_lb_attribute("my-lb", "ConnectionDraining", connection_draining) + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) attributes = lb.get_attributes(force=True) attributes.connection_draining.timeout.should.equal(30) connection_draining.enabled = False - conn.modify_lb_attribute("my-lb", "ConnectionDraining", connection_draining) + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", 
connection_draining) attributes = lb.get_attributes(force=True) attributes.connection_draining.enabled.should.be.false @@ -453,15 +484,18 @@ def test_connection_settings_attribute(): connection_settings = ConnectionSettingAttribute(conn) connection_settings.idle_timeout = 120 - conn.modify_lb_attribute("my-lb", "ConnectingSettings", connection_settings) + conn.modify_lb_attribute( + "my-lb", "ConnectingSettings", connection_settings) attributes = lb.get_attributes(force=True) attributes.connecting_settings.idle_timeout.should.equal(120) connection_settings.idle_timeout = 60 - conn.modify_lb_attribute("my-lb", "ConnectingSettings", connection_settings) + conn.modify_lb_attribute( + "my-lb", "ConnectingSettings", connection_settings) attributes = lb.get_attributes(force=True) attributes.connecting_settings.idle_timeout.should.equal(60) + @mock_elb_deprecated def test_create_lb_cookie_stickiness_policy(): conn = boto.connect_elb() @@ -478,9 +512,13 @@ def test_create_lb_cookie_stickiness_policy(): # documentation to be a long numeric. # # To work around that, this value is converted to an int and checked. 
- cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[0].cookie_expiration_period - int(cookie_expiration_period_response_str).should.equal(cookie_expiration_period) - lb.policies.lb_cookie_stickiness_policies[0].policy_name.should.equal(policy_name) + cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period + int(cookie_expiration_period_response_str).should.equal( + cookie_expiration_period) + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + @mock_elb_deprecated def test_create_lb_cookie_stickiness_policy_no_expiry(): @@ -492,8 +530,11 @@ def test_create_lb_cookie_stickiness_policy_no_expiry(): lb.create_cookie_stickiness_policy(None, policy_name) lb = conn.get_all_load_balancers()[0] - lb.policies.lb_cookie_stickiness_policies[0].cookie_expiration_period.should.be.none - lb.policies.lb_cookie_stickiness_policies[0].policy_name.should.equal(policy_name) + lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period.should.be.none + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + @mock_elb_deprecated def test_create_app_cookie_stickiness_policy(): @@ -506,8 +547,11 @@ def test_create_app_cookie_stickiness_policy(): lb.create_app_cookie_stickiness_policy(cookie_name, policy_name) lb = conn.get_all_load_balancers()[0] - lb.policies.app_cookie_stickiness_policies[0].cookie_name.should.equal(cookie_name) - lb.policies.app_cookie_stickiness_policies[0].policy_name.should.equal(policy_name) + lb.policies.app_cookie_stickiness_policies[ + 0].cookie_name.should.equal(cookie_name) + lb.policies.app_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + @mock_elb_deprecated def test_create_lb_policy(): @@ -516,11 +560,13 @@ def test_create_lb_policy(): lb = conn.create_load_balancer('my-lb', [], ports) policy_name = "ProxyPolicy" - lb.create_lb_policy(policy_name, 
'ProxyProtocolPolicyType', {'ProxyProtocol': True}) + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) lb = conn.get_all_load_balancers()[0] lb.policies.other_policies[0].policy_name.should.equal(policy_name) + @mock_elb_deprecated def test_set_policies_of_listener(): conn = boto.connect_elb() @@ -543,6 +589,7 @@ def test_set_policies_of_listener(): # by contrast to a backend, a listener stores only policy name strings listener.policy_names[0].should.equal(policy_name) + @mock_elb_deprecated def test_set_policies_of_backend_server(): conn = boto.connect_elb() @@ -553,7 +600,8 @@ def test_set_policies_of_backend_server(): # in a real flow, it is necessary first to create a policy, # then to set that policy to the backend - lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', {'ProxyProtocol': True}) + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) lb.set_policies_of_backend_server(instance_port, [policy_name]) lb = conn.get_all_load_balancers()[0] @@ -562,6 +610,7 @@ def test_set_policies_of_backend_server(): # by contrast to a listener, a backend stores OtherPolicy objects backend.policies[0].policy_name.should.equal(policy_name) + @mock_ec2_deprecated @mock_elb_deprecated def test_describe_instance_health(): @@ -583,7 +632,8 @@ def test_describe_instance_health(): instances_health = conn.describe_instance_health('my-lb') instances_health.should.have.length_of(2) for instance_health in instances_health: - instance_health.instance_id.should.be.within([instance_id1, instance_id2]) + instance_health.instance_id.should.be.within( + [instance_id1, instance_id2]) instance_health.state.should.equal('InService') instances_health = conn.describe_instance_health('my-lb', [instance_id1]) @@ -597,76 +647,78 @@ def test_add_remove_tags(): client = boto3.client('elb', region_name='us-east-1') client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - 
}]).should.throw(botocore.exceptions.ClientError) - + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) - list(client.describe_load_balancers()['LoadBalancerDescriptions']).should.have.length_of(1) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) client.add_tags(LoadBalancerNames=['my-lb'], Tags=[{ - 'Key': 'a', - 'Value': 'b' + 'Key': 'a', + 'Value': 'b' }]) - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags(LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) tags.should.have.key('a').which.should.equal('b') client.add_tags(LoadBalancerNames=['my-lb'], Tags=[{ - 'Key': 'a', - 'Value': 'b' + 'Key': 'a', + 'Value': 'b' }, { - 'Key': 'b', - 'Value': 'b' + 'Key': 'b', + 'Value': 'b' }, { - 'Key': 'c', - 'Value': 'b' + 'Key': 'c', + 'Value': 'b' }, { - 'Key': 'd', - 'Value': 'b' + 'Key': 'd', + 'Value': 'b' }, { - 'Key': 'e', - 'Value': 'b' + 'Key': 'e', + 'Value': 'b' }, { - 'Key': 'f', - 'Value': 'b' + 'Key': 'f', + 'Value': 'b' }, { - 'Key': 'g', - 'Value': 'b' + 'Key': 'g', + 'Value': 'b' }, { - 'Key': 'h', - 'Value': 'b' + 'Key': 'h', + 'Value': 'b' }, { - 'Key': 'i', - 'Value': 'b' + 'Key': 'i', + 'Value': 'b' }, { - 'Key': 'j', - 'Value': 'b' + 'Key': 'j', + 'Value': 'b' }]) client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'k', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) + Tags=[{ + 'Key': 'k', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) client.add_tags(LoadBalancerNames=['my-lb'], 
Tags=[{ - 'Key': 'j', - 'Value': 'c' + 'Key': 'j', + 'Value': 'c' }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags(LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) tags.should.have.key('a').which.should.equal('b') tags.should.have.key('b').which.should.equal('b') @@ -681,11 +733,12 @@ def test_add_remove_tags(): tags.shouldnt.have.key('k') client.remove_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a' - }]) + Tags=[{ + 'Key': 'a' + }]) - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags(LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) tags.shouldnt.have.key('a') tags.should.have.key('b').which.should.equal('b') @@ -698,17 +751,17 @@ def test_add_remove_tags(): tags.should.have.key('i').which.should.equal('b') tags.should.have.key('j').which.should.equal('c') - client.create_load_balancer( LoadBalancerName='other-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':433, 'InstancePort':8433}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 433, 'InstancePort': 8433}], AvailabilityZones=['us-east-1a', 'us-east-1b'] ) client.add_tags(LoadBalancerNames=['other-lb'], Tags=[{ - 'Key': 'other', - 'Value': 'something' + 'Key': 'other', + 'Value': 'something' }]) lb_tags = dict([(l['LoadBalancerName'], dict([(d['Key'], d['Value']) for d in l['Tags']])) @@ -718,7 +771,8 @@ def test_add_remove_tags(): lb_tags.should.have.key('other-lb') lb_tags['my-lb'].shouldnt.have.key('other') - lb_tags['other-lb'].should.have.key('other').which.should.equal('something') + lb_tags[ + 'other-lb'].should.have.key('other').which.should.equal('something') @mock_elb @@ -727,15 +781,17 @@ def test_create_with_tags(): client.create_load_balancer( 
LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], AvailabilityZones=['us-east-1a', 'us-east-1b'], Tags=[{ - 'Key': 'k', - 'Value': 'v' + 'Key': 'k', + 'Value': 'v' }] ) - tags = dict((d['Key'], d['Value']) for d in client.describe_tags(LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) + tags = dict((d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) tags.should.have.key('k').which.should.equal('v') @@ -754,7 +810,8 @@ def test_subnets(): client = boto3.client('elb', region_name='us-east-1') client.create_load_balancer( LoadBalancerName='my-lb', - Listeners=[{'Protocol':'tcp', 'LoadBalancerPort':80, 'InstancePort':8080}], + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], Subnets=[subnet.id] ) @@ -770,5 +827,5 @@ def test_create_load_balancer_duplicate(): conn = boto.connect_elb() ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] conn.create_load_balancer('my-lb', [], ports) - conn.create_load_balancer.when.called_with('my-lb', [], ports).should.throw(BotoServerError) - + conn.create_load_balancer.when.called_with( + 'my-lb', [], ports).should.throw(BotoServerError) diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py index 4b06d7516..4acd7067c 100644 --- a/tests/test_emr/test_emr.py +++ b/tests/test_emr/test_emr.py @@ -100,7 +100,8 @@ def test_describe_cluster(): # cluster.status.timeline.enddatetime.should.be.a(six.string_types) # cluster.status.timeline.readydatetime.should.be.a(six.string_types) - dict((item.key, item.value) for item in cluster.tags).should.equal(input_tags) + dict((item.key, item.value) + for item in cluster.tags).should.equal(input_tags) cluster.terminationprotected.should.equal('false') cluster.visibletoallusers.should.equal('true') @@ -285,7 +286,8 @@ def test_list_clusters(): y = 
expected[x.id] x.id.should.equal(y['id']) x.name.should.equal(y['name']) - x.normalizedinstancehours.should.equal(y['normalizedinstancehours']) + x.normalizedinstancehours.should.equal( + y['normalizedinstancehours']) x.status.state.should.equal(y['state']) x.status.timeline.creationdatetime.should.be.a(six.string_types) if y['state'] == 'TERMINATED': @@ -371,11 +373,13 @@ def test_run_jobflow_with_instance_groups(): job_id = conn.run_jobflow(instance_groups=input_instance_groups, **run_jobflow_args) job_flow = conn.describe_jobflow(job_id) - int(job_flow.instancecount).should.equal(sum(g.num_instances for g in input_instance_groups)) + int(job_flow.instancecount).should.equal( + sum(g.num_instances for g in input_instance_groups)) for instance_group in job_flow.instancegroups: expected = input_groups[instance_group.name] instance_group.should.have.property('instancegroupid') - int(instance_group.instancerunningcount).should.equal(expected.num_instances) + int(instance_group.instancerunningcount).should.equal( + expected.num_instances) instance_group.instancerole.should.equal(expected.role) instance_group.instancetype.should.equal(expected.type) instance_group.market.should.equal(expected.market) @@ -483,7 +487,8 @@ def test_instance_groups(): conn.add_instance_groups(job_id, input_instance_groups[2:]) jf = conn.describe_jobflow(job_id) - int(jf.instancecount).should.equal(sum(g.num_instances for g in input_instance_groups)) + int(jf.instancecount).should.equal( + sum(g.num_instances for g in input_instance_groups)) for x in jf.instancegroups: y = input_groups[x.name] if hasattr(y, 'bidprice'): @@ -572,7 +577,8 @@ def test_steps(): list(arg.value for arg in step.args).should.have.length_of(8) step.creationdatetime.should.be.a(six.string_types) # step.enddatetime.should.be.a(six.string_types) - step.jar.should.equal('/home/hadoop/contrib/streaming/hadoop-streaming.jar') + step.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') 
step.laststatechangereason.should.be.a(six.string_types) step.mainclass.should.equal('') step.name.should.be.a(six.string_types) @@ -592,7 +598,8 @@ def test_steps(): '-input', y.input, '-output', y.output, ]) - x.config.jar.should.equal('/home/hadoop/contrib/streaming/hadoop-streaming.jar') + x.config.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') x.config.mainclass.should.equal('') # properties x.should.have.property('id').should.be.a(six.string_types) @@ -610,7 +617,8 @@ def test_steps(): '-input', y.input, '-output', y.output, ]) - x.config.jar.should.equal('/home/hadoop/contrib/streaming/hadoop-streaming.jar') + x.config.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') x.config.mainclass.should.equal('') # properties x.should.have.property('id').should.be.a(six.string_types) diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 4fb5c3d79..4999935c5 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -88,15 +88,20 @@ def test_describe_cluster(): config['Properties'].should.equal(args['Configurations'][0]['Properties']) attrs = cl['Ec2InstanceAttributes'] - attrs['AdditionalMasterSecurityGroups'].should.equal(args['Instances']['AdditionalMasterSecurityGroups']) - attrs['AdditionalSlaveSecurityGroups'].should.equal(args['Instances']['AdditionalSlaveSecurityGroups']) + attrs['AdditionalMasterSecurityGroups'].should.equal( + args['Instances']['AdditionalMasterSecurityGroups']) + attrs['AdditionalSlaveSecurityGroups'].should.equal( + args['Instances']['AdditionalSlaveSecurityGroups']) attrs['Ec2AvailabilityZone'].should.equal('us-east-1a') attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) - attrs['EmrManagedMasterSecurityGroup'].should.equal(args['Instances']['EmrManagedMasterSecurityGroup']) - 
attrs['EmrManagedSlaveSecurityGroup'].should.equal(args['Instances']['EmrManagedSlaveSecurityGroup']) + attrs['EmrManagedMasterSecurityGroup'].should.equal( + args['Instances']['EmrManagedMasterSecurityGroup']) + attrs['EmrManagedSlaveSecurityGroup'].should.equal( + args['Instances']['EmrManagedSlaveSecurityGroup']) attrs['IamInstanceProfile'].should.equal(args['JobFlowRole']) - attrs['ServiceAccessSecurityGroup'].should.equal(args['Instances']['ServiceAccessSecurityGroup']) + attrs['ServiceAccessSecurityGroup'].should.equal( + args['Instances']['ServiceAccessSecurityGroup']) cl['Id'].should.equal(cluster_id) cl['LogUri'].should.equal(args['LogUri']) cl['MasterPublicDnsName'].should.be.a(six.string_types) @@ -222,11 +227,14 @@ def test_describe_job_flow(): ig['State'].should.equal('RUNNING') attrs['KeepJobFlowAliveWhenNoSteps'].should.equal(True) # attrs['MasterInstanceId'].should.be.a(six.string_types) - attrs['MasterInstanceType'].should.equal(args['Instances']['MasterInstanceType']) + attrs['MasterInstanceType'].should.equal( + args['Instances']['MasterInstanceType']) attrs['MasterPublicDnsName'].should.be.a(six.string_types) attrs['NormalizedInstanceHours'].should.equal(0) - attrs['Placement']['AvailabilityZone'].should.equal(args['Instances']['Placement']['AvailabilityZone']) - attrs['SlaveInstanceType'].should.equal(args['Instances']['SlaveInstanceType']) + attrs['Placement']['AvailabilityZone'].should.equal( + args['Instances']['Placement']['AvailabilityZone']) + attrs['SlaveInstanceType'].should.equal( + args['Instances']['SlaveInstanceType']) attrs['TerminationProtected'].should.equal(False) jf['JobFlowId'].should.equal(cluster_id) jf['JobFlowRole'].should.equal(args['JobFlowRole']) @@ -282,14 +290,18 @@ def test_list_clusters(): y = expected[x['Id']] x['Id'].should.equal(y['Id']) x['Name'].should.equal(y['Name']) - x['NormalizedInstanceHours'].should.equal(y['NormalizedInstanceHours']) + x['NormalizedInstanceHours'].should.equal( + 
y['NormalizedInstanceHours']) x['Status']['State'].should.equal(y['State']) - x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') if y['State'] == 'TERMINATED': - x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'EndDateTime'].should.be.a('datetime.datetime') else: x['Status']['Timeline'].shouldnt.have.key('EndDateTime') - x['Status']['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'ReadyDateTime'].should.be.a('datetime.datetime') marker = resp.get('Marker') if marker is None: break @@ -316,8 +328,10 @@ def test_run_job_flow(): resp['ExecutionStatusDetail']['State'].should.equal('WAITING') resp['JobFlowId'].should.equal(cluster_id) resp['Name'].should.equal(args['Name']) - resp['Instances']['MasterInstanceType'].should.equal(args['Instances']['MasterInstanceType']) - resp['Instances']['SlaveInstanceType'].should.equal(args['Instances']['SlaveInstanceType']) + resp['Instances']['MasterInstanceType'].should.equal( + args['Instances']['MasterInstanceType']) + resp['Instances']['SlaveInstanceType'].should.equal( + args['Instances']['SlaveInstanceType']) resp['LogUri'].should.equal(args['LogUri']) resp['VisibleToAllUsers'].should.equal(args['VisibleToAllUsers']) resp['Instances']['NormalizedInstanceHours'].should.equal(0) @@ -333,7 +347,8 @@ def test_run_job_flow_with_invalid_params(): args['AmiVersion'] = '2.4' args['ReleaseLabel'] = 'emr-5.0.0' client.run_job_flow(**args) - ex.exception.response['Error']['Message'].should.contain('ValidationException') + ex.exception.response['Error'][ + 'Message'].should.contain('ValidationException') @mock_emr @@ -378,7 +393,8 @@ def test_run_job_flow_with_instance_groups(): args = deepcopy(run_job_flow_args) args['Instances'] = {'InstanceGroups': input_instance_groups} cluster_id = client.run_job_flow(**args)['JobFlowId'] - groups = 
client.list_instance_groups(ClusterId=cluster_id)['InstanceGroups'] + groups = client.list_instance_groups(ClusterId=cluster_id)[ + 'InstanceGroups'] for x in groups: y = input_groups[x['Name']] x.should.have.key('Id') @@ -484,10 +500,12 @@ def test_instance_groups(): jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] base_instance_count = jf['Instances']['InstanceCount'] - client.add_instance_groups(JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:]) + client.add_instance_groups( + JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:]) jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Instances']['InstanceCount'].should.equal(sum(g['InstanceCount'] for g in input_instance_groups)) + jf['Instances']['InstanceCount'].should.equal( + sum(g['InstanceCount'] for g in input_instance_groups)) for x in jf['Instances']['InstanceGroups']: y = input_groups[x['Name']] if hasattr(y, 'BidPrice'): @@ -506,7 +524,8 @@ def test_instance_groups(): x['StartDateTime'].should.be.a('datetime.datetime') x['State'].should.equal('RUNNING') - groups = client.list_instance_groups(ClusterId=cluster_id)['InstanceGroups'] + groups = client.list_instance_groups(ClusterId=cluster_id)[ + 'InstanceGroups'] for x in groups: y = input_groups[x['Name']] if hasattr(y, 'BidPrice'): @@ -525,9 +544,11 @@ def test_instance_groups(): x['Status']['State'].should.equal('RUNNING') x['Status']['StateChangeReason']['Code'].should.be.a(six.string_types) # x['Status']['StateChangeReason']['Message'].should.be.a(six.string_types) - x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - x['Status']['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'ReadyDateTime'].should.be.a('datetime.datetime') igs = dict((g['Name'], g) for g in 
groups) client.modify_instance_groups( @@ -592,14 +613,19 @@ def test_steps(): # x['ExecutionStatusDetail'].should.have.key('EndDateTime') # x['ExecutionStatusDetail'].should.have.key('LastStateChangeReason') # x['ExecutionStatusDetail'].should.have.key('StartDateTime') - x['ExecutionStatusDetail']['State'].should.equal('STARTING' if idx == 0 else 'PENDING') + x['ExecutionStatusDetail']['State'].should.equal( + 'STARTING' if idx == 0 else 'PENDING') x['StepConfig']['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['StepConfig']['HadoopJarStep']['Args'].should.equal(y['HadoopJarStep']['Args']) - x['StepConfig']['HadoopJarStep']['Jar'].should.equal(y['HadoopJarStep']['Jar']) + x['StepConfig']['HadoopJarStep'][ + 'Args'].should.equal(y['HadoopJarStep']['Args']) + x['StepConfig']['HadoopJarStep'][ + 'Jar'].should.equal(y['HadoopJarStep']['Jar']) if 'MainClass' in y['HadoopJarStep']: - x['StepConfig']['HadoopJarStep']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) + x['StepConfig']['HadoopJarStep']['MainClass'].should.equal( + y['HadoopJarStep']['MainClass']) if 'Properties' in y['HadoopJarStep']: - x['StepConfig']['HadoopJarStep']['Properties'].should.equal(y['HadoopJarStep']['Properties']) + x['StepConfig']['HadoopJarStep']['Properties'].should.equal( + y['HadoopJarStep']['Properties']) x['StepConfig']['Name'].should.equal(y['Name']) expected = dict((s['Name'], s) for s in input_steps) @@ -617,7 +643,8 @@ def test_steps(): x['Name'].should.equal(y['Name']) x['Status']['State'].should.be.within(['STARTING', 'PENDING']) # StateChangeReason - x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') @@ -631,7 +658,8 @@ def test_steps(): x['Name'].should.equal(y['Name']) 
x['Status']['State'].should.be.within(['STARTING', 'PENDING']) # StateChangeReason - x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') @@ -640,7 +668,8 @@ def test_steps(): steps.should.have.length_of(1) steps[0]['Id'].should.equal(step_id) - steps = client.list_steps(ClusterId=cluster_id, StepStates=['STARTING'])['Steps'] + steps = client.list_steps(ClusterId=cluster_id, + StepStates=['STARTING'])['Steps'] steps.should.have.length_of(1) steps[0]['Id'].should.equal(step_id) @@ -656,8 +685,10 @@ def test_tags(): client.add_tags(ResourceId=cluster_id, Tags=input_tags) resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] resp['Tags'].should.have.length_of(2) - dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal(dict((t['Key'], t['Value']) for t in input_tags)) + dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal( + dict((t['Key'], t['Value']) for t in input_tags)) - client.remove_tags(ResourceId=cluster_id, TagKeys=[t['Key'] for t in input_tags]) + client.remove_tags(ResourceId=cluster_id, TagKeys=[ + t['Key'] for t in input_tags]) resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] resp['Tags'].should.equal([]) diff --git a/tests/test_glacier/test_glacier_jobs.py b/tests/test_glacier/test_glacier_jobs.py index ef4a00b75..66780f681 100644 --- a/tests/test_glacier/test_glacier_jobs.py +++ b/tests/test_glacier/test_glacier_jobs.py @@ -13,14 +13,16 @@ def test_init_glacier_job(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" conn.create_vault(vault_name) - archive_id = conn.upload_archive(vault_name, "some stuff", "", "", "some description") + archive_id = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") job_response = 
conn.initiate_job(vault_name, { "ArchiveId": archive_id, "Type": "archive-retrieval", }) job_id = job_response['JobId'] - job_response['Location'].should.equal("//vaults/my_vault/jobs/{0}".format(job_id)) + job_response['Location'].should.equal( + "//vaults/my_vault/jobs/{0}".format(job_id)) @mock_glacier_deprecated @@ -28,7 +30,8 @@ def test_describe_job(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" conn.create_vault(vault_name) - archive_id = conn.upload_archive(vault_name, "some stuff", "", "", "some description") + archive_id = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") job_response = conn.initiate_job(vault_name, { "ArchiveId": archive_id, "Type": "archive-retrieval", @@ -61,8 +64,10 @@ def test_list_glacier_jobs(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" conn.create_vault(vault_name) - archive_id1 = conn.upload_archive(vault_name, "some stuff", "", "", "some description")['ArchiveId'] - archive_id2 = conn.upload_archive(vault_name, "some other stuff", "", "", "some description")['ArchiveId'] + archive_id1 = conn.upload_archive( + vault_name, "some stuff", "", "", "some description")['ArchiveId'] + archive_id2 = conn.upload_archive( + vault_name, "some other stuff", "", "", "some description")['ArchiveId'] conn.initiate_job(vault_name, { "ArchiveId": archive_id1, @@ -82,7 +87,8 @@ def test_get_job_output(): conn = Layer1(region_name="us-west-2") vault_name = "my_vault" conn.create_vault(vault_name) - archive_response = conn.upload_archive(vault_name, "some stuff", "", "", "some description") + archive_response = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") archive_id = archive_response['ArchiveId'] job_response = conn.initiate_job(vault_name, { "ArchiveId": archive_id, diff --git a/tests/test_glacier/test_glacier_server.py b/tests/test_glacier/test_glacier_server.py index d3e09015f..fd8034421 100644 --- a/tests/test_glacier/test_glacier_server.py +++ 
b/tests/test_glacier/test_glacier_server.py @@ -18,4 +18,5 @@ def test_list_vaults(): res = test_client.get('/1234bcd/vaults') - json.loads(res.data.decode("utf-8")).should.equal({u'Marker': None, u'VaultList': []}) + json.loads(res.data.decode("utf-8") + ).should.equal({u'Marker': None, u'VaultList': []}) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 6504a5483..076f33916 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -19,11 +19,13 @@ def test_get_all_server_certs(): conn = boto.connect_iam() conn.upload_server_cert("certname", "certbody", "privatekey") - certs = conn.get_all_server_certs()['list_server_certificates_response']['list_server_certificates_result']['server_certificate_metadata_list'] + certs = conn.get_all_server_certs()['list_server_certificates_response'][ + 'list_server_certificates_result']['server_certificate_metadata_list'] certs.should.have.length_of(1) cert1 = certs[0] cert1.server_certificate_name.should.equal("certname") - cert1.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname") + cert1.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") @mock_iam_deprecated() @@ -41,7 +43,8 @@ def test_get_server_cert(): conn.upload_server_cert("certname", "certbody", "privatekey") cert = conn.get_server_certificate("certname") cert.server_certificate_name.should.equal("certname") - cert.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname") + cert.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") @mock_iam_deprecated() @@ -51,7 +54,8 @@ def test_upload_server_cert(): conn.upload_server_cert("certname", "certbody", "privatekey") cert = conn.get_server_certificate("certname") cert.server_certificate_name.should.equal("certname") - cert.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname") + cert.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") 
@mock_iam_deprecated() @@ -74,7 +78,8 @@ def test_get_instance_profile__should_throw__when_instance_profile_does_not_exis def test_create_role_and_instance_profile(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") - conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path") + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") conn.add_role_to_instance_profile("my-profile", "my-role") @@ -95,7 +100,8 @@ def test_create_role_and_instance_profile(): def test_remove_role_from_instance_profile(): conn = boto.connect_iam() conn.create_instance_profile("my-profile", path="my-path") - conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path") + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") conn.add_role_to_instance_profile("my-profile", "my-role") profile = conn.get_instance_profile("my-profile") @@ -127,29 +133,37 @@ def test_list_instance_profiles(): def test_list_instance_profiles_for_role(): conn = boto.connect_iam() - conn.create_role(role_name="my-role", assume_role_policy_document="some policy", path="my-path") - conn.create_role(role_name="my-role2", assume_role_policy_document="some policy2", path="my-path2") + conn.create_role(role_name="my-role", + assume_role_policy_document="some policy", path="my-path") + conn.create_role(role_name="my-role2", + assume_role_policy_document="some policy2", path="my-path2") profile_name_list = ['my-profile', 'my-profile2'] profile_path_list = ['my-path', 'my-path2'] for profile_count in range(0, 2): - conn.create_instance_profile(profile_name_list[profile_count], path=profile_path_list[profile_count]) + conn.create_instance_profile( + profile_name_list[profile_count], path=profile_path_list[profile_count]) for profile_count in range(0, 2): - conn.add_role_to_instance_profile(profile_name_list[profile_count], "my-role") + 
conn.add_role_to_instance_profile( + profile_name_list[profile_count], "my-role") profile_dump = conn.list_instance_profiles_for_role(role_name="my-role") - profile_list = profile_dump['list_instance_profiles_for_role_response']['list_instance_profiles_for_role_result']['instance_profiles'] + profile_list = profile_dump['list_instance_profiles_for_role_response'][ + 'list_instance_profiles_for_role_result']['instance_profiles'] for profile_count in range(0, len(profile_list)): - profile_name_list.remove(profile_list[profile_count]["instance_profile_name"]) + profile_name_list.remove(profile_list[profile_count][ + "instance_profile_name"]) profile_path_list.remove(profile_list[profile_count]["path"]) - profile_list[profile_count]["roles"]["member"]["role_name"].should.equal("my-role") + profile_list[profile_count]["roles"]["member"][ + "role_name"].should.equal("my-role") len(profile_name_list).should.equal(0) len(profile_path_list).should.equal(0) profile_dump2 = conn.list_instance_profiles_for_role(role_name="my-role2") - profile_list = profile_dump2['list_instance_profiles_for_role_response']['list_instance_profiles_for_role_result']['instance_profiles'] + profile_list = profile_dump2['list_instance_profiles_for_role_response'][ + 'list_instance_profiles_for_role_result']['instance_profiles'] len(profile_list).should.equal(0) @@ -165,9 +179,11 @@ def test_list_role_policies(): @mock_iam_deprecated() def test_put_role_policy(): conn = boto.connect_iam() - conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path") + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") conn.put_role_policy("my-role", "test policy", "my policy") - policy = conn.get_role_policy("my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] + policy = conn.get_role_policy( + "my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] 
policy.should.equal("test policy") @@ -246,13 +262,15 @@ def test_get_all_access_keys(): conn.create_user('my-user') response = conn.get_all_access_keys('my-user') assert_equals( - response['list_access_keys_response']['list_access_keys_result']['access_key_metadata'], + response['list_access_keys_response'][ + 'list_access_keys_result']['access_key_metadata'], [] ) conn.create_access_key('my-user') response = conn.get_all_access_keys('my-user') assert_not_equals( - response['list_access_keys_response']['list_access_keys_result']['access_key_metadata'], + response['list_access_keys_response'][ + 'list_access_keys_result']['access_key_metadata'], [] ) @@ -261,7 +279,8 @@ def test_get_all_access_keys(): def test_delete_access_key(): conn = boto.connect_iam() conn.create_user('my-user') - access_key_id = conn.create_access_key('my-user')['create_access_key_response']['create_access_key_result']['access_key']['access_key_id'] + access_key_id = conn.create_access_key('my-user')['create_access_key_response'][ + 'create_access_key_result']['access_key']['access_key_id'] conn.delete_access_key(access_key_id, 'my-user') @@ -278,9 +297,11 @@ def test_delete_user(): def test_generate_credential_report(): conn = boto.connect_iam() result = conn.generate_credential_report() - result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('STARTED') + result['generate_credential_report_response'][ + 'generate_credential_report_result']['state'].should.equal('STARTED') result = conn.generate_credential_report() - result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('COMPLETE') + result['generate_credential_report_response'][ + 'generate_credential_report_result']['state'].should.equal('COMPLETE') @mock_iam_deprecated() @@ -293,7 +314,8 @@ def test_get_credential_report(): while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE': 
result = conn.generate_credential_report() result = conn.get_credential_report() - report = base64.b64decode(result['get_credential_report_response']['get_credential_report_result']['content'].encode('ascii')).decode('ascii') + report = base64.b64decode(result['get_credential_report_response'][ + 'get_credential_report_result']['content'].encode('ascii')).decode('ascii') report.should.match(r'.*my-user.*') @@ -307,23 +329,31 @@ def test_managed_policy(): path='/mypolicy/', description='my user managed policy') - aws_policies = conn.list_policies(scope='AWS')['list_policies_response']['list_policies_result']['policies'] - set(p.name for p in aws_managed_policies).should.equal(set(p['policy_name'] for p in aws_policies)) + aws_policies = conn.list_policies(scope='AWS')['list_policies_response'][ + 'list_policies_result']['policies'] + set(p.name for p in aws_managed_policies).should.equal( + set(p['policy_name'] for p in aws_policies)) - user_policies = conn.list_policies(scope='Local')['list_policies_response']['list_policies_result']['policies'] - set(['UserManagedPolicy']).should.equal(set(p['policy_name'] for p in user_policies)) + user_policies = conn.list_policies(scope='Local')['list_policies_response'][ + 'list_policies_result']['policies'] + set(['UserManagedPolicy']).should.equal( + set(p['policy_name'] for p in user_policies)) - all_policies = conn.list_policies()['list_policies_response']['list_policies_result']['policies'] - set(p['policy_name'] for p in aws_policies + user_policies).should.equal(set(p['policy_name'] for p in all_policies)) + all_policies = conn.list_policies()['list_policies_response'][ + 'list_policies_result']['policies'] + set(p['policy_name'] for p in aws_policies + + user_policies).should.equal(set(p['policy_name'] for p in all_policies)) role_name = 'my-role' - conn.create_role(role_name, assume_role_policy_document={'policy': 'test'}, path="my-path") + conn.create_role(role_name, assume_role_policy_document={ + 'policy': 'test'}, 
path="my-path") for policy_name in ['AmazonElasticMapReduceRole', 'AmazonElasticMapReduceforEC2Role']: policy_arn = 'arn:aws:iam::aws:policy/service-role/' + policy_name conn.attach_role_policy(policy_arn, role_name) - rows = conn.list_policies(only_attached=True)['list_policies_response']['list_policies_result']['policies'] + rows = conn.list_policies(only_attached=True)['list_policies_response'][ + 'list_policies_result']['policies'] rows.should.have.length_of(2) for x in rows: int(x['attachment_count']).should.be.greater_than(0) @@ -332,7 +362,8 @@ def test_managed_policy(): resp = conn.get_response('ListAttachedRolePolicies', {'RoleName': role_name}, list_marker='AttachedPolicies') - resp['list_attached_role_policies_response']['list_attached_role_policies_result']['attached_policies'].should.have.length_of(2) + resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ + 'attached_policies'].should.have.length_of(2) @mock_iam diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 6fd0f47dd..a13d6de0b 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -29,7 +29,8 @@ def test_get_all_groups(): conn = boto.connect_iam() conn.create_group('my-group1') conn.create_group('my-group2') - groups = conn.get_all_groups()['list_groups_response']['list_groups_result']['groups'] + groups = conn.get_all_groups()['list_groups_response'][ + 'list_groups_result']['groups'] groups.should.have.length_of(2) @@ -68,5 +69,6 @@ def test_get_groups_for_user(): conn.add_user_to_group('my-group1', 'my-user') conn.add_user_to_group('my-group2', 'my-user') - groups = conn.get_groups_for_user('my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups'] + groups = conn.get_groups_for_user( + 'my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups'] groups.should.have.length_of(2) diff --git a/tests/test_iam/test_server.py 
b/tests/test_iam/test_server.py index 1b1c3bfe3..59aaf1462 100644 --- a/tests/test_iam/test_server.py +++ b/tests/test_iam/test_server.py @@ -16,10 +16,11 @@ def test_iam_server_get(): backend = server.create_backend_app("iam") test_client = backend.test_client() - group_data = test_client.action_data("CreateGroup", GroupName="test group", Path="/") + group_data = test_client.action_data( + "CreateGroup", GroupName="test group", Path="/") group_id = re.search("(.*)", group_data).groups()[0] groups_data = test_client.action_data("ListGroups") groups_ids = re.findall("(.*)", groups_data) - assert group_id in groups_ids \ No newline at end of file + assert group_id in groups_ids diff --git a/tests/test_kinesis/test_firehose.py b/tests/test_kinesis/test_firehose.py index 371be253b..6ab46c6f9 100644 --- a/tests/test_kinesis/test_firehose.py +++ b/tests/test_kinesis/test_firehose.py @@ -132,11 +132,13 @@ def test_create_stream_without_redshift(): "HasMoreDestinations": False, }) + @mock_kinesis def test_deescribe_non_existant_stream(): client = boto3.client('firehose', region_name='us-east-1') - client.describe_delivery_stream.when.called_with(DeliveryStreamName='not-a-stream').should.throw(ClientError) + client.describe_delivery_stream.when.called_with( + DeliveryStreamName='not-a-stream').should.throw(ClientError) @mock_kinesis @@ -146,11 +148,13 @@ def test_list_and_delete_stream(): create_stream(client, 'stream1') create_stream(client, 'stream2') - set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal(set(['stream1', 'stream2'])) + set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal( + set(['stream1', 'stream2'])) client.delete_delivery_stream(DeliveryStreamName='stream1') - set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal(set(['stream2'])) + set(client.list_delivery_streams()[ + 'DeliveryStreamNames']).should.equal(set(['stream2'])) @mock_kinesis diff --git a/tests/test_kinesis/test_kinesis.py 
b/tests/test_kinesis/test_kinesis.py index a86bce44c..5b2f9ccf3 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -18,7 +18,8 @@ def test_create_cluster(): stream = stream_response["StreamDescription"] stream["StreamName"].should.equal("my_stream") stream["HasMoreShards"].should.equal(False) - stream["StreamARN"].should.equal("arn:aws:kinesis:us-west-2:123456789012:my_stream") + stream["StreamARN"].should.equal( + "arn:aws:kinesis:us-west-2:123456789012:my_stream") stream["StreamStatus"].should.equal("ACTIVE") shards = stream['Shards'] @@ -28,7 +29,8 @@ def test_create_cluster(): @mock_kinesis_deprecated def test_describe_non_existant_stream(): conn = boto.kinesis.connect_to_region("us-east-1") - conn.describe_stream.when.called_with("not-a-stream").should.throw(ResourceNotFoundException) + conn.describe_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) @mock_kinesis_deprecated @@ -45,7 +47,8 @@ def test_list_and_delete_stream(): conn.list_streams()['StreamNames'].should.have.length_of(1) # Delete invalid id - conn.delete_stream.when.called_with("not-a-stream").should.throw(ResourceNotFoundException) + conn.delete_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) @mock_kinesis_deprecated @@ -73,7 +76,8 @@ def test_get_invalid_shard_iterator(): stream_name = "my_stream" conn.create_stream(stream_name, 1) - conn.get_shard_iterator.when.called_with(stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) + conn.get_shard_iterator.when.called_with( + stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) @mock_kinesis_deprecated @@ -138,7 +142,8 @@ def test_get_records_limit(): @mock_kinesis_deprecated def test_get_records_at_sequence_number(): - # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by a specific sequence number. 
+ # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by + # a specific sequence number. conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" conn.create_stream(stream_name, 1) @@ -158,7 +163,8 @@ def test_get_records_at_sequence_number(): second_sequence_id = response['Records'][1]['SequenceNumber'] # Then get a new iterator starting at that id - response = conn.get_shard_iterator(stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) + response = conn.get_shard_iterator( + stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) shard_iterator = response['ShardIterator'] response = conn.get_records(shard_iterator) @@ -169,7 +175,8 @@ def test_get_records_at_sequence_number(): @mock_kinesis_deprecated def test_get_records_after_sequence_number(): - # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a specific sequence number. + # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted + # by a specific sequence number. conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" conn.create_stream(stream_name, 1) @@ -189,7 +196,8 @@ def test_get_records_after_sequence_number(): second_sequence_id = response['Records'][1]['SequenceNumber'] # Then get a new iterator starting after that id - response = conn.get_shard_iterator(stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id) + response = conn.get_shard_iterator( + stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id) shard_iterator = response['ShardIterator'] response = conn.get_records(shard_iterator) @@ -199,7 +207,8 @@ def test_get_records_after_sequence_number(): @mock_kinesis_deprecated def test_get_records_latest(): - # LATEST - Start reading just after the most recent record in the shard, so that you always read the most recent data in the shard. 
+ # LATEST - Start reading just after the most recent record in the shard, + # so that you always read the most recent data in the shard. conn = boto.kinesis.connect_to_region("us-west-2") stream_name = "my_stream" conn.create_stream(stream_name, 1) @@ -219,7 +228,8 @@ def test_get_records_latest(): second_sequence_id = response['Records'][1]['SequenceNumber'] # Then get a new iterator starting after that id - response = conn.get_shard_iterator(stream_name, shard_id, 'LATEST', second_sequence_id) + response = conn.get_shard_iterator( + stream_name, shard_id, 'LATEST', second_sequence_id) shard_iterator = response['ShardIterator'] # Write some more data @@ -251,10 +261,10 @@ def test_add_tags(): conn.create_stream(stream_name, 1) conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1':'val1'}) - conn.add_tags_to_stream(stream_name, {'tag2':'val2'}) - conn.add_tags_to_stream(stream_name, {'tag1':'val3'}) - conn.add_tags_to_stream(stream_name, {'tag2':'val4'}) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) @mock_kinesis_deprecated @@ -264,17 +274,21 @@ def test_list_tags(): conn.create_stream(stream_name, 1) conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1':'val1'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag1').should.equal('val1') - conn.add_tags_to_stream(stream_name, {'tag2':'val2'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in 
conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag2').should.equal('val2') - conn.add_tags_to_stream(stream_name, {'tag1':'val3'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag1').should.equal('val3') - conn.add_tags_to_stream(stream_name, {'tag2':'val4'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag2').should.equal('val4') @@ -285,18 +299,22 @@ def test_remove_tags(): conn.create_stream(stream_name, 1) conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1':'val1'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag1').should.equal('val1') conn.remove_tags_from_stream(stream_name, ['tag1']) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag1').should.equal(None) - conn.add_tags_to_stream(stream_name, {'tag2':'val2'}) - tags = dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag2').should.equal('val2') conn.remove_tags_from_stream(stream_name, ['tag2']) - tags = 
dict([(tag['Key'], tag['Value']) for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) tags.get('tag2').should.equal(None) @@ -316,10 +334,12 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) shard_range = shards[0]['HashKeyRange'] - new_starting_hash = (int(shard_range['EndingHashKey'])+int(shard_range['StartingHashKey'])) // 2 + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -327,10 +347,12 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) shard_range = shards[2]['HashKeyRange'] - new_starting_hash = (int(shard_range['EndingHashKey'])+int(shard_range['StartingHashKey'])) // 2 + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -338,7 +360,8 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in 
shards]).should.equal(99) @mock_kinesis_deprecated @@ -358,28 +381,34 @@ def test_merge_shards(): shards = stream['Shards'] shards.should.have.length_of(4) - conn.merge_shards.when.called_with(stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) + conn.merge_shards.when.called_with( + stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) stream_response = conn.describe_stream(stream_name) stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) - conn.merge_shards(stream_name, 'shardId-000000000000', 'shardId-000000000001') + conn.merge_shards(stream_name, 'shardId-000000000000', + 'shardId-000000000001') stream_response = conn.describe_stream(stream_name) stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) - conn.merge_shards(stream_name, 'shardId-000000000002', 'shardId-000000000000') + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + conn.merge_shards(stream_name, 'shardId-000000000002', + 'shardId-000000000000') stream_response = conn.describe_stream(stream_name) stream = stream_response["StreamDescription"] shards = stream['Shards'] shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] for shard in shards]).should.equal(99) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 27850d4ad..e1468cce0 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py 
@@ -8,11 +8,13 @@ import sure # noqa from moto import mock_kms_deprecated from nose.tools import assert_raises + @mock_kms_deprecated def test_create_key(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key['KeyMetadata']['Description'].should.equal("my key") key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") @@ -22,7 +24,8 @@ def test_create_key(): @mock_kms_deprecated def test_describe_key(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] key = conn.describe_key(key_id) @@ -33,8 +36,10 @@ def test_describe_key(): @mock_kms_deprecated def test_describe_key_via_alias(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', target_key_id=key['KeyMetadata']['KeyId']) + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) alias_key = conn.describe_key('alias/my-key-alias') alias_key['KeyMetadata']['Description'].should.equal("my key") @@ -45,16 +50,20 @@ def test_describe_key_via_alias(): @mock_kms_deprecated def test_describe_key_via_alias_not_found(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', target_key_id=key['KeyMetadata']['KeyId']) + key = conn.create_key(policy="my policy", + description="my key", 
key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) - conn.describe_key.when.called_with('alias/not-found-alias').should.throw(JSONResponseError) + conn.describe_key.when.called_with( + 'alias/not-found-alias').should.throw(JSONResponseError) @mock_kms_deprecated def test_describe_key_via_arn(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') arn = key['KeyMetadata']['Arn'] the_key = conn.describe_key(arn) @@ -66,15 +75,18 @@ def test_describe_key_via_arn(): @mock_kms_deprecated def test_describe_missing_key(): conn = boto.kms.connect_to_region("us-west-2") - conn.describe_key.when.called_with("not-a-key").should.throw(JSONResponseError) + conn.describe_key.when.called_with( + "not-a-key").should.throw(JSONResponseError) @mock_kms_deprecated def test_list_keys(): conn = boto.kms.connect_to_region("us-west-2") - conn.create_key(policy="my policy", description="my key1", key_usage='ENCRYPT_DECRYPT') - conn.create_key(policy="my policy", description="my key2", key_usage='ENCRYPT_DECRYPT') + conn.create_key(policy="my policy", description="my key1", + key_usage='ENCRYPT_DECRYPT') + conn.create_key(policy="my policy", description="my key2", + key_usage='ENCRYPT_DECRYPT') keys = conn.list_keys() keys['Keys'].should.have.length_of(2) @@ -84,56 +96,67 @@ def test_list_keys(): def test_enable_key_rotation(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] conn.enable_key_rotation(key_id) - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(True) + 
conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) + @mock_kms_deprecated def test_enable_key_rotation_via_arn(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['Arn'] conn.enable_key_rotation(key_id) - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(True) - + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) @mock_kms_deprecated def test_enable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") - conn.enable_key_rotation.when.called_with("not-a-key").should.throw(JSONResponseError) + conn.enable_key_rotation.when.called_with( + "not-a-key").should.throw(JSONResponseError) @mock_kms_deprecated def test_enable_key_rotation_with_alias_name_should_fail(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', target_key_id=key['KeyMetadata']['KeyId']) + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) alias_key = conn.describe_key('alias/my-key-alias') alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) - conn.enable_key_rotation.when.called_with('alias/my-alias').should.throw(JSONResponseError) + conn.enable_key_rotation.when.called_with( + 'alias/my-alias').should.throw(JSONResponseError) @mock_kms_deprecated def test_disable_key_rotation(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + 
description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] conn.enable_key_rotation(key_id) - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(True) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) conn.disable_key_rotation(key_id) - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) @mock_kms_deprecated @@ -157,59 +180,70 @@ def test_decrypt(): @mock_kms_deprecated def test_disable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") - conn.disable_key_rotation.when.called_with("not-a-key").should.throw(JSONResponseError) + conn.disable_key_rotation.when.called_with( + "not-a-key").should.throw(JSONResponseError) @mock_kms_deprecated def test_get_key_rotation_status_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") - conn.get_key_rotation_status.when.called_with("not-a-key").should.throw(JSONResponseError) + conn.get_key_rotation_status.when.called_with( + "not-a-key").should.throw(JSONResponseError) @mock_kms_deprecated def test_get_key_rotation_status(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] - conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) @mock_kms_deprecated def test_create_key_defaults_key_rotation(): conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] - 
conn.get_key_rotation_status(key_id)['KeyRotationEnabled'].should.equal(False) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) @mock_kms_deprecated def test_get_key_policy(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] policy = conn.get_key_policy(key_id, 'default') policy['Policy'].should.equal('my policy') + @mock_kms_deprecated def test_get_key_policy_via_arn(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') policy = conn.get_key_policy(key['KeyMetadata']['Arn'], 'default') policy['Policy'].should.equal('my policy') + @mock_kms_deprecated def test_put_key_policy(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] conn.put_key_policy(key_id, 'default', 'new policy') @@ -221,7 +255,8 @@ def test_put_key_policy(): def test_put_key_policy_via_arn(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['Arn'] conn.put_key_policy(key_id, 'default', 'new policy') @@ -233,10 +268,13 @@ def test_put_key_policy_via_arn(): def test_put_key_policy_via_alias_should_not_update(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my 
key1', key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', target_key_id=key['KeyMetadata']['KeyId']) + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) - conn.put_key_policy.when.called_with('alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) + conn.put_key_policy.when.called_with( + 'alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') policy['Policy'].should.equal('my policy') @@ -246,7 +284,8 @@ def test_put_key_policy_via_alias_should_not_update(): def test_put_key_policy(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') conn.put_key_policy(key['KeyMetadata']['Arn'], 'default', 'new policy') policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') @@ -257,7 +296,8 @@ def test_put_key_policy(): def test_list_key_policies(): conn = boto.kms.connect_to_region('us-west-2') - key = conn.create_key(policy='my policy', description='my key1', key_usage='ENCRYPT_DECRYPT') + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') key_id = key['KeyMetadata']['KeyId'] policies = conn.list_key_policies(key_id) @@ -323,7 +363,8 @@ def test__create_alias__raises_if_wrong_prefix(): ex = err.exception ex.error_message.should.equal('Invalid identifier') ex.error_code.should.equal('ValidationException') - ex.body.should.equal({'message': 'Invalid identifier', '__type': 'ValidationException'}) + ex.body.should.equal({'message': 'Invalid identifier', + '__type': 'ValidationException'}) ex.reason.should.equal('Bad Request') 
ex.status.should.equal(400) @@ -371,16 +412,19 @@ def test__create_alias__raises_if_alias_has_restricted_characters(): kms.create_alias(alias_name, key_id) ex = err.exception ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal("1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) + ex.body['message'].should.equal( + "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) ex.error_code.should.equal('ValidationException') - ex.message.should.equal("1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) + ex.message.should.equal( + "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) ex.reason.should.equal('Bad Request') ex.status.should.equal(400) @mock_kms_deprecated def test__create_alias__raises_if_alias_has_colon_character(): - # For some reason, colons are not accepted for an alias, even though they are accepted by regex ^[a-zA-Z0-9:/_-]+$ + # For some reason, colons are not accepted for an alias, even though they + # are accepted by regex ^[a-zA-Z0-9:/_-]+$ kms = boto.connect_kms() create_resp = kms.create_key() key_id = create_resp['KeyMetadata']['KeyId'] @@ -394,9 +438,11 @@ def test__create_alias__raises_if_alias_has_colon_character(): kms.create_alias(alias_name, key_id) ex = err.exception ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal("{alias_name} contains invalid characters for an alias".format(**locals())) + ex.body['message'].should.equal( + "{alias_name} contains 
invalid characters for an alias".format(**locals())) ex.error_code.should.equal('ValidationException') - ex.message.should.equal("{alias_name} contains invalid characters for an alias".format(**locals())) + ex.message.should.equal( + "{alias_name} contains invalid characters for an alias".format(**locals())) ex.reason.should.equal('Bad Request') ex.status.should.equal(400) @@ -481,10 +527,12 @@ def test__delete_alias__raises_if_alias_is_not_found(): ex = err.exception ex.body['__type'].should.equal('NotFoundException') - ex.body['message'].should.match(r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) + ex.body['message'].should.match( + r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) ex.box_usage.should.be.none ex.error_code.should.be.none - ex.message.should.match(r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) + ex.message.should.match( + r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) ex.reason.should.equal('Bad Request') ex.request_id.should.be.none ex.status.should.equal(400) @@ -527,7 +575,8 @@ def test__list_aliases(): len([alias for alias in aliases if has_correct_arn(alias) and 'alias/my-alias2' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == alias['TargetKeyId']]).should.equal(3) + len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == + alias['TargetKeyId']]).should.equal(3) len(aliases).should.equal(7) @@ -537,13 +586,17 @@ def test__assert_valid_key_id(): from moto.kms.responses import _assert_valid_key_id import uuid - _assert_valid_key_id.when.called_with("not-a-key").should.throw(JSONResponseError) - _assert_valid_key_id.when.called_with(str(uuid.uuid4())).should_not.throw(JSONResponseError) + _assert_valid_key_id.when.called_with( + "not-a-key").should.throw(JSONResponseError) + _assert_valid_key_id.when.called_with( + 
str(uuid.uuid4())).should_not.throw(JSONResponseError) @mock_kms_deprecated def test__assert_default_policy(): from moto.kms.responses import _assert_default_policy - _assert_default_policy.when.called_with("not-default").should.throw(JSONResponseError) - _assert_default_policy.when.called_with("default").should_not.throw(JSONResponseError) + _assert_default_policy.when.called_with( + "not-default").should.throw(JSONResponseError) + _assert_default_policy.when.called_with( + "default").should_not.throw(JSONResponseError) diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index e24486a2f..9c9e20878 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -102,7 +102,8 @@ def test_describe_instances(): S1L1_i1.should.be.within([i["InstanceId"] for i in response]) S1L1_i2.should.be.within([i["InstanceId"] for i in response]) - response2 = client.describe_instances(InstanceIds=[S1L1_i1, S1L1_i2])['Instances'] + response2 = client.describe_instances( + InstanceIds=[S1L1_i1, S1L1_i2])['Instances'] sorted(response2, key=lambda d: d['InstanceId']).should.equal( sorted(response, key=lambda d: d['InstanceId'])) @@ -168,9 +169,8 @@ def test_ec2_integration(): reservations = ec2.describe_instances()['Reservations'] reservations[0]['Instances'].should.have.length_of(1) instance = reservations[0]['Instances'][0] - opsworks_instance = opsworks.describe_instances(StackId=stack_id)['Instances'][0] + opsworks_instance = opsworks.describe_instances(StackId=stack_id)[ + 'Instances'][0] instance['InstanceId'].should.equal(opsworks_instance['Ec2InstanceId']) instance['PrivateIpAddress'].should.equal(opsworks_instance['PrivateIp']) - - diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index dc268bbe5..31fdeae8c 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -43,7 +43,8 @@ def test_create_layer_response(): Name="_", 
Shortname="TestLayerShortName" ).should.throw( - Exception, re.compile(r'already a layer with shortname "TestLayerShortName"') + Exception, re.compile( + r'already a layer with shortname "TestLayerShortName"') ) @@ -69,4 +70,3 @@ def test_describe_layers(): rv1['Layers'].should.equal(rv2['Layers']) rv1['Layers'][0]['Name'].should.equal("TestLayer") - diff --git a/tests/test_opsworks/test_stack.py b/tests/test_opsworks/test_stack.py index 8d86e4207..5913ce6d5 100644 --- a/tests/test_opsworks/test_stack.py +++ b/tests/test_opsworks/test_stack.py @@ -44,5 +44,3 @@ def test_describe_stacks(): client.describe_stacks.when.called_with(StackIds=["foo"]).should.throw( Exception, re.compile(r'foo') ) - - diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 7a6cab633..090147d11 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -15,14 +15,15 @@ def test_create_database(): conn = boto.rds.connect_to_region("us-west-2") database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', - security_groups=["my_sg"]) + security_groups=["my_sg"]) database.status.should.equal('available') database.id.should.equal("db-master-1") database.allocated_storage.should.equal(10) database.instance_class.should.equal("db.m1.small") database.master_username.should.equal("root") - database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)) + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)) database.security_groups[0].name.should.equal('my_sg') @@ -47,7 +48,8 @@ def test_get_databases(): @mock_rds_deprecated def test_describe_non_existant_database(): conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbinstances.when.called_with("not-a-db").should.throw(BotoServerError) + conn.get_all_dbinstances.when.called_with( + "not-a-db").should.throw(BotoServerError) @disable_on_py3() @@ -66,7 +68,8 @@ def test_delete_database(): @mock_rds_deprecated 
def test_delete_non_existant_database(): conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbinstance.when.called_with("not-a-db").should.throw(BotoServerError) + conn.delete_dbinstance.when.called_with( + "not-a-db").should.throw(BotoServerError) @mock_rds_deprecated @@ -99,7 +102,8 @@ def test_get_security_groups(): @mock_rds_deprecated def test_get_non_existant_security_group(): conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(BotoServerError) + conn.get_all_dbsecurity_groups.when.called_with( + "not-a-sg").should.throw(BotoServerError) @mock_rds_deprecated @@ -116,7 +120,8 @@ def test_delete_database_security_group(): @mock_rds_deprecated def test_delete_non_existant_security_group(): conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(BotoServerError) + conn.delete_dbsecurity_group.when.called_with( + "not-a-db").should.throw(BotoServerError) @disable_on_py3() @@ -137,7 +142,8 @@ def test_security_group_authorize(): def test_add_security_group_to_database(): conn = boto.rds.connect_to_region("us-west-2") - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') database.modify(security_groups=[security_group]) @@ -157,7 +163,8 @@ def test_add_database_subnet_group(): subnet_ids = [subnet1.id, subnet2.id] conn = boto.rds.connect_to_region("us-west-2") - subnet_group = conn.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids) + subnet_group = conn.create_db_subnet_group( + "db_subnet", "my db subnet", subnet_ids) subnet_group.name.should.equal('db_subnet') subnet_group.description.should.equal("my db subnet") list(subnet_group.subnet_ids).should.equal(subnet_ids) @@ -177,7 +184,8 @@ def 
test_describe_database_subnet_group(): list(conn.get_all_db_subnet_groups()).should.have.length_of(2) list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1) - conn.get_all_db_subnet_groups.when.called_with("not-a-subnet").should.throw(BotoServerError) + conn.get_all_db_subnet_groups.when.called_with( + "not-a-subnet").should.throw(BotoServerError) @mock_ec2_deprecated @@ -194,7 +202,8 @@ def test_delete_database_subnet_group(): conn.delete_db_subnet_group("db_subnet1") list(conn.get_all_db_subnet_groups()).should.have.length_of(0) - conn.delete_db_subnet_group.when.called_with("db_subnet1").should.throw(BotoServerError) + conn.delete_db_subnet_group.when.called_with( + "db_subnet1").should.throw(BotoServerError) @disable_on_py3() @@ -209,7 +218,7 @@ def test_create_database_in_subnet_group(): conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', - 'root', 'hunter2', db_subnet_group_name="db_subnet1") + 'root', 'hunter2', db_subnet_group_name="db_subnet1") database = conn.get_all_dbinstances("db-master-1")[0] database.subnet_group.name.should.equal("db_subnet1") @@ -220,9 +229,11 @@ def test_create_database_in_subnet_group(): def test_create_database_replica(): conn = boto.rds.connect_to_region("us-west-2") - primary = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + primary = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - replica = conn.create_dbinstance_read_replica("replica", "db-master-1", "db.m1.small") + replica = conn.create_dbinstance_read_replica( + "replica", "db-master-1", "db.m1.small") replica.id.should.equal("replica") replica.instance_class.should.equal("db.m1.small") status_info = replica.status_infos[0] @@ -238,13 +249,15 @@ def test_create_database_replica(): primary = conn.get_all_dbinstances("db-master-1")[0] 
list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) + @disable_on_py3() @mock_rds_deprecated def test_create_cross_region_database_replica(): west_1_conn = boto.rds.connect_to_region("us-west-1") west_2_conn = boto.rds.connect_to_region("us-west-2") - primary = west_1_conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + primary = west_1_conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1" replica = west_2_conn.create_dbinstance_read_replica( @@ -274,14 +287,15 @@ def test_connecting_to_us_east_1(): conn = boto.rds.connect_to_region("us-east-1") database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', - security_groups=["my_sg"]) + security_groups=["my_sg"]) database.status.should.equal('available') database.id.should.equal("db-master-1") database.allocated_storage.should.equal(10) database.instance_class.should.equal("db.m1.small") database.master_username.should.equal("root") - database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)) + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)) database.security_groups[0].name.should.equal('my_sg') @@ -290,7 +304,8 @@ def test_connecting_to_us_east_1(): def test_create_database_with_iops(): conn = boto.rds.connect_to_region("us-west-2") - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) database.status.should.equal('available') database.iops.should.equal(6000) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 581209655..731bc75c1 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -24,7 +24,8 @@ def test_create_database(): 
database['DBInstance']['AllocatedStorage'].should.equal(10) database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") database['DBInstance']['MasterUsername'].should.equal("root") - database['DBInstance']['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal('my_sg') + database['DBInstance']['DBSecurityGroups'][0][ + 'DBSecurityGroupName'].should.equal('my_sg') @disable_on_py3() @@ -56,14 +57,16 @@ def test_get_databases(): instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") list(instances['DBInstances']).should.have.length_of(1) - instances['DBInstances'][0]['DBInstanceIdentifier'].should.equal("db-master-1") + instances['DBInstances'][0][ + 'DBInstanceIdentifier'].should.equal("db-master-1") @disable_on_py3() @mock_rds2 def test_describe_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_instances.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) + conn.describe_db_instances.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @@ -95,6 +98,7 @@ def test_modify_non_existant_database(): AllocatedStorage=20, ApplyImmediately=True).should.throw(ClientError) + @disable_on_py3() @mock_rds2 def test_reboot_db_instance(): @@ -115,7 +119,8 @@ def test_reboot_db_instance(): @mock_rds2 def test_reboot_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') - conn.reboot_db_instance.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) + conn.reboot_db_instance.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @@ -144,7 +149,8 @@ def test_delete_database(): @mock_rds2 def test_delete_non_existant_database(): conn = boto3.client('rds2', region_name="us-west-2") - conn.delete_db_instance.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) + conn.delete_db_instance.when.called_with( + 
DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @@ -157,7 +163,8 @@ def test_create_option_group(): OptionGroupDescription='test option group') option_group['OptionGroup']['OptionGroupName'].should.equal('test') option_group['OptionGroup']['EngineName'].should.equal('mysql') - option_group['OptionGroup']['OptionGroupDescription'].should.equal('test option group') + option_group['OptionGroup'][ + 'OptionGroupDescription'].should.equal('test option group') option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') @@ -214,14 +221,16 @@ def test_describe_option_group(): MajorEngineVersion='5.6', OptionGroupDescription='test option group') option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0]['OptionGroupName'].should.equal('test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') @disable_on_py3() @mock_rds2 def test_describe_non_existant_option_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.describe_option_groups.when.called_with(OptionGroupName="not-a-option-group").should.throw(ClientError) + conn.describe_option_groups.when.called_with( + OptionGroupName="not-a-option-group").should.throw(ClientError) @disable_on_py3() @@ -233,41 +242,51 @@ def test_delete_option_group(): MajorEngineVersion='5.6', OptionGroupDescription='test option group') option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0]['OptionGroupName'].should.equal('test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') conn.delete_option_group(OptionGroupName='test') - conn.describe_option_groups.when.called_with(OptionGroupName='test').should.throw(ClientError) + conn.describe_option_groups.when.called_with( + OptionGroupName='test').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_delete_non_existant_option_group(): conn = boto3.client('rds', 
region_name='us-west-2') - conn.delete_option_group.when.called_with(OptionGroupName='non-existant').should.throw(ClientError) + conn.delete_option_group.when.called_with( + OptionGroupName='non-existant').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_describe_option_group_options(): conn = boto3.client('rds', region_name='us-west-2') - option_group_options = conn.describe_option_group_options(EngineName='sqlserver-ee') + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee') len(option_group_options['OptionGroupOptions']).should.equal(4) - option_group_options = conn.describe_option_group_options(EngineName='sqlserver-ee', MajorEngineVersion='11.00') + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee', MajorEngineVersion='11.00') len(option_group_options['OptionGroupOptions']).should.equal(2) - option_group_options = conn.describe_option_group_options(EngineName='mysql', MajorEngineVersion='5.6') + option_group_options = conn.describe_option_group_options( + EngineName='mysql', MajorEngineVersion='5.6') len(option_group_options['OptionGroupOptions']).should.equal(1) - conn.describe_option_group_options.when.called_with(EngineName='non-existent').should.throw(ClientError) - conn.describe_option_group_options.when.called_with(EngineName='mysql', MajorEngineVersion='non-existent').should.throw(ClientError) + conn.describe_option_group_options.when.called_with( + EngineName='non-existent').should.throw(ClientError) + conn.describe_option_group_options.when.called_with( + EngineName='mysql', MajorEngineVersion='non-existent').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_modify_option_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', MajorEngineVersion='5.6', OptionGroupDescription='test option group') + conn.create_option_group(OptionGroupName='test', EngineName='mysql', + 
MajorEngineVersion='5.6', OptionGroupDescription='test option group') # TODO: create option and validate before deleting. # if Someone can tell me how the hell to use this function # to add options to an option_group, I can finish coding this. - result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True) + result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[ + ], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True) result['OptionGroup']['EngineName'].should.equal('mysql') result['OptionGroup']['Options'].should.equal([]) result['OptionGroup']['OptionGroupName'].should.equal('test') @@ -277,36 +296,42 @@ def test_modify_option_group(): @mock_rds2 def test_modify_option_group_no_options(): conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', MajorEngineVersion='5.6', OptionGroupDescription='test option group') - conn.modify_option_group.when.called_with(OptionGroupName='test').should.throw(ClientError) + conn.create_option_group(OptionGroupName='test', EngineName='mysql', + MajorEngineVersion='5.6', OptionGroupDescription='test option group') + conn.modify_option_group.when.called_with( + OptionGroupName='test').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_modify_non_existant_option_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) + conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[( + 'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) @disable_on_py3() @mock_rds2 def test_delete_non_existant_database(): conn = 
boto3.client('rds', region_name='us-west-2') - conn.delete_db_instance.when.called_with(DBInstanceIdentifier="not-a-db").should.throw(ClientError) + conn.delete_db_instance.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_list_tags_invalid_arn(): conn = boto3.client('rds', region_name='us-west-2') - conn.list_tags_for_resource.when.called_with(ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) + conn.list_tags_for_resource.when.called_with( + ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_list_tags_db(): conn = boto3.client('rds', region_name='us-west-2') - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') result['TagList'].should.equal([]) conn.create_db_instance(DBInstanceIdentifier='db-with-tags', AllocatedStorage=10, @@ -326,11 +351,12 @@ def test_list_tags_db(): 'Value': 'bar1', }, ]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) @disable_on_py3() @@ -355,7 +381,8 @@ def test_add_tags_db(): 'Value': 'bar1', }, ]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') list(result['TagList']).should.have.length_of(2) conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags', Tags=[ @@ -368,7 +395,8 @@ def test_add_tags_db(): 'Value': 'bar2', }, ]) - result = 
conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') list(result['TagList']).should.have.length_of(3) @@ -394,10 +422,13 @@ def test_remove_tags_db(): 'Value': 'bar1', }, ]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo']) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + conn.remove_tags_from_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') len(result['TagList']).should.equal(1) @@ -409,7 +440,8 @@ def test_add_tags_option_group(): EngineName='mysql', MajorEngineVersion='5.6', OptionGroupDescription='test option group') - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') list(result['TagList']).should.have.length_of(0) conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', Tags=[ @@ -421,7 +453,8 @@ def test_add_tags_option_group(): 'Key': 'foo2', 'Value': 'bar2', }]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') list(result['TagList']).should.have.length_of(2) @@ -433,7 +466,8 @@ def test_remove_tags_option_group(): EngineName='mysql', MajorEngineVersion='5.6', 
OptionGroupDescription='test option group') - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', Tags=[ { @@ -444,11 +478,13 @@ def test_remove_tags_option_group(): 'Key': 'foo2', 'Value': 'bar2', }]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') list(result['TagList']).should.have.length_of(2) conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', TagKeys=['foo']) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') list(result['TagList']).should.have.length_of(1) @@ -457,9 +493,11 @@ def test_remove_tags_option_group(): def test_create_database_security_group(): conn = boto3.client('rds', region_name='us-west-2') - result = conn.create_db_security_group(DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') + result = conn.create_db_security_group( + DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") - result['DBSecurityGroup']['DBSecurityGroupDescription'].should.equal("DB Security Group") + result['DBSecurityGroup'][ + 'DBSecurityGroupDescription'].should.equal("DB Security Group") result['DBSecurityGroup']['IPRanges'].should.equal([]) @@ -471,8 +509,10 @@ def test_get_security_groups(): result = conn.describe_db_security_groups() result['DBSecurityGroups'].should.have.length_of(0) - conn.create_db_security_group(DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') - 
conn.create_db_security_group(DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group( + DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group( + DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') result = conn.describe_db_security_groups() result['DBSecurityGroups'].should.have.length_of(2) @@ -486,14 +526,16 @@ def test_get_security_groups(): @mock_rds2 def test_get_non_existant_security_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_security_groups.when.called_with(DBSecurityGroupName="not-a-sg").should.throw(ClientError) + conn.describe_db_security_groups.when.called_with( + DBSecurityGroupName="not-a-sg").should.throw(ClientError) @disable_on_py3() @mock_rds2 def test_delete_database_security_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_security_group(DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group( + DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') result = conn.describe_db_security_groups() result['DBSecurityGroups'].should.have.length_of(1) @@ -507,7 +549,8 @@ def test_delete_database_security_group(): @mock_rds2 def test_delete_non_existant_security_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_security_group.when.called_with(DBSecurityGroupName="not-a-db").should.throw(ClientError) + conn.delete_db_security_group.when.called_with( + DBSecurityGroupName="not-a-db").should.throw(ClientError) @disable_on_py3() @@ -518,13 +561,13 @@ def test_security_group_authorize(): DBSecurityGroupDescription='DB Security Group') security_group['DBSecurityGroup']['IPRanges'].should.equal([]) - conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', CIDRIP='10.3.2.45/32') result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") 
result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1) - result['DBSecurityGroups'][0]['IPRanges'].should.equal([{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]) + result['DBSecurityGroups'][0]['IPRanges'].should.equal( + [{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]) conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', CIDRIP='10.3.2.46/32') @@ -554,9 +597,10 @@ def test_add_security_group_to_database(): conn.create_db_security_group(DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - DBSecurityGroups=['db_sg']) + DBSecurityGroups=['db_sg']) result = conn.describe_db_instances() - result['DBInstances'][0]['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal('db_sg') + result['DBInstances'][0]['DBSecurityGroups'][0][ + 'DBSecurityGroupName'].should.equal('db_sg') @disable_on_py3() @@ -572,12 +616,13 @@ def test_list_tags_security_group(): 'Key': 'foo'}, {'Value': 'bar1', 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(security_group) + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) result = conn.list_tags_for_resource(ResourceName=resource) result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) @disable_on_py3() @@ -590,7 +635,8 @@ def test_add_tags_security_group(): security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(security_group) + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) conn.add_tags_to_resource(ResourceName=resource, Tags=[{'Value': 'bar', 'Key': 'foo'}, @@ -600,8 +646,9 @@ def test_add_tags_security_group(): 
result = conn.list_tags_for_resource(ResourceName=resource) result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) + @disable_on_py3() @mock_rds2 @@ -617,7 +664,8 @@ def test_remove_tags_security_group(): {'Value': 'bar1', 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(security_group) + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) result = conn.list_tags_for_resource(ResourceName=resource) @@ -630,8 +678,10 @@ def test_remove_tags_security_group(): def test_create_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet1 = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - subnet2 = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] + subnet1 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet2 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] conn = boto3.client('rds', region_name='us-west-2') @@ -639,9 +689,11 @@ def test_create_database_subnet_group(): DBSubnetGroupDescription='my db subnet', SubnetIds=subnet_ids) result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") - result['DBSubnetGroup']['DBSubnetGroupDescription'].should.equal("my db subnet") + result['DBSubnetGroup'][ + 'DBSubnetGroupDescription'].should.equal("my db subnet") subnets = result['DBSubnetGroup']['Subnets'] - subnet_group_ids = [subnets[0]['SubnetIdentifier'], subnets[1]['SubnetIdentifier']] + subnet_group_ids = [subnets[0]['SubnetIdentifier'], + subnets[1]['SubnetIdentifier']] list(subnet_group_ids).should.equal(subnet_ids) @@ -651,7 +703,8 @@ def 
test_create_database_subnet_group(): def test_create_database_in_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', @@ -666,7 +719,8 @@ def test_create_database_in_subnet_group(): Port=1234, DBSubnetGroupName='db_subnet1') result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - result['DBInstances'][0]['DBSubnetGroup']['DBSubnetGroupName'].should.equal('db_subnet1') + result['DBInstances'][0]['DBSubnetGroup'][ + 'DBSubnetGroupName'].should.equal('db_subnet1') @disable_on_py3() @@ -675,7 +729,8 @@ def test_create_database_in_subnet_group(): def test_describe_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", @@ -691,9 +746,11 @@ def test_describe_database_subnet_group(): subnets = resp['DBSubnetGroups'][0]['Subnets'] subnets.should.have.length_of(1) - list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1")['DBSubnetGroups']).should.have.length_of(1) + list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1") + ['DBSubnetGroups']).should.have.length_of(1) - conn.describe_db_subnet_groups.when.called_with(DBSubnetGroupName="not-a-subnet").should.throw(ClientError) + conn.describe_db_subnet_groups.when.called_with( + DBSubnetGroupName="not-a-subnet").should.throw(ClientError) @disable_on_py3() @@ -702,7 +759,8 @@ 
def test_describe_database_subnet_group(): def test_delete_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -718,7 +776,8 @@ def test_delete_database_subnet_group(): result = conn.describe_db_subnet_groups() result['DBSubnetGroups'].should.have.length_of(0) - conn.delete_db_subnet_group.when.called_with(DBSubnetGroupName="db_subnet1").should.throw(ClientError) + conn.delete_db_subnet_group.when.called_with( + DBSubnetGroupName="db_subnet1").should.throw(ClientError) @disable_on_py3() @@ -727,7 +786,8 @@ def test_delete_database_subnet_group(): def test_list_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -740,11 +800,13 @@ def test_list_tags_database_subnet_group(): 'Key': 'foo'}, {'Value': 'bar1', 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) + @disable_on_py3() @mock_ec2 @@ -752,7 +814,8 @@ def test_list_tags_database_subnet_group(): def test_add_tags_database_subnet_group(): vpc_conn 
= boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -773,8 +836,9 @@ def test_add_tags_database_subnet_group(): result = conn.list_tags_for_resource(ResourceName=resource) result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) + {'Value': 'bar1', + 'Key': 'foo1'}]) + @disable_on_py3() @mock_ec2 @@ -782,7 +846,8 @@ def test_add_tags_database_subnet_group(): def test_remove_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -820,17 +885,22 @@ def test_create_database_replica(): replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", SourceDBInstanceIdentifier="db-master-1", DBInstanceClass="db.m1.small") - replica['DBInstance']['ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') + replica['DBInstance'][ + 'ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal(['db-replica-1']) + master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([ + 'db-replica-1']) - 
conn.delete_db_instance(DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) + conn.delete_db_instance( + DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([]) + master['DBInstances'][0][ + 'ReadReplicaDBInstanceIdentifiers'].should.equal([]) + @disable_on_py3() @mock_rds2 @@ -854,19 +924,25 @@ def test_create_database_with_encrypted_storage(): KmsKeyId=key['KeyMetadata']['KeyId']) database['DBInstance']['StorageEncrypted'].should.equal(True) - database['DBInstance']['KmsKeyId'].should.equal(key['KeyMetadata']['KeyId']) + database['DBInstance']['KmsKeyId'].should.equal( + key['KeyMetadata']['KeyId']) + @disable_on_py3() @mock_rds2 def test_create_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupName'].should.equal('test') + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupFamily'].should.equal('mysql5.6') + db_parameter_group['DBParameterGroup'][ + 'Description'].should.equal('test parameter group') - db_parameter_group['DBParameterGroup']['DBParameterGroupName'].should.equal('test') - db_parameter_group['DBParameterGroup']['DBParameterGroupFamily'].should.equal('mysql5.6') - db_parameter_group['DBParameterGroup']['Description'].should.equal('test parameter group') @disable_on_py3() @mock_rds2 @@ -886,8 +962,11 @@ def test_create_db_instance_with_parameter_group(): Port=1234) len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') - 
database['DBInstance']['DBParameterGroups'][0]['ParameterApplyStatus'].should.equal('in-sync') + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + @disable_on_py3() @mock_rds2 @@ -902,8 +981,10 @@ def test_modify_db_instance_with_parameter_group(): Port=1234) len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('default.mysql5.6') - database['DBInstance']['DBParameterGroups'][0]['ParameterApplyStatus'].should.equal('in-sync') + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('default.mysql5.6') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', DBParameterGroupFamily='mysql5.6', @@ -912,10 +993,13 @@ def test_modify_db_instance_with_parameter_group(): DBParameterGroupName='test', ApplyImmediately=True) - database = conn.describe_db_instances(DBInstanceIdentifier='db-master-1')['DBInstances'][0] + database = conn.describe_db_instances( + DBInstanceIdentifier='db-master-1')['DBInstances'][0] len(database['DBParameterGroups']).should.equal(1) - database['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') - database['DBParameterGroups'][0]['ParameterApplyStatus'].should.equal('in-sync') + database['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') @disable_on_py3() @@ -946,15 +1030,18 @@ def test_describe_db_parameter_group(): conn.create_db_parameter_group(DBParameterGroupName='test', DBParameterGroupFamily='mysql5.6', Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') 
- db_parameter_groups['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') @disable_on_py3() @mock_rds2 def test_describe_non_existant_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') - db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') len(db_parameter_groups['DBParameterGroups']).should.equal(0) @@ -963,14 +1050,18 @@ def test_describe_non_existant_db_parameter_group(): def test_delete_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') - db_parameter_groups['DBParameterGroups'][0]['DBParameterGroupName'].should.equal('test') + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') conn.delete_db_parameter_group(DBParameterGroupName='test') - db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName='test') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') len(db_parameter_groups['DBParameterGroups']).should.equal(0) + @disable_on_py3() @mock_rds2 def test_modify_db_parameter_group(): @@ -986,7 +1077,7 @@ def test_modify_db_parameter_group(): 'Description': 'test param', 'ApplyMethod': 'immediate' }] - ) + ) modify_result['DBParameterGroupName'].should.equal('test') @@ -1001,7 +1092,9 @@ def test_modify_db_parameter_group(): 
@mock_rds2 def test_delete_non_existant_db_parameter_group(): conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_parameter_group.when.called_with(DBParameterGroupName='non-existant').should.throw(ClientError) + conn.delete_db_parameter_group.when.called_with( + DBParameterGroupName='non-existant').should.throw(ClientError) + @disable_on_py3() @mock_rds2 @@ -1011,8 +1104,9 @@ def test_create_parameter_group_with_tags(): DBParameterGroupFamily='mysql5.6', Description='test parameter group', Tags=[{ - 'Key': 'foo', - 'Value': 'bar', + 'Key': 'foo', + 'Value': 'bar', }]) - result = conn.list_tags_for_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}]) diff --git a/tests/test_rds2/test_server.py b/tests/test_rds2/test_server.py index 19c2b6e9f..f9489e054 100644 --- a/tests/test_rds2/test_server.py +++ b/tests/test_rds2/test_server.py @@ -11,7 +11,7 @@ Test the different server responses #@mock_rds2 -#def test_list_databases(): +# def test_list_databases(): # backend = server.create_backend_app("rds2") # test_client = backend.test_client() # diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 13acf6d7c..41be8f022 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -35,18 +35,21 @@ def test_create_cluster(): ) cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['ClusterIdentifier'].should.equal(cluster_identifier) cluster['NodeType'].should.equal("dw.hs1.xlarge") cluster['MasterUsername'].should.equal("username") cluster['DBName'].should.equal("my_db") - 
cluster['ClusterSecurityGroups'][0]['ClusterSecurityGroupName'].should.equal("Default") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("Default") cluster['VpcSecurityGroups'].should.equal([]) cluster['ClusterSubnetGroupName'].should.equal(None) cluster['AvailabilityZone'].should.equal("us-east-1d") cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:11:00") - cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") cluster['AutomatedSnapshotRetentionPeriod'].should.equal(10) cluster['Port'].should.equal(1234) cluster['ClusterVersion'].should.equal("1.0") @@ -69,7 +72,8 @@ def test_create_single_node_cluster(): ) cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['ClusterIdentifier'].should.equal(cluster_identifier) cluster['NodeType'].should.equal("dw.hs1.xlarge") @@ -91,13 +95,15 @@ def test_default_cluster_attibutes(): ) cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['DBName'].should.equal("dev") cluster['ClusterSubnetGroupName'].should.equal(None) assert "us-east-" in cluster['AvailabilityZone'] cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:03:30") - cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") cluster['AutomatedSnapshotRetentionPeriod'].should.equal(1) 
cluster['Port'].should.equal(5439) cluster['ClusterVersion'].should.equal("1.0") @@ -127,7 +133,8 @@ def test_create_cluster_in_subnet_group(): ) cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') @@ -153,8 +160,10 @@ def test_create_cluster_with_security_group(): ) cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] - group_names = [group['ClusterSecurityGroupName'] for group in cluster['ClusterSecurityGroups']] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_names = [group['ClusterSecurityGroupName'] + for group in cluster['ClusterSecurityGroups']] set(group_names).should.equal(set(["security_group1", "security_group2"])) @@ -165,7 +174,8 @@ def test_create_cluster_with_vpc_security_groups(): ec2_conn = boto.connect_ec2() redshift_conn = boto.connect_redshift() vpc = vpc_conn.create_vpc("10.0.0.0/16") - security_group = ec2_conn.create_security_group("vpc_security_group", "a group", vpc_id=vpc.id) + security_group = ec2_conn.create_security_group( + "vpc_security_group", "a group", vpc_id=vpc.id) redshift_conn.create_cluster( "my_cluster", @@ -176,8 +186,10 @@ def test_create_cluster_with_vpc_security_groups(): ) cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] - group_ids = [group['VpcSecurityGroupId'] for group in cluster['VpcSecurityGroups']] + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_ids = [group['VpcSecurityGroupId'] + for group in 
cluster['VpcSecurityGroups']] list(group_ids).should.equal([security_group.id]) @@ -199,14 +211,17 @@ def test_create_cluster_with_parameter_group(): ) cluster_response = conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] - cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("my_parameter_group") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") @mock_redshift_deprecated def test_describe_non_existant_cluster(): conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_clusters.when.called_with("not-a-cluster").should.throw(ClusterNotFound) + conn.describe_clusters.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) @mock_redshift_deprecated @@ -221,16 +236,19 @@ def test_delete_cluster(): master_user_password="password", ) - clusters = conn.describe_clusters()['DescribeClustersResponse']['DescribeClustersResult']['Clusters'] + clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(1) conn.delete_cluster(cluster_identifier) - clusters = conn.describe_clusters()['DescribeClustersResponse']['DescribeClustersResult']['Clusters'] + clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(0) # Delete invalid id - conn.delete_cluster.when.called_with("not-a-cluster").should.throw(ClusterNotFound) + conn.delete_cluster.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) @mock_redshift_deprecated @@ -269,13 +287,16 @@ def test_modify_cluster(): ) cluster_response = conn.describe_clusters("new_identifier") - cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + cluster = 
cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] cluster['ClusterIdentifier'].should.equal("new_identifier") cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['ClusterSecurityGroups'][0]['ClusterSecurityGroupName'].should.equal("security_group") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("security_group") cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00") - cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("my_parameter_group") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7) cluster['AllowVersionUpgrade'].should.equal(False) cluster['NumberOfNodes'].should.equal(2) @@ -297,12 +318,15 @@ def test_create_cluster_subnet_group(): subnet_ids=[subnet1.id, subnet2.id], ) - subnets_response = redshift_conn.describe_cluster_subnet_groups("my_subnet") - my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse']['DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] + subnets_response = redshift_conn.describe_cluster_subnet_groups( + "my_subnet") + my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") my_subnet['Description'].should.equal("This is my subnet group") - subnet_ids = [subnet['SubnetIdentifier'] for subnet in my_subnet['Subnets']] + subnet_ids = [subnet['SubnetIdentifier'] + for subnet in my_subnet['Subnets']] set(subnet_ids).should.equal(set([subnet1.id, subnet2.id])) @@ -320,7 +344,8 @@ def test_create_invalid_cluster_subnet_group(): @mock_redshift_deprecated def test_describe_non_existant_subnet_group(): conn = boto.redshift.connect_to_region("us-east-1") - 
conn.describe_cluster_subnet_groups.when.called_with("not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + conn.describe_cluster_subnet_groups.when.called_with( + "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) @mock_redshift_deprecated @@ -338,17 +363,20 @@ def test_delete_cluster_subnet_group(): ) subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse']['DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] subnets.should.have.length_of(1) redshift_conn.delete_cluster_subnet_group("my_subnet") subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse']['DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] subnets.should.have.length_of(0) # Delete invalid id - redshift_conn.delete_cluster_subnet_group.when.called_with("not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + redshift_conn.delete_cluster_subnet_group.when.called_with( + "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) @mock_redshift_deprecated @@ -359,8 +387,10 @@ def test_create_cluster_security_group(): "This is my security group", ) - groups_response = conn.describe_cluster_security_groups("my_security_group") - my_group = groups_response['DescribeClusterSecurityGroupsResponse']['DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] + groups_response = conn.describe_cluster_security_groups( + "my_security_group") + my_group = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] my_group['ClusterSecurityGroupName'].should.equal("my_security_group") 
my_group['Description'].should.equal("This is my security group") @@ -370,7 +400,8 @@ def test_create_cluster_security_group(): @mock_redshift_deprecated def test_describe_non_existant_security_group(): conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_security_groups.when.called_with("not-a-security-group").should.throw(ClusterSecurityGroupNotFound) + conn.describe_cluster_security_groups.when.called_with( + "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) @mock_redshift_deprecated @@ -382,17 +413,20 @@ def test_delete_cluster_security_group(): ) groups_response = conn.describe_cluster_security_groups() - groups = groups_response['DescribeClusterSecurityGroupsResponse']['DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] + groups = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] groups.should.have.length_of(2) # The default group already exists conn.delete_cluster_security_group("my_security_group") groups_response = conn.describe_cluster_security_groups() - groups = groups_response['DescribeClusterSecurityGroupsResponse']['DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] + groups = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] groups.should.have.length_of(1) # Delete invalid id - conn.delete_cluster_security_group.when.called_with("not-a-security-group").should.throw(ClusterSecurityGroupNotFound) + conn.delete_cluster_security_group.when.called_with( + "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) @mock_redshift_deprecated @@ -404,8 +438,10 @@ def test_create_cluster_parameter_group(): "This is my parameter group", ) - groups_response = conn.describe_cluster_parameter_groups("my_parameter_group") - my_group = 
groups_response['DescribeClusterParameterGroupsResponse']['DescribeClusterParameterGroupsResult']['ParameterGroups'][0] + groups_response = conn.describe_cluster_parameter_groups( + "my_parameter_group") + my_group = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'][0] my_group['ParameterGroupName'].should.equal("my_parameter_group") my_group['ParameterGroupFamily'].should.equal("redshift-1.0") @@ -415,7 +451,8 @@ def test_create_cluster_parameter_group(): @mock_redshift_deprecated def test_describe_non_existant_parameter_group(): conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_parameter_groups.when.called_with("not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + conn.describe_cluster_parameter_groups.when.called_with( + "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) @mock_redshift_deprecated @@ -428,14 +465,17 @@ def test_delete_cluster_parameter_group(): ) groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse']['DescribeClusterParameterGroupsResult']['ParameterGroups'] + groups = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'] groups.should.have.length_of(2) # The default group already exists conn.delete_cluster_parameter_group("my_parameter_group") groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse']['DescribeClusterParameterGroupsResult']['ParameterGroups'] + groups = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'] groups.should.have.length_of(1) # Delete invalid id - conn.delete_cluster_parameter_group.when.called_with("not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + 
conn.delete_cluster_parameter_group.when.called_with( + "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) diff --git a/tests/test_redshift/test_server.py b/tests/test_redshift/test_server.py index a6bdc93f3..ba407ab4c 100644 --- a/tests/test_redshift/test_server.py +++ b/tests/test_redshift/test_server.py @@ -19,5 +19,6 @@ def test_describe_clusters(): res = test_client.get('/?Action=DescribeClusters') json_data = json.loads(res.data.decode("utf-8")) - clusters = json_data['DescribeClustersResponse']['DescribeClustersResult']['Clusters'] + clusters = json_data['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] list(clusters).should.equal([]) diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index f376375a0..ea8609556 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -23,15 +23,18 @@ def test_hosted_zone(): zones = conn.get_all_hosted_zones() len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(2) - id1 = firstzone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + id1 = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] zone = conn.get_hosted_zone(id1) - zone["GetHostedZoneResponse"]["HostedZone"]["Name"].should.equal("testdns.aws.com.") + zone["GetHostedZoneResponse"]["HostedZone"][ + "Name"].should.equal("testdns.aws.com.") conn.delete_hosted_zone(id1) zones = conn.get_all_hosted_zones() len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) - conn.get_hosted_zone.when.called_with("abcd").should.throw(boto.route53.exception.DNSServerError, "404 Not Found") + conn.get_hosted_zone.when.called_with("abcd").should.throw( + boto.route53.exception.DNSServerError, "404 Not Found") @mock_route53_deprecated @@ -42,7 +45,8 @@ def test_rrset(): boto.route53.exception.DNSServerError, "404 Not Found") zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = 
zone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] changes = ResourceRecordSets(conn, zoneid) change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") @@ -105,15 +109,18 @@ def test_rrset(): rrsets = conn.get_all_rrsets(zoneid, type="A") rrsets.should.have.length_of(2) - rrsets = conn.get_all_rrsets(zoneid, name="foo.bar.testdns.aws.com", type="A") + rrsets = conn.get_all_rrsets( + zoneid, name="foo.bar.testdns.aws.com", type="A") rrsets.should.have.length_of(1) rrsets[0].resource_records[0].should.equal('1.2.3.4') - rrsets = conn.get_all_rrsets(zoneid, name="bar.foo.testdns.aws.com", type="A") + rrsets = conn.get_all_rrsets( + zoneid, name="bar.foo.testdns.aws.com", type="A") rrsets.should.have.length_of(1) rrsets[0].resource_records[0].should.equal('5.6.7.8') - rrsets = conn.get_all_rrsets(zoneid, name="foo.foo.testdns.aws.com", type="A") + rrsets = conn.get_all_rrsets( + zoneid, name="foo.foo.testdns.aws.com", type="A") rrsets.should.have.length_of(0) @@ -121,7 +128,8 @@ def test_rrset(): def test_rrset_with_multiple_values(): conn = boto.connect_route53('the_key', 'the_secret') zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] changes = ResourceRecordSets(conn, zoneid) change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") @@ -138,11 +146,14 @@ def test_rrset_with_multiple_values(): def test_alias_rrset(): conn = boto.connect_route53('the_key', 'the_secret') zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] changes = ResourceRecordSets(conn, zoneid) - changes.add_change("CREATE", "foo.alias.testdns.aws.com", "A", 
alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="foo.testdns.aws.com") - changes.add_change("CREATE", "bar.alias.testdns.aws.com", "CNAME", alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="bar.testdns.aws.com") + changes.add_change("CREATE", "foo.alias.testdns.aws.com", "A", + alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="foo.testdns.aws.com") + changes.add_change("CREATE", "bar.alias.testdns.aws.com", "CNAME", + alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="bar.testdns.aws.com") changes.commit() rrsets = conn.get_all_rrsets(zoneid, type="A") @@ -169,7 +180,8 @@ def test_create_health_check(): ) conn.create_health_check(check) - checks = conn.get_list_health_checks()['ListHealthChecksResponse']['HealthChecks'] + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] list(checks).should.have.length_of(1) check = checks[0] config = check['HealthCheckConfig'] @@ -195,12 +207,14 @@ def test_delete_health_check(): ) conn.create_health_check(check) - checks = conn.get_list_health_checks()['ListHealthChecksResponse']['HealthChecks'] + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] list(checks).should.have.length_of(1) health_check_id = checks[0]['Id'] conn.delete_health_check(health_check_id) - checks = conn.get_list_health_checks()['ListHealthChecksResponse']['HealthChecks'] + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] list(checks).should.have.length_of(0) @@ -214,14 +228,17 @@ def test_use_health_check_in_resource_record_set(): hc_type="HTTP", resource_path="/", ) - check = conn.create_health_check(check)['CreateHealthCheckResponse']['HealthCheck'] + check = conn.create_health_check( + check)['CreateHealthCheckResponse']['HealthCheck'] check_id = check['Id'] zone = conn.create_hosted_zone("testdns.aws.com") - zone_id = zone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + zone_id = 
zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] changes = ResourceRecordSets(conn, zone_id) - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A", health_check=check_id) + change = changes.add_change( + "CREATE", "foo.bar.testdns.aws.com", "A", health_check=check_id) change.add_value("1.2.3.4") changes.commit() @@ -233,14 +250,18 @@ def test_use_health_check_in_resource_record_set(): def test_hosted_zone_comment_preserved(): conn = boto.connect_route53('the_key', 'the_secret') - firstzone = conn.create_hosted_zone("testdns.aws.com.", comment="test comment") - zone_id = firstzone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + firstzone = conn.create_hosted_zone( + "testdns.aws.com.", comment="test comment") + zone_id = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] hosted_zone = conn.get_hosted_zone(zone_id) - hosted_zone["GetHostedZoneResponse"]["HostedZone"]["Config"]["Comment"].should.equal("test comment") + hosted_zone["GetHostedZoneResponse"]["HostedZone"][ + "Config"]["Comment"].should.equal("test comment") hosted_zones = conn.get_all_hosted_zones() - hosted_zones["ListHostedZonesResponse"]["HostedZones"][0]["Config"]["Comment"].should.equal("test comment") + hosted_zones["ListHostedZonesResponse"]["HostedZones"][ + 0]["Config"]["Comment"].should.equal("test comment") zone = conn.get_zone("testdns.aws.com.") zone.config["Comment"].should.equal("test comment") @@ -253,16 +274,20 @@ def test_deleting_weighted_route(): conn.create_hosted_zone("testdns.aws.com.") zone = conn.get_zone("testdns.aws.com.") - zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-foo', '50')) - zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-bar', '50')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-foo', '50')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-bar', 
'50')) cnames = zone.get_cname('cname.testdns.aws.com.', all=True) cnames.should.have.length_of(2) - foo_cname = [cname for cname in cnames if cname.identifier == 'success-test-foo'][0] + foo_cname = [cname for cname in cnames if cname.identifier == + 'success-test-foo'][0] zone.delete_record(foo_cname) cname = zone.get_cname('cname.testdns.aws.com.', all=True) - # When get_cname only had one result, it returns just that result instead of a list. + # When get_cname only had one result, it returns just that result instead + # of a list. cname.identifier.should.equal('success-test-bar') @@ -273,17 +298,21 @@ def test_deleting_latency_route(): conn.create_hosted_zone("testdns.aws.com.") zone = conn.get_zone("testdns.aws.com.") - zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-foo', 'us-west-2')) - zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-bar', 'us-west-1')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-foo', 'us-west-2')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-bar', 'us-west-1')) cnames = zone.get_cname('cname.testdns.aws.com.', all=True) cnames.should.have.length_of(2) - foo_cname = [cname for cname in cnames if cname.identifier == 'success-test-foo'][0] + foo_cname = [cname for cname in cnames if cname.identifier == + 'success-test-foo'][0] foo_cname.region.should.equal('us-west-2') zone.delete_record(foo_cname) cname = zone.get_cname('cname.testdns.aws.com.', all=True) - # When get_cname only had one result, it returns just that result instead of a list. + # When get_cname only had one result, it returns just that result instead + # of a list. 
cname.identifier.should.equal('success-test-bar') cname.region.should.equal('us-west-1') @@ -292,15 +321,19 @@ def test_deleting_latency_route(): def test_hosted_zone_private_zone_preserved(): conn = boto.connect_route53('the_key', 'the_secret') - firstzone = conn.create_hosted_zone("testdns.aws.com.", private_zone=True, vpc_id='vpc-fake', vpc_region='us-east-1') - zone_id = firstzone["CreateHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1] + firstzone = conn.create_hosted_zone( + "testdns.aws.com.", private_zone=True, vpc_id='vpc-fake', vpc_region='us-east-1') + zone_id = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] hosted_zone = conn.get_hosted_zone(zone_id) # in (original) boto, these bools returned as strings. - hosted_zone["GetHostedZoneResponse"]["HostedZone"]["Config"]["PrivateZone"].should.equal('True') + hosted_zone["GetHostedZoneResponse"]["HostedZone"][ + "Config"]["PrivateZone"].should.equal('True') hosted_zones = conn.get_all_hosted_zones() - hosted_zones["ListHostedZonesResponse"]["HostedZones"][0]["Config"]["PrivateZone"].should.equal('True') + hosted_zones["ListHostedZonesResponse"]["HostedZones"][ + 0]["Config"]["PrivateZone"].should.equal('True') zone = conn.get_zone("testdns.aws.com.") zone.config["PrivateZone"].should.equal('True') @@ -331,6 +364,7 @@ def test_hosted_zone_private_zone_preserved_boto3(): # zone = conn.list_hosted_zones_by_name(DNSName="testdns.aws.com.") # zone.config["PrivateZone"].should.equal(True) + @mock_route53 def test_list_or_change_tags_for_resource_request(): conn = boto3.client('route53', region_name='us-east-1') @@ -359,7 +393,8 @@ def test_list_or_change_tags_for_resource_request(): ) # Check to make sure that the response has the 'ResourceTagSet' key - response = conn.list_tags_for_resource(ResourceType='healthcheck', ResourceId=healthcheck_id) + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) 
response.should.contain('ResourceTagSet') # Validate that each key was added @@ -376,7 +411,8 @@ def test_list_or_change_tags_for_resource_request(): ) # Check to make sure that the response has the 'ResourceTagSet' key - response = conn.list_tags_for_resource(ResourceType='healthcheck', ResourceId=healthcheck_id) + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) response.should.contain('ResourceTagSet') response['ResourceTagSet']['Tags'].should_not.contain(tag1) response['ResourceTagSet']['Tags'].should.contain(tag2) @@ -388,7 +424,8 @@ def test_list_or_change_tags_for_resource_request(): RemoveTagKeys=[tag2['Key']] ) - response = conn.list_tags_for_resource(ResourceType='healthcheck', ResourceId=healthcheck_id) + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) response['ResourceTagSet']['Tags'].should_not.contain(tag2) # Re-add the tags @@ -405,5 +442,6 @@ def test_list_or_change_tags_for_resource_request(): RemoveTagKeys=[tag1['Key'], tag2['Key']] ) - response = conn.list_tags_for_resource(ResourceType='healthcheck', ResourceId=healthcheck_id) + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) response['ResourceTagSet']['Tags'].should.be.empty diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index e424ba6a3..32b772abe 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -48,6 +48,7 @@ def reduced_min_part_size(f): class MyModel(object): + def __init__(self, name, value): self.name = name self.value = value @@ -67,7 +68,8 @@ def test_my_model_save(): model_instance = MyModel('steve', 'is awesome') model_instance.save() - body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8") + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") assert body == b'is awesome' @@ -110,7 +112,8 @@ def test_multipart_upload(): 
multipart.upload_part_from_file(BytesIO(part2), 2) multipart.complete_upload() # we should get both parts as the key contents - bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2) + bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + part2) @mock_s3_deprecated @@ -127,7 +130,8 @@ def test_multipart_upload_out_of_order(): multipart.upload_part_from_file(BytesIO(part1), 2) multipart.complete_upload() # we should get both parts as the key contents - bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2) + bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + part2) @mock_s3_deprecated @@ -136,7 +140,8 @@ def test_multipart_upload_with_headers(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") - multipart = bucket.initiate_multipart_upload("the-key", metadata={"foo": "bar"}) + multipart = bucket.initiate_multipart_upload( + "the-key", metadata={"foo": "bar"}) part1 = b'0' * 10 multipart.upload_part_from_file(BytesIO(part1), 1) multipart.complete_upload() @@ -159,7 +164,8 @@ def test_multipart_upload_with_copy_key(): multipart.upload_part_from_file(BytesIO(part1), 1) multipart.copy_part_from_key("foobar", "original-key", 2, 0, 3) multipart.complete_upload() - bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + b"key_") + bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + b"key_") @mock_s3_deprecated @@ -229,7 +235,8 @@ def test_multipart_duplicate_upload(): multipart.upload_part_from_file(BytesIO(part2), 2) multipart.complete_upload() # We should get only one copy of part 1. 
- bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2) + bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + part2) @mock_s3_deprecated @@ -260,7 +267,8 @@ def test_key_save_to_missing_bucket(): key = Key(bucket) key.key = "the-key" - key.set_contents_from_string.when.called_with("foobar").should.throw(S3ResponseError) + key.set_contents_from_string.when.called_with( + "foobar").should.throw(S3ResponseError) @mock_s3_deprecated @@ -275,7 +283,8 @@ def test_missing_key_urllib2(): conn = boto.connect_s3('the_key', 'the_secret') conn.create_bucket("foobar") - urlopen.when.called_with("http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError) + urlopen.when.called_with( + "http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError) @mock_s3_deprecated @@ -315,7 +324,8 @@ def test_large_key_save(): key.key = "the-key" key.set_contents_from_string("foobar" * 100000) - bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar' * 100000) + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) @mock_s3_deprecated @@ -328,8 +338,10 @@ def test_copy_key(): bucket.copy_key('new-key', 'foobar', 'the-key') - bucket.get_key("the-key").get_contents_as_string().should.equal(b"some value") - bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "new-key").get_contents_as_string().should.equal(b"some value") @mock_s3_deprecated @@ -344,8 +356,10 @@ def test_copy_key_with_version(): bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id='0') - bucket.get_key("the-key").get_contents_as_string().should.equal(b"another value") - bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b"another value") + bucket.get_key( + 
"new-key").get_contents_as_string().should.equal(b"some value") @mock_s3_deprecated @@ -373,7 +387,8 @@ def test_copy_key_replace_metadata(): metadata={'momd': 'Mometadatastring'}) bucket.get_key("new-key").get_metadata('md').should.be.none - bucket.get_key("new-key").get_metadata('momd').should.equal('Mometadatastring') + bucket.get_key( + "new-key").get_metadata('momd').should.equal('Mometadatastring') @freeze_time("2012-01-01 12:00:00") @@ -389,7 +404,8 @@ def test_last_modified(): rs = bucket.get_all_keys() rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z') - bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') + bucket.get_key( + "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') @mock_s3_deprecated @@ -401,7 +417,8 @@ def test_missing_bucket(): @mock_s3_deprecated def test_bucket_with_dash(): conn = boto.connect_s3('the_key', 'the_secret') - conn.get_bucket.when.called_with('mybucket-test').should.throw(S3ResponseError) + conn.get_bucket.when.called_with( + 'mybucket-test').should.throw(S3ResponseError) @mock_s3_deprecated @@ -432,7 +449,8 @@ def test_create_existing_bucket_in_us_east_1(): @mock_s3_deprecated def test_other_region(): - conn = S3Connection('key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com') + conn = S3Connection( + 'key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com') conn.create_bucket("foobar") list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) @@ -613,7 +631,8 @@ def test_bucket_key_listing_order(): delimiter = None keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] - keys.should.equal([u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key']) + keys.should.equal( + [u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key']) delimiter = '/' keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] @@ -640,7 +659,8 @@ def test_copy_key_reduced_redundancy(): key.key = "the-key" key.set_contents_from_string("some 
value") - bucket.copy_key('new-key', 'foobar', 'the-key', storage_class='REDUCED_REDUNDANCY') + bucket.copy_key('new-key', 'foobar', 'the-key', + storage_class='REDUCED_REDUNDANCY') # we use the bucket iterator because of: # https:/github.com/boto/boto/issues/1173 @@ -886,34 +906,54 @@ def test_ranged_get(): key.set_contents_from_string(rep * 10) # Implicitly bounded range requests. - key.get_contents_as_string(headers={'Range': 'bytes=0-'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=50-'}).should.equal(rep * 5) - key.get_contents_as_string(headers={'Range': 'bytes=99-'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=0-'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=50-'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=99-'}).should.equal(b'9') # Explicitly bounded range requests starting from the first byte. - key.get_contents_as_string(headers={'Range': 'bytes=0-0'}).should.equal(b'0') - key.get_contents_as_string(headers={'Range': 'bytes=0-49'}).should.equal(rep * 5) - key.get_contents_as_string(headers={'Range': 'bytes=0-99'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=0-100'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=0-700'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=0-0'}).should.equal(b'0') + key.get_contents_as_string( + headers={'Range': 'bytes=0-49'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=0-99'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=0-100'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=0-700'}).should.equal(rep * 10) # Explicitly bounded range requests starting from the / a middle byte. 
- key.get_contents_as_string(headers={'Range': 'bytes=50-54'}).should.equal(rep[:5]) - key.get_contents_as_string(headers={'Range': 'bytes=50-99'}).should.equal(rep * 5) - key.get_contents_as_string(headers={'Range': 'bytes=50-100'}).should.equal(rep * 5) - key.get_contents_as_string(headers={'Range': 'bytes=50-700'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=50-54'}).should.equal(rep[:5]) + key.get_contents_as_string( + headers={'Range': 'bytes=50-99'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=50-100'}).should.equal(rep * 5) + key.get_contents_as_string( + headers={'Range': 'bytes=50-700'}).should.equal(rep * 5) # Explicitly bounded range requests starting from the last byte. - key.get_contents_as_string(headers={'Range': 'bytes=99-99'}).should.equal(b'9') - key.get_contents_as_string(headers={'Range': 'bytes=99-100'}).should.equal(b'9') - key.get_contents_as_string(headers={'Range': 'bytes=99-700'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=99-99'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=99-100'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=99-700'}).should.equal(b'9') # Suffix range requests. 
- key.get_contents_as_string(headers={'Range': 'bytes=-1'}).should.equal(b'9') - key.get_contents_as_string(headers={'Range': 'bytes=-60'}).should.equal(rep * 6) - key.get_contents_as_string(headers={'Range': 'bytes=-100'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=-101'}).should.equal(rep * 10) - key.get_contents_as_string(headers={'Range': 'bytes=-700'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=-1'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=-60'}).should.equal(rep * 6) + key.get_contents_as_string( + headers={'Range': 'bytes=-100'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=-101'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=-700'}).should.equal(rep * 10) key.size.should.equal(100) @@ -1006,6 +1046,7 @@ def test_boto3_key_etag(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"') + @mock_s3 def test_boto3_list_keys_xml_escaped(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1045,13 +1086,13 @@ def test_boto3_list_objects_v2_truncated_response(): assert resp['IsTruncated'] == True assert 'Delimiter' not in resp assert 'StartAfter' not in resp - assert 'Owner' not in listed_object # owner info was not requested + assert 'Owner' not in listed_object # owner info was not requested next_token = resp['NextContinuationToken'] - # Second list - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + resp = s3.list_objects_v2( + Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) listed_object = resp['Contents'][0] assert listed_object['Key'] == 'three' @@ -1065,9 +1106,9 @@ def test_boto3_list_objects_v2_truncated_response(): next_token = resp['NextContinuationToken'] - # Third list - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + resp = 
s3.list_objects_v2( + Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) listed_object = resp['Contents'][0] assert listed_object['Key'] == 'two' @@ -1107,7 +1148,7 @@ def test_boto3_list_objects_v2_truncated_response_start_after(): # Second list # The ContinuationToken must take precedence over StartAfter. resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one', - ContinuationToken=next_token) + ContinuationToken=next_token) listed_object = resp['Contents'][0] assert listed_object['Key'] == 'two' @@ -1143,7 +1184,8 @@ def test_boto3_bucket_create(): s3.Object('blah', 'hello.txt').put(Body="some text") - s3.Object('blah', 'hello.txt').get()['Body'].read().decode("utf-8").should.equal("some text") + s3.Object('blah', 'hello.txt').get()['Body'].read().decode( + "utf-8").should.equal("some text") @mock_s3 @@ -1153,7 +1195,8 @@ def test_boto3_bucket_create_eu_central(): s3.Object('blah', 'hello.txt').put(Body="some text") - s3.Object('blah', 'hello.txt').get()['Body'].read().decode("utf-8").should.equal("some text") + s3.Object('blah', 'hello.txt').get()['Body'].read().decode( + "utf-8").should.equal("some text") @mock_s3 @@ -1163,10 +1206,12 @@ def test_boto3_head_object(): s3.Object('blah', 'hello.txt').put(Body="some text") - s3.Object('blah', 'hello.txt').meta.client.head_object(Bucket='blah', Key='hello.txt') + s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt') with assert_raises(ClientError): - s3.Object('blah', 'hello2.txt').meta.client.head_object(Bucket='blah', Key='hello_bad.txt') + s3.Object('blah', 'hello2.txt').meta.client.head_object( + Bucket='blah', Key='hello_bad.txt') @mock_s3 @@ -1176,7 +1221,8 @@ def test_boto3_get_object(): s3.Object('blah', 'hello.txt').put(Body="some text") - s3.Object('blah', 'hello.txt').meta.client.head_object(Bucket='blah', Key='hello.txt') + s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt') with assert_raises(ClientError) 
as e: s3.Object('blah', 'hello2.txt').get() diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index f0a70bc6f..5cae8f790 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -56,9 +56,9 @@ def test_lifecycle_multi(): lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2)) lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date)) lifecycle.add_rule("4", "4/", "Enabled", None, - Transition(days=4, storage_class=sc)) + Transition(days=4, storage_class=sc)) lifecycle.add_rule("5", "5/", "Enabled", None, - Transition(date=date, storage_class=sc)) + Transition(date=date, storage_class=sc)) bucket.configure_lifecycle(lifecycle) # read the lifecycle back diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index 3b1d4a01a..b4f56d89a 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -8,11 +8,14 @@ def test_base_url(): def test_localhost_bucket(): - expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc')).should.equal("wfoobar") + expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc') + ).should.equal("wfoobar") def test_localhost_without_bucket(): - expect(bucket_name_from_url('https://www.localhost:5000/def')).should.equal(None) + expect(bucket_name_from_url( + 'https://www.localhost:5000/def')).should.equal(None) + def test_versioned_key_store(): d = _VersionedKeyStore() diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 303224541..f6b8f889c 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -31,7 +31,8 @@ def test_s3_server_bucket_create(): res.status_code.should.equal(200) res.data.should.contain(b"ListBucketResult") - res = test_client.put('/bar', 'http://foobaz.localhost:5000/', data='test value') + res = test_client.put( + '/bar', 'http://foobaz.localhost:5000/', data='test value') res.status_code.should.equal(200) res = test_client.get('/bar', 
'http://foobaz.localhost:5000/') @@ -45,7 +46,8 @@ def test_s3_server_bucket_versioning(): # Just enough XML to enable versioning body = 'Enabled' - res = test_client.put('/?versioning', 'http://foobaz.localhost:5000', data=body) + res = test_client.put( + '/?versioning', 'http://foobaz.localhost:5000', data=body) res.status_code.should.equal(200) diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_bucket_path_server.py index adc5de532..c67a2bcaa 100644 --- a/tests/test_s3bucket_path/test_bucket_path_server.py +++ b/tests/test_s3bucket_path/test_bucket_path_server.py @@ -44,7 +44,8 @@ def test_s3_server_bucket_create(): res = test_client.get('/missing-bucket', 'http://localhost:5000') res.status_code.should.equal(404) - res = test_client.put('/foobar/bar', 'http://localhost:5000', data='test value') + res = test_client.put( + '/foobar/bar', 'http://localhost:5000', data='test value') res.status_code.should.equal(200) res = test_client.get('/foobar/bar', 'http://localhost:5000') diff --git a/tests/test_s3bucket_path/test_s3bucket_path.py b/tests/test_s3bucket_path/test_s3bucket_path.py index 528c75368..21d786c61 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path.py +++ b/tests/test_s3bucket_path/test_s3bucket_path.py @@ -20,6 +20,7 @@ def create_connection(key=None, secret=None): class MyModel(object): + def __init__(self, name, value): self.name = name self.value = value @@ -42,7 +43,8 @@ def test_my_model_save(): model_instance = MyModel('steve', 'is awesome') model_instance.save() - conn.get_bucket('mybucket').get_key('steve').get_contents_as_string().should.equal(b'is awesome') + conn.get_bucket('mybucket').get_key( + 'steve').get_contents_as_string().should.equal(b'is awesome') @mock_s3_deprecated @@ -57,7 +59,8 @@ def test_missing_key_urllib2(): conn = create_connection('the_key', 'the_secret') conn.create_bucket("foobar") - 
urlopen.when.called_with("http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError) + urlopen.when.called_with( + "http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError) @mock_s3_deprecated @@ -93,7 +96,8 @@ def test_large_key_save(): key.key = "the-key" key.set_contents_from_string("foobar" * 100000) - bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar' * 100000) + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) @mock_s3_deprecated @@ -106,8 +110,10 @@ def test_copy_key(): bucket.copy_key('new-key', 'foobar', 'the-key') - bucket.get_key("the-key").get_contents_as_string().should.equal(b"some value") - bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "new-key").get_contents_as_string().should.equal(b"some value") @mock_s3_deprecated @@ -135,7 +141,8 @@ def test_last_modified(): rs = bucket.get_all_keys() rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z') - bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') + bucket.get_key( + "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') @mock_s3_deprecated @@ -147,7 +154,8 @@ def test_missing_bucket(): @mock_s3_deprecated def test_bucket_with_dash(): conn = create_connection('the_key', 'the_secret') - conn.get_bucket.when.called_with('mybucket-test').should.throw(S3ResponseError) + conn.get_bucket.when.called_with( + 'mybucket-test').should.throw(S3ResponseError) @mock_s3_deprecated @@ -268,7 +276,8 @@ def test_bucket_key_listing_order(): delimiter = None keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] - keys.should.equal(['toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key']) + keys.should.equal( + ['toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key']) delimiter = '/' keys = [x.name for x in bucket.list(prefix + 'x', 
delimiter)] diff --git a/tests/test_s3bucket_path/test_s3bucket_path_utils.py b/tests/test_s3bucket_path/test_s3bucket_path_utils.py index 8497f8184..c607ea2ec 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path_utils.py +++ b/tests/test_s3bucket_path/test_s3bucket_path_utils.py @@ -8,7 +8,8 @@ def test_base_url(): def test_localhost_bucket(): - expect(bucket_name_from_url('https://localhost:5000/wfoobar/abc')).should.equal("wfoobar") + expect(bucket_name_from_url('https://localhost:5000/wfoobar/abc') + ).should.equal("wfoobar") def test_localhost_without_bucket(): diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 7771b9a65..431d42e1d 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -15,7 +15,8 @@ def test_verify_email_identity(): conn.verify_email_identity("test@example.com") identities = conn.list_identities() - address = identities['ListIdentitiesResponse']['ListIdentitiesResult']['Identities'][0] + address = identities['ListIdentitiesResponse'][ + 'ListIdentitiesResult']['Identities'][0] address.should.equal('test@example.com') @@ -27,7 +28,8 @@ def test_domain_verify(): conn.verify_domain_identity("domain2.com") identities = conn.list_identities() - domains = list(identities['ListIdentitiesResponse']['ListIdentitiesResult']['Identities']) + domains = list(identities['ListIdentitiesResponse'][ + 'ListIdentitiesResult']['Identities']) domains.should.equal(['domain1.com', 'domain2.com']) @@ -36,9 +38,11 @@ def test_delete_identity(): conn = boto.connect_ses('the_key', 'the_secret') conn.verify_email_identity("test@example.com") - conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult']['Identities'].should.have.length_of(1) + conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][ + 'Identities'].should.have.length_of(1) conn.delete_identity("test@example.com") - conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult']['Identities'].should.have.length_of(0) 
+ conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][ + 'Identities'].should.have.length_of(0) @mock_ses_deprecated @@ -50,12 +54,15 @@ def test_send_email(): "test body", "test_to@example.com").should.throw(BotoServerError) conn.verify_email_identity("test@example.com") - conn.send_email("test@example.com", "test subject", "test body", "test_to@example.com") + conn.send_email("test@example.com", "test subject", + "test body", "test_to@example.com") send_quota = conn.get_send_quota() - sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours']) + sent_count = int(send_quota['GetSendQuotaResponse'][ + 'GetSendQuotaResult']['SentLast24Hours']) sent_count.should.equal(1) + @mock_ses_deprecated def test_send_html_email(): conn = boto.connect_ses('the_key', 'the_secret') @@ -65,12 +72,15 @@ def test_send_html_email(): "test body", "test_to@example.com", format="html").should.throw(BotoServerError) conn.verify_email_identity("test@example.com") - conn.send_email("test@example.com", "test subject", "test body", "test_to@example.com", format="html") + conn.send_email("test@example.com", "test subject", + "test body", "test_to@example.com", format="html") send_quota = conn.get_send_quota() - sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours']) + sent_count = int(send_quota['GetSendQuotaResponse'][ + 'GetSendQuotaResult']['SentLast24Hours']) sent_count.should.equal(1) + @mock_ses_deprecated def test_send_raw_email(): conn = boto.connect_ses('the_key', 'the_secret') @@ -101,5 +111,6 @@ def test_send_raw_email(): ) send_quota = conn.get_send_quota() - sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours']) + sent_count = int(send_quota['GetSendQuotaResponse'][ + 'GetSendQuotaResult']['SentLast24Hours']) sent_count.should.equal(1) diff --git a/tests/test_sns/test_application.py b/tests/test_sns/test_application.py index 31db73f62..613b11af5 
100644 --- a/tests/test_sns/test_application.py +++ b/tests/test_sns/test_application.py @@ -17,8 +17,10 @@ def test_create_platform_application(): "PlatformPrincipal": "platform_principal", }, ) - application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] - application_arn.should.equal('arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn.should.equal( + 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') @mock_sns_deprecated @@ -32,8 +34,10 @@ def test_get_platform_application_attributes(): "PlatformPrincipal": "platform_principal", }, ) - arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] - attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse']['GetPlatformApplicationAttributesResult']['Attributes'] + arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ + 'GetPlatformApplicationAttributesResult']['Attributes'] attributes.should.equal({ "PlatformCredential": "platform_credential", "PlatformPrincipal": "platform_principal", @@ -43,7 +47,8 @@ def test_get_platform_application_attributes(): @mock_sns_deprecated def test_get_missing_platform_application_attributes(): conn = boto.connect_sns() - conn.get_platform_application_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError) + conn.get_platform_application_attributes.when.called_with( + "a-fake-arn").should.throw(BotoServerError) @mock_sns_deprecated @@ -57,11 +62,13 @@ def test_set_platform_application_attributes(): "PlatformPrincipal": 
"platform_principal", }, ) - arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] + arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] conn.set_platform_application_attributes(arn, - {"PlatformPrincipal": "other"} - ) - attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse']['GetPlatformApplicationAttributesResult']['Attributes'] + {"PlatformPrincipal": "other"} + ) + attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ + 'GetPlatformApplicationAttributesResult']['Attributes'] attributes.should.equal({ "PlatformCredential": "platform_credential", "PlatformPrincipal": "other", @@ -81,7 +88,8 @@ def test_list_platform_applications(): ) applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications'] + applications = applications_repsonse['ListPlatformApplicationsResponse'][ + 'ListPlatformApplicationsResult']['PlatformApplications'] applications.should.have.length_of(2) @@ -98,14 +106,16 @@ def test_delete_platform_application(): ) applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications'] + applications = applications_repsonse['ListPlatformApplicationsResponse'][ + 'ListPlatformApplicationsResult']['PlatformApplications'] applications.should.have.length_of(2) application_arn = applications[0]['PlatformApplicationArn'] conn.delete_platform_application(application_arn) applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications'] + applications = 
applications_repsonse['ListPlatformApplicationsResponse'][ + 'ListPlatformApplicationsResult']['PlatformApplications'] applications.should.have.length_of(1) @@ -116,7 +126,8 @@ def test_create_platform_endpoint(): name="my-application", platform="APNS", ) - application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] endpoint = conn.create_platform_endpoint( platform_application_arn=application_arn, @@ -127,8 +138,10 @@ def test_create_platform_endpoint(): }, ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn'] - endpoint_arn.should.contain("arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + endpoint_arn.should.contain( + "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") @mock_sns_deprecated @@ -138,7 +151,8 @@ def test_get_list_endpoints_by_platform_application(): name="my-application", platform="APNS", ) - application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] endpoint = conn.create_platform_endpoint( platform_application_arn=application_arn, @@ -148,7 +162,8 @@ def test_get_list_endpoints_by_platform_application(): "CustomUserData": "some data", }, ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn'] + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] endpoint_list = conn.list_endpoints_by_platform_application( 
platform_application_arn=application_arn @@ -166,7 +181,8 @@ def test_get_endpoint_attributes(): name="my-application", platform="APNS", ) - application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] endpoint = conn.create_platform_endpoint( platform_application_arn=application_arn, @@ -177,9 +193,11 @@ def test_get_endpoint_attributes(): "CustomUserData": "some data", }, ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn'] + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] - attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse']['GetEndpointAttributesResult']['Attributes'] + attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ + 'GetEndpointAttributesResult']['Attributes'] attributes.should.equal({ "Token": "some_unique_id", "Enabled": 'False', @@ -190,7 +208,8 @@ def test_get_endpoint_attributes(): @mock_sns_deprecated def test_get_missing_endpoint_attributes(): conn = boto.connect_sns() - conn.get_endpoint_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError) + conn.get_endpoint_attributes.when.called_with( + "a-fake-arn").should.throw(BotoServerError) @mock_sns_deprecated @@ -200,7 +219,8 @@ def test_set_endpoint_attributes(): name="my-application", platform="APNS", ) - application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] endpoint = conn.create_platform_endpoint( platform_application_arn=application_arn, @@ -211,12 +231,14 
@@ def test_set_endpoint_attributes(): "CustomUserData": "some data", }, ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn'] + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] conn.set_endpoint_attributes(endpoint_arn, - {"CustomUserData": "other data"} - ) - attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse']['GetEndpointAttributesResult']['Attributes'] + {"CustomUserData": "other data"} + ) + attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ + 'GetEndpointAttributesResult']['Attributes'] attributes.should.equal({ "Token": "some_unique_id", "Enabled": 'False', @@ -231,7 +253,8 @@ def test_delete_endpoint(): name="my-application", platform="APNS", ) - application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] endpoint = conn.create_platform_endpoint( platform_application_arn=application_arn, @@ -242,7 +265,8 @@ def test_delete_endpoint(): "CustomUserData": "some data", }, ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn'] + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] endpoint_list = conn.list_endpoints_by_platform_application( platform_application_arn=application_arn @@ -265,7 +289,8 @@ def test_publish_to_platform_endpoint(): name="my-application", platform="APNS", ) - application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 
'CreatePlatformApplicationResult']['PlatformApplicationArn'] endpoint = conn.create_platform_endpoint( platform_application_arn=application_arn, @@ -276,6 +301,8 @@ def test_publish_to_platform_endpoint(): }, ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn'] + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] - conn.publish(message="some message", message_structure="json", target_arn=endpoint_arn) + conn.publish(message="some message", message_structure="json", + target_arn=endpoint_arn) diff --git a/tests/test_sns/test_application_boto3.py b/tests/test_sns/test_application_boto3.py index 251d1cf1d..968240b15 100644 --- a/tests/test_sns/test_application_boto3.py +++ b/tests/test_sns/test_application_boto3.py @@ -18,7 +18,8 @@ def test_create_platform_application(): }, ) application_arn = response['PlatformApplicationArn'] - application_arn.should.equal('arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') + application_arn.should.equal( + 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') @mock_sns @@ -33,7 +34,8 @@ def test_get_platform_application_attributes(): }, ) arn = platform_application['PlatformApplicationArn'] - attributes = conn.get_platform_application_attributes(PlatformApplicationArn=arn)['Attributes'] + attributes = conn.get_platform_application_attributes( + PlatformApplicationArn=arn)['Attributes'] attributes.should.equal({ "PlatformCredential": "platform_credential", "PlatformPrincipal": "platform_principal", @@ -43,7 +45,8 @@ def test_get_platform_application_attributes(): @mock_sns def test_get_missing_platform_application_attributes(): conn = boto3.client('sns', region_name='us-east-1') - conn.get_platform_application_attributes.when.called_with(PlatformApplicationArn="a-fake-arn").should.throw(ClientError) + conn.get_platform_application_attributes.when.called_with( + 
PlatformApplicationArn="a-fake-arn").should.throw(ClientError) @mock_sns @@ -59,9 +62,11 @@ def test_set_platform_application_attributes(): ) arn = platform_application['PlatformApplicationArn'] conn.set_platform_application_attributes(PlatformApplicationArn=arn, - Attributes={"PlatformPrincipal": "other"} - ) - attributes = conn.get_platform_application_attributes(PlatformApplicationArn=arn)['Attributes'] + Attributes={ + "PlatformPrincipal": "other"} + ) + attributes = conn.get_platform_application_attributes( + PlatformApplicationArn=arn)['Attributes'] attributes.should.equal({ "PlatformCredential": "platform_credential", "PlatformPrincipal": "other", @@ -133,7 +138,8 @@ def test_create_platform_endpoint(): ) endpoint_arn = endpoint['EndpointArn'] - endpoint_arn.should.contain("arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") + endpoint_arn.should.contain( + "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") @mock_sns @@ -186,7 +192,8 @@ def test_get_endpoint_attributes(): ) endpoint_arn = endpoint['EndpointArn'] - attributes = conn.get_endpoint_attributes(EndpointArn=endpoint_arn)['Attributes'] + attributes = conn.get_endpoint_attributes( + EndpointArn=endpoint_arn)['Attributes'] attributes.should.equal({ "Token": "some_unique_id", "Enabled": 'false', @@ -197,7 +204,8 @@ def test_get_endpoint_attributes(): @mock_sns def test_get_missing_endpoint_attributes(): conn = boto3.client('sns', region_name='us-east-1') - conn.get_endpoint_attributes.when.called_with(EndpointArn="a-fake-arn").should.throw(ClientError) + conn.get_endpoint_attributes.when.called_with( + EndpointArn="a-fake-arn").should.throw(ClientError) @mock_sns @@ -222,9 +230,10 @@ def test_set_endpoint_attributes(): endpoint_arn = endpoint['EndpointArn'] conn.set_endpoint_attributes(EndpointArn=endpoint_arn, - Attributes={"CustomUserData": "other data"} - ) - attributes = conn.get_endpoint_attributes(EndpointArn=endpoint_arn)['Attributes'] + 
Attributes={"CustomUserData": "other data"} + ) + attributes = conn.get_endpoint_attributes( + EndpointArn=endpoint_arn)['Attributes'] attributes.should.equal({ "Token": "some_unique_id", "Enabled": 'false', @@ -253,4 +262,5 @@ def test_publish_to_platform_endpoint(): endpoint_arn = endpoint['EndpointArn'] - conn.publish(Message="some message", MessageStructure="json", TargetArn=endpoint_arn) + conn.publish(Message="some message", + MessageStructure="json", TargetArn=endpoint_arn) diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index dab2a569b..718bce5c4 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -15,12 +15,14 @@ def test_publish_to_sqs(): conn = boto.connect_sns() conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] sqs_conn = boto.connect_sqs() sqs_conn.create_queue("test-queue") - conn.subscribe(topic_arn, "sqs", "arn:aws:sqs:us-east-1:123456789012:test-queue") + conn.subscribe(topic_arn, "sqs", + "arn:aws:sqs:us-east-1:123456789012:test-queue") conn.publish(topic=topic_arn, message="my message") @@ -35,12 +37,14 @@ def test_publish_to_sqs_in_different_region(): conn = boto.sns.connect_to_region("us-west-1") conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] sqs_conn = boto.sqs.connect_to_region("us-west-2") sqs_conn.create_queue("test-queue") - conn.subscribe(topic_arn, "sqs", "arn:aws:sqs:us-west-2:123456789012:test-queue") + conn.subscribe(topic_arn, "sqs", + "arn:aws:sqs:us-west-2:123456789012:test-queue") conn.publish(topic=topic_arn, message="my message") @@ -61,9 +65,11 @@ 
def test_publish_to_http(): conn = boto.connect_sns() conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] conn.subscribe(topic_arn, "http", "http://example.com/foobar") - response = conn.publish(topic=topic_arn, message="my message", subject="my subject") + response = conn.publish( + topic=topic_arn, message="my message", subject="my subject") message_id = response['PublishResponse']['PublishResult']['MessageId'] diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index edf2948fb..cda9fed60 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -70,5 +70,6 @@ def test_publish_to_http(): Protocol="http", Endpoint="http://example.com/foobar") - response = conn.publish(TopicArn=topic_arn, Message="my message", Subject="my subject") + response = conn.publish( + TopicArn=topic_arn, Message="my message", Subject="my subject") message_id = response['MessageId'] diff --git a/tests/test_sns/test_server.py b/tests/test_sns/test_server.py index 422763dac..ce505278f 100644 --- a/tests/test_sns/test_server.py +++ b/tests/test_sns/test_server.py @@ -15,8 +15,10 @@ def test_sns_server_get(): topic_data = test_client.action_data("CreateTopic", Name="test topic") topic_data.should.contain("CreateTopicResult") - topic_data.should.contain("arn:aws:sns:us-east-1:123456789012:test topic") + topic_data.should.contain( + "arn:aws:sns:us-east-1:123456789012:test topic") topics_data = test_client.action_data("ListTopics") topics_data.should.contain("ListTopicsResult") - topic_data.should.contain("arn:aws:sns:us-east-1:123456789012:test topic") + topic_data.should.contain( + "arn:aws:sns:us-east-1:123456789012:test topic") diff --git a/tests/test_sns/test_subscriptions.py 
b/tests/test_sns/test_subscriptions.py index e141c503a..c521bb428 100644 --- a/tests/test_sns/test_subscriptions.py +++ b/tests/test_sns/test_subscriptions.py @@ -12,11 +12,13 @@ def test_creating_subscription(): conn = boto.connect_sns() conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] conn.subscribe(topic_arn, "http", "http://example.com/") - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"] + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] subscriptions.should.have.length_of(1) subscription = subscriptions[0] subscription["TopicArn"].should.equal(topic_arn) @@ -28,7 +30,8 @@ def test_creating_subscription(): conn.unsubscribe(subscription["SubscriptionArn"]) # And there should be zero subscriptions left - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"] + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] subscriptions.should.have.length_of(0) @@ -46,7 +49,8 @@ def test_getting_subscriptions_by_topic(): conn.subscribe(topic1_arn, "http", "http://example1.com/") conn.subscribe(topic2_arn, "http", "http://example2.com/") - topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn)["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"] + topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn)[ + "ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"] topic1_subscriptions.should.have.length_of(1) topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") @@ -63,25 +67,36 @@ def 
test_subscription_paging(): topic2_arn = topics[1]['TopicArn'] for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): - conn.subscribe(topic1_arn, 'email', 'email_' + str(index) + '@test.com') - conn.subscribe(topic2_arn, 'email', 'email_' + str(index) + '@test.com') + conn.subscribe(topic1_arn, 'email', 'email_' + + str(index) + '@test.com') + conn.subscribe(topic2_arn, 'email', 'email_' + + str(index) + '@test.com') all_subscriptions = conn.get_all_subscriptions() - all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["NextToken"] + all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ + "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = all_subscriptions["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["NextToken"] next_token.should.equal(DEFAULT_PAGE_SIZE) all_subscriptions = conn.get_all_subscriptions(next_token=next_token * 2) - all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE * 2 / 3)) - next_token = all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["NextToken"] + all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ + "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE * 2 / 3)) + next_token = all_subscriptions["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["NextToken"] next_token.should.equal(None) topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn) - topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["NextToken"] + 
topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ + "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ + "ListSubscriptionsByTopicResult"]["NextToken"] next_token.should.equal(DEFAULT_PAGE_SIZE) - topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn, next_token=next_token) - topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) - next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["NextToken"] + topic1_subscriptions = conn.get_all_subscriptions_by_topic( + topic1_arn, next_token=next_token) + topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ + "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) + next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ + "ListSubscriptionsByTopicResult"]["NextToken"] next_token.should.equal(None) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index b884ca54d..906c483f7 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -52,7 +52,8 @@ def test_getting_subscriptions_by_topic(): Protocol="http", Endpoint="http://example2.com/") - topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)["Subscriptions"] + topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)[ + "Subscriptions"] topic1_subscriptions.should.have.length_of(1) topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") @@ -77,14 +78,19 @@ def test_subscription_paging(): next_token.should.equal(str(DEFAULT_PAGE_SIZE)) all_subscriptions = conn.list_subscriptions(NextToken=next_token) - 
all_subscriptions["Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) + all_subscriptions["Subscriptions"].should.have.length_of( + int(DEFAULT_PAGE_SIZE / 3)) all_subscriptions.shouldnt.have("NextToken") - topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn) - topic1_subscriptions["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + topic1_subscriptions = conn.list_subscriptions_by_topic( + TopicArn=topic1_arn) + topic1_subscriptions["Subscriptions"].should.have.length_of( + DEFAULT_PAGE_SIZE) next_token = topic1_subscriptions["NextToken"] next_token.should.equal(str(DEFAULT_PAGE_SIZE)) - topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn, NextToken=next_token) - topic1_subscriptions["Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) + topic1_subscriptions = conn.list_subscriptions_by_topic( + TopicArn=topic1_arn, NextToken=next_token) + topic1_subscriptions["Subscriptions"].should.have.length_of( + int(DEFAULT_PAGE_SIZE / 3)) topic1_subscriptions.shouldnt.have("NextToken") diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py index ab2f06382..79b85f709 100644 --- a/tests/test_sns/test_topics.py +++ b/tests/test_sns/test_topics.py @@ -34,7 +34,8 @@ def test_create_and_delete_topic(): @mock_sns_deprecated def test_get_missing_topic(): conn = boto.connect_sns() - conn.get_topic_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError) + conn.get_topic_attributes.when.called_with( + "a-fake-arn").should.throw(BotoServerError) @mock_sns_deprecated @@ -42,7 +43,9 @@ def test_create_topic_in_multiple_regions(): for region in ['us-west-1', 'us-west-2']: conn = boto.sns.connect_to_region(region) conn.create_topic("some-topic") - list(conn.get_all_topics()["ListTopicsResponse"]["ListTopicsResult"]["Topics"]).should.have.length_of(1) + list(conn.get_all_topics()["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"]).should.have.length_of(1) + 
@mock_sns_deprecated def test_topic_corresponds_to_region(): @@ -50,8 +53,11 @@ def test_topic_corresponds_to_region(): conn = boto.sns.connect_to_region(region) conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] - topic_arn.should.equal("arn:aws:sns:{0}:123456789012:some-topic".format(region)) + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn.should.equal( + "arn:aws:sns:{0}:123456789012:some-topic".format(region)) + @mock_sns_deprecated def test_topic_attributes(): @@ -59,9 +65,11 @@ def test_topic_attributes(): conn.create_topic("some-topic") topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] - attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes'] + attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ + 'GetTopicAttributesResult']['Attributes'] attributes["TopicArn"].should.equal( "arn:aws:sns:{0}:123456789012:some-topic" .format(conn.region.name) @@ -73,7 +81,8 @@ def test_topic_attributes(): attributes["SubscriptionsConfirmed"].should.equal(0) attributes["SubscriptionsDeleted"].should.equal(0) attributes["DeliveryPolicy"].should.equal("") - attributes["EffectiveDeliveryPolicy"].should.equal(DEFAULT_EFFECTIVE_DELIVERY_POLICY) + attributes["EffectiveDeliveryPolicy"].should.equal( + DEFAULT_EFFECTIVE_DELIVERY_POLICY) # boto can't handle prefix-mandatory strings: # i.e. 
unicode on Python 2 -- u"foobar" @@ -90,10 +99,13 @@ def test_topic_attributes(): conn.set_topic_attributes(topic_arn, "DisplayName", displayname) conn.set_topic_attributes(topic_arn, "DeliveryPolicy", delivery) - attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes'] + attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ + 'GetTopicAttributesResult']['Attributes'] attributes["Policy"].should.equal("{'foo': 'bar'}") attributes["DisplayName"].should.equal("My display name") - attributes["DeliveryPolicy"].should.equal("{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}") + attributes["DeliveryPolicy"].should.equal( + "{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}") + @mock_sns_deprecated def test_topic_paging(): @@ -102,15 +114,19 @@ def test_topic_paging(): conn.create_topic("some-topic_" + str(index)) topics_json = conn.get_all_topics() - topics_list = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - next_token = topics_json["ListTopicsResponse"]["ListTopicsResult"]["NextToken"] + topics_list = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] + next_token = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["NextToken"] len(topics_list).should.equal(DEFAULT_PAGE_SIZE) next_token.should.equal(DEFAULT_PAGE_SIZE) topics_json = conn.get_all_topics(next_token=next_token) - topics_list = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - next_token = topics_json["ListTopicsResponse"]["ListTopicsResult"]["NextToken"] + topics_list = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] + next_token = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["NextToken"] topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) next_token.should.equal(None) diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index b757a3750..55d03afff 100644 --- 
a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -35,7 +35,8 @@ def test_create_and_delete_topic(): @mock_sns def test_get_missing_topic(): conn = boto3.client("sns", region_name="us-east-1") - conn.get_topic_attributes.when.called_with(TopicArn="a-fake-arn").should.throw(ClientError) + conn.get_topic_attributes.when.called_with( + TopicArn="a-fake-arn").should.throw(ClientError) @mock_sns @@ -53,7 +54,8 @@ def test_topic_corresponds_to_region(): conn.create_topic(Name="some-topic") topics_json = conn.list_topics() topic_arn = topics_json["Topics"][0]['TopicArn'] - topic_arn.should.equal("arn:aws:sns:{0}:123456789012:some-topic".format(region)) + topic_arn.should.equal( + "arn:aws:sns:{0}:123456789012:some-topic".format(region)) @mock_sns @@ -76,7 +78,8 @@ def test_topic_attributes(): attributes["SubscriptionsConfirmed"].should.equal('0') attributes["SubscriptionsDeleted"].should.equal('0') attributes["DeliveryPolicy"].should.equal("") - attributes["EffectiveDeliveryPolicy"].should.equal(DEFAULT_EFFECTIVE_DELIVERY_POLICY) + attributes["EffectiveDeliveryPolicy"].should.equal( + DEFAULT_EFFECTIVE_DELIVERY_POLICY) # boto can't handle prefix-mandatory strings: # i.e. 
unicode on Python 2 -- u"foobar" @@ -84,11 +87,13 @@ def test_topic_attributes(): if six.PY2: policy = json.dumps({b"foo": b"bar"}) displayname = b"My display name" - delivery = json.dumps({b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}}) + delivery = json.dumps( + {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}}) else: policy = json.dumps({u"foo": u"bar"}) displayname = u"My display name" - delivery = json.dumps({u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}}) + delivery = json.dumps( + {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}}) conn.set_topic_attributes(TopicArn=topic_arn, AttributeName="Policy", AttributeValue=policy) @@ -102,7 +107,8 @@ def test_topic_attributes(): attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes'] attributes["Policy"].should.equal('{"foo": "bar"}') attributes["DisplayName"].should.equal("My display name") - attributes["DeliveryPolicy"].should.equal('{"http": {"defaultHealthyRetryPolicy": {"numRetries": 5}}}') + attributes["DeliveryPolicy"].should.equal( + '{"http": {"defaultHealthyRetryPolicy": {"numRetries": 5}}}') @mock_sns diff --git a/tests/test_sqs/test_server.py b/tests/test_sqs/test_server.py index c7411193a..b7a43ab90 100644 --- a/tests/test_sqs/test_server.py +++ b/tests/test_sqs/test_server.py @@ -31,7 +31,8 @@ def test_sqs_list_identities(): res = test_client.get( '/123/testqueue?Action=ReceiveMessage&MaxNumberOfMessages=1') - message = re.search("<Body>(.*?)</Body>", res.data.decode('utf-8')).groups()[0] + message = re.search("<Body>(.*?)</Body>", + res.data.decode('utf-8')).groups()[0] message.should.equal('test-message') @@ -58,7 +59,8 @@ def test_messages_polling(): msg_res = test_client.get( '/123/testqueue?Action=ReceiveMessage&MaxNumberOfMessages=1&WaitTimeSeconds=5' ) - new_msgs = re.findall("<Body>(.*?)</Body>", msg_res.data.decode('utf-8')) + new_msgs = re.findall("<Body>(.*?)</Body>", + msg_res.data.decode('utf-8')) count += len(new_msgs) messages.append(new_msgs) @@ -71,5 +73,6 @@ def
test_messages_polling(): get_messages_thread.join() insert_messages_thread.join() - # got each message in a separate call to ReceiveMessage, despite the long WaitTimeSeconds + # got each message in a separate call to ReceiveMessage, despite the long + # WaitTimeSeconds assert len(messages) == 5 diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 89ea7413d..653963122 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -34,7 +34,8 @@ def test_create_queue(): @mock_sqs def test_get_inexistent_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') - sqs.get_queue_by_name.when.called_with(QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) + sqs.get_queue_by_name.when.called_with( + QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) @mock_sqs @@ -43,8 +44,10 @@ def test_message_send(): queue = sqs.create_queue(QueueName="blah") msg = queue.send_message(MessageBody="derp") - msg.get('MD5OfMessageBody').should.equal('58fd9edd83341c29f1aebba81c31e257') - msg.get('ResponseMetadata', {}).get('RequestId').should.equal('27daac76-34dd-47df-bd01-1f6e873584a0') + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.get('ResponseMetadata', {}).get('RequestId').should.equal( + '27daac76-34dd-47df-bd01-1f6e873584a0') msg.get('MessageId').should_not.contain(' \n') messages = queue.receive_messages() @@ -73,7 +76,8 @@ def test_create_queues_in_multiple_region(): list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) - west1_conn.list_queues()['QueueUrls'][0].should.equal('http://sqs.us-west-1.amazonaws.com/123456789012/blah') + west1_conn.list_queues()['QueueUrls'][0].should.equal( + 'http://sqs.us-west-1.amazonaws.com/123456789012/blah') @mock_sqs @@ -87,14 +91,16 @@ def test_get_queue_with_prefix(): queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] 
queue.should.have.length_of(1) - queue[0].should.equal("http://sqs.us-west-1.amazonaws.com/123456789012/test-queue") + queue[0].should.equal( + "http://sqs.us-west-1.amazonaws.com/123456789012/test-queue") @mock_sqs def test_delete_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue", Attributes={"VisibilityTimeout": "60"}) + conn.create_queue(QueueName="test-queue", + Attributes={"VisibilityTimeout": "60"}) queue = sqs.Queue('test-queue') conn.list_queues()['QueueUrls'].should.have.length_of(1) @@ -110,7 +116,8 @@ def test_delete_queue(): def test_set_queue_attribute(): sqs = boto3.resource('sqs', region_name='us-east-1') conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue", Attributes={"VisibilityTimeout": '60'}) + conn.create_queue(QueueName="test-queue", + Attributes={"VisibilityTimeout": '60'}) queue = sqs.Queue("test-queue") queue.attributes['VisibilityTimeout'].should.equal('60') @@ -133,7 +140,8 @@ def test_send_message(): response = queue.send_message(MessageBody=body_one) response = queue.send_message(MessageBody=body_two) - messages = conn.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] messages[0]['Body'].should.equal(body_one) messages[1]['Body'].should.equal(body_two) @@ -244,13 +252,15 @@ def test_receive_message_with_explicit_visibility_timeout(): queue.write(queue.new_message(body_one)) queue.count().should.equal(1) - messages = conn.receive_message(queue, number_messages=1, visibility_timeout=0) + messages = conn.receive_message( + queue, number_messages=1, visibility_timeout=0) assert len(messages) == 1 # Message should remain visible queue.count().should.equal(1) + @mock_sqs_deprecated def test_change_message_visibility(): conn = boto.connect_sqs('the_key', 'the_secret') @@ -381,7 +391,8 
@@ def test_send_batch_operation_with_message_attributes(): queue = conn.create_queue("test-queue", visibility_timeout=60) queue.set_message_class(RawMessage) - message_tuple = ("my_first_message", 'test message 1', 0, {'name1': {'data_type': 'String', 'string_value': 'foo'}}) + message_tuple = ("my_first_message", 'test message 1', 0, { + 'name1': {'data_type': 'String', 'string_value': 'foo'}}) queue.write_batch([message_tuple]) messages = queue.get_messages() @@ -415,7 +426,8 @@ def test_queue_attributes(): queue_name = 'test-queue' visibility_timeout = 60 - queue = conn.create_queue(queue_name, visibility_timeout=visibility_timeout) + queue = conn.create_queue( + queue_name, visibility_timeout=visibility_timeout) attributes = queue.get_attributes() @@ -462,7 +474,8 @@ def test_change_message_visibility_on_invalid_receipt(): assert len(messages) == 1 - original_message.change_visibility.when.called_with(100).should.throw(SQSError) + original_message.change_visibility.when.called_with( + 100).should.throw(SQSError) @mock_sqs_deprecated @@ -485,7 +498,8 @@ def test_change_message_visibility_on_visible_message(): queue.count().should.equal(1) - original_message.change_visibility.when.called_with(100).should.throw(SQSError) + original_message.change_visibility.when.called_with( + 100).should.throw(SQSError) @mock_sqs_deprecated @@ -505,7 +519,8 @@ def test_purge_action(): def test_delete_message_after_visibility_timeout(): VISIBILITY_TIMEOUT = 1 conn = boto.sqs.connect_to_region("us-east-1") - new_queue = conn.create_queue('new-queue', visibility_timeout=VISIBILITY_TIMEOUT) + new_queue = conn.create_queue( + 'new-queue', visibility_timeout=VISIBILITY_TIMEOUT) m1 = Message() m1.set_body('Message 1!') diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 19865ca77..4e0e52606 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -16,7 +16,8 @@ def test_get_session_token(): token = conn.get_session_token(duration=123) 
token.expiration.should.equal('2012-01-01T12:02:03.000Z') - token.session_token.should.equal("AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") + token.session_token.should.equal( + "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") token.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") token.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") @@ -28,10 +29,13 @@ def test_get_federation_token(): token = conn.get_federation_token(duration=123, name="Bob") token.credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') - token.credentials.session_token.should.equal("AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==") + token.credentials.session_token.should.equal( + "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==") token.credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") - token.credentials.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") - token.federated_user_arn.should.equal("arn:aws:sts::123456789012:federated-user/Bob") + token.credentials.secret_key.should.equal( + 
"wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + token.federated_user_arn.should.equal( + "arn:aws:sts::123456789012:federated-user/Bob") token.federated_user_id.should.equal("123456789012:Bob") @@ -55,20 +59,25 @@ def test_assume_role(): ] }) s3_role = "arn:aws:iam::123456789012:role/test-role" - role = conn.assume_role(s3_role, "session-name", policy, duration_seconds=123) + role = conn.assume_role(s3_role, "session-name", + policy, duration_seconds=123) credentials = role.credentials credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') - credentials.session_token.should.equal("BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") + credentials.session_token.should.equal( + "BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") - credentials.secret_key.should.equal("aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + credentials.secret_key.should.equal( + "aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role") role.user.assume_role_id.should.contain("session-name") + @mock_sts def test_get_caller_identity(): - identity = boto3.client("sts", region_name='us-east-1').get_caller_identity() + identity = boto3.client( + "sts", region_name='us-east-1').get_caller_identity() identity['Arn'].should.equal('arn:aws:sts::123456789012:user/moto') identity['UserId'].should.equal('AKIAIOSFODNN7EXAMPLE') diff --git a/tests/test_swf/models/test_activity_task.py b/tests/test_swf/models/test_activity_task.py index 0885c4b1e..5dddab975 100644 --- a/tests/test_swf/models/test_activity_task.py 
+++ b/tests/test_swf/models/test_activity_task.py @@ -147,6 +147,7 @@ def test_activity_task_cannot_change_state_on_closed_workflow_execution(): ) wfe.complete(123) - task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw(SWFWorkflowExecutionClosedError) + task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw( + SWFWorkflowExecutionClosedError) task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError) task.fail.when.called_with().should.throw(SWFWorkflowExecutionClosedError) diff --git a/tests/test_swf/models/test_decision_task.py b/tests/test_swf/models/test_decision_task.py index fdb53d28a..b5e23eaca 100644 --- a/tests/test_swf/models/test_decision_task.py +++ b/tests/test_swf/models/test_decision_task.py @@ -75,5 +75,6 @@ def test_decision_task_cannot_change_state_on_closed_workflow_execution(): wfe.complete(123) - task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw(SWFWorkflowExecutionClosedError) + task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw( + SWFWorkflowExecutionClosedError) task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError) diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py index ce3ed0f13..57f66c830 100644 --- a/tests/test_swf/models/test_domain.py +++ b/tests/test_swf/models/test_domain.py @@ -15,7 +15,8 @@ WorkflowExecution = namedtuple( def test_domain_short_dict_representation(): domain = Domain("foo", "52") - domain.to_short_dict().should.equal({"name": "foo", "status": "REGISTERED"}) + domain.to_short_dict().should.equal( + {"name": "foo", "status": "REGISTERED"}) domain.description = "foo bar" domain.to_short_dict()["description"].should.equal("foo bar") @@ -67,16 +68,23 @@ def test_domain_decision_tasks(): def test_domain_get_workflow_execution(): domain = Domain("my-domain", "60") - wfe1 = WorkflowExecution(workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True) - wfe2 = 
WorkflowExecution(workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False) - wfe3 = WorkflowExecution(workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True) - wfe4 = WorkflowExecution(workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False) + wfe1 = WorkflowExecution( + workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True) + wfe2 = WorkflowExecution( + workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False) + wfe3 = WorkflowExecution( + workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True) + wfe4 = WorkflowExecution( + workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False) domain.workflow_executions = [wfe1, wfe2, wfe3, wfe4] # get workflow execution through workflow_id and run_id - domain.get_workflow_execution("wf-id-1", run_id="run-id-1").should.equal(wfe1) - domain.get_workflow_execution("wf-id-1", run_id="run-id-2").should.equal(wfe2) - domain.get_workflow_execution("wf-id-3", run_id="run-id-4").should.equal(wfe4) + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-1").should.equal(wfe1) + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-2").should.equal(wfe2) + domain.get_workflow_execution( + "wf-id-3", run_id="run-id-4").should.equal(wfe4) domain.get_workflow_execution.when.called_with( "wf-id-1", run_id="non-existent" @@ -98,7 +106,8 @@ def test_domain_get_workflow_execution(): ) # raise_if_closed attribute - domain.get_workflow_execution("wf-id-1", run_id="run-id-1", raise_if_closed=True).should.equal(wfe1) + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-1", raise_if_closed=True).should.equal(wfe1) domain.get_workflow_execution.when.called_with( "wf-id-3", run_id="run-id-4", raise_if_closed=True ).should.throw( diff --git a/tests/test_swf/models/test_generic_type.py b/tests/test_swf/models/test_generic_type.py index 692c66a47..d7410f395 100644 --- 
a/tests/test_swf/models/test_generic_type.py +++ b/tests/test_swf/models/test_generic_type.py @@ -3,6 +3,7 @@ from moto.swf.models import GenericType # Tests for GenericType (ActivityType, WorkflowType) class FooType(GenericType): + @property def kind(self): return "foo" @@ -38,10 +39,12 @@ def test_type_full_dict_representation(): _type.to_full_dict()["configuration"].should.equal({}) _type.task_list = "foo" - _type.to_full_dict()["configuration"]["defaultTaskList"].should.equal({"name": "foo"}) + _type.to_full_dict()["configuration"][ + "defaultTaskList"].should.equal({"name": "foo"}) _type.just_an_example_timeout = "60" - _type.to_full_dict()["configuration"]["justAnExampleTimeout"].should.equal("60") + _type.to_full_dict()["configuration"][ + "justAnExampleTimeout"].should.equal("60") _type.non_whitelisted_property = "34" keys = _type.to_full_dict()["configuration"].keys() @@ -50,4 +53,5 @@ def test_type_full_dict_representation(): def test_type_string_representation(): _type = FooType("test-foo", "v1.0") - str(_type).should.equal("FooType(name: test-foo, version: v1.0, status: REGISTERED)") + str(_type).should.equal( + "FooType(name: test-foo, version: v1.0, status: REGISTERED)") diff --git a/tests/test_swf/models/test_workflow_execution.py b/tests/test_swf/models/test_workflow_execution.py index f6a69f8d7..45b91c86a 100644 --- a/tests/test_swf/models/test_workflow_execution.py +++ b/tests/test_swf/models/test_workflow_execution.py @@ -240,8 +240,10 @@ def test_workflow_execution_schedule_activity_task(): wfe.open_counts["openActivityTasks"].should.equal(1) last_event = wfe.events()[-1] last_event.event_type.should.equal("ActivityTaskScheduled") - last_event.event_attributes["decisionTaskCompletedEventId"].should.equal(123) - last_event.event_attributes["taskList"]["name"].should.equal("task-list-name") + last_event.event_attributes[ + "decisionTaskCompletedEventId"].should.equal(123) + last_event.event_attributes["taskList"][ + 
"name"].should.equal("task-list-name") wfe.activity_tasks.should.have.length_of(1) task = wfe.activity_tasks[0] @@ -288,43 +290,50 @@ def test_workflow_execution_schedule_activity_task_should_fail_if_wrong_attribut wfe.schedule_activity_task(123, hsh) last_event = wfe.events()[-1] last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal("ACTIVITY_TYPE_DOES_NOT_EXIST") + last_event.event_attributes["cause"].should.equal( + "ACTIVITY_TYPE_DOES_NOT_EXIST") hsh["activityType"]["name"] = "test-activity" wfe.schedule_activity_task(123, hsh) last_event = wfe.events()[-1] last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal("ACTIVITY_TYPE_DEPRECATED") + last_event.event_attributes["cause"].should.equal( + "ACTIVITY_TYPE_DEPRECATED") hsh["activityType"]["version"] = "v1.2" wfe.schedule_activity_task(123, hsh) last_event = wfe.events()[-1] last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal("DEFAULT_TASK_LIST_UNDEFINED") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_TASK_LIST_UNDEFINED") hsh["taskList"] = {"name": "foobar"} wfe.schedule_activity_task(123, hsh) last_event = wfe.events()[-1] last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal("DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED") hsh["scheduleToStartTimeout"] = "600" wfe.schedule_activity_task(123, hsh) last_event = wfe.events()[-1] last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal("DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED") hsh["scheduleToCloseTimeout"] = "600" 
wfe.schedule_activity_task(123, hsh) last_event = wfe.events()[-1] last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal("DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED") hsh["startToCloseTimeout"] = "600" wfe.schedule_activity_task(123, hsh) last_event = wfe.events()[-1] last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal("DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED") wfe.open_counts["openActivityTasks"].should.equal(0) wfe.activity_tasks.should.have.length_of(0) @@ -393,7 +402,8 @@ def test_workflow_execution_schedule_activity_task_with_same_activity_id(): wfe.open_counts["openActivityTasks"].should.equal(1) last_event = wfe.events()[-1] last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal("ACTIVITY_ID_ALREADY_IN_USE") + last_event.event_attributes["cause"].should.equal( + "ACTIVITY_ID_ALREADY_IN_USE") def test_workflow_execution_start_activity_task(): @@ -456,7 +466,8 @@ def test_first_timeout(): wfe.first_timeout().should.be.a(Timeout) -# See moto/swf/models/workflow_execution.py "_process_timeouts()" for more details +# See moto/swf/models/workflow_execution.py "_process_timeouts()" for more +# details def test_timeouts_are_processed_in_order_and_reevaluated(): # Let's make a Workflow Execution with the following properties: # - execution start to close timeout of 8 mins diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index e6671e9e9..3511d4e56 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -11,15 +11,18 @@ from ..utils import setup_workflow, 
SCHEDULE_ACTIVITY_TASK_DECISION @mock_swf_deprecated def test_poll_for_activity_task_when_one(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - resp = conn.poll_for_activity_task("test-domain", "activity-task-list", identity="surprise") + resp = conn.poll_for_activity_task( + "test-domain", "activity-task-list", identity="surprise") resp["activityId"].should.equal("my-activity-001") resp["taskToken"].should_not.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal( {"identity": "surprise", "scheduledEventId": 5} @@ -44,12 +47,14 @@ def test_poll_for_activity_task_on_non_existent_queue(): @mock_swf_deprecated def test_count_pending_activity_tasks(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - resp = conn.count_pending_activity_tasks("test-domain", "activity-task-list") + resp = conn.count_pending_activity_tasks( + "test-domain", "activity-task-list") resp.should.equal({"count": 1, "truncated": False}) @@ -64,16 +69,20 @@ def test_count_pending_decision_tasks_on_non_existent_task_list(): @mock_swf_deprecated def test_respond_activity_task_completed(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + 
"test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] - resp = conn.respond_activity_task_completed(activity_token, result="result of the task") + resp = conn.respond_activity_task_completed( + activity_token, result="result of the task") resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted") resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal( {"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6} @@ -83,13 +92,16 @@ def test_respond_activity_task_completed(): @mock_swf_deprecated def test_respond_activity_task_completed_on_closed_workflow_execution(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] - # bad: we're closing workflow execution manually, but endpoints are not coded for now.. + # bad: we're closing workflow execution manually, but endpoints are not + # coded for now.. 
wfe = swf_backend.domains[0].workflow_executions[-1] wfe.execution_status = "CLOSED" # /bad @@ -102,11 +114,13 @@ def test_respond_activity_task_completed_on_closed_workflow_execution(): @mock_swf_deprecated def test_respond_activity_task_completed_with_task_already_completed(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] conn.respond_activity_task_completed(activity_token) @@ -119,18 +133,21 @@ def test_respond_activity_task_completed_with_task_already_completed(): @mock_swf_deprecated def test_respond_activity_task_failed(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] resp = conn.respond_activity_task_failed(activity_token, reason="short reason", details="long details") resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed") resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal( {"reason": "short reason", "details": "long details", @@ -144,7 +161,8 @@ def 
test_respond_activity_task_completed_with_wrong_token(): # because the safeguards are shared with RespondActivityTaskCompleted, so # no need to retest everything end-to-end. conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) @@ -158,11 +176,13 @@ def test_respond_activity_task_completed_with_wrong_token(): @mock_swf_deprecated def test_record_activity_task_heartbeat(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] resp = conn.record_activity_task_heartbeat(activity_token) resp.should.equal({"cancelRequested": False}) @@ -171,11 +191,13 @@ def test_record_activity_task_heartbeat(): @mock_swf_deprecated def test_record_activity_task_heartbeat_with_wrong_token(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] + conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] conn.record_activity_task_heartbeat.when.called_with( "bad-token", details="some progress details" @@ -185,17 +207,21 @@ def test_record_activity_task_heartbeat_with_wrong_token(): 
@mock_swf_deprecated def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout(): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) with freeze_time("2015-01-01 12:00:00"): - activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"] - conn.record_activity_task_heartbeat(activity_token, details="some progress details") + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + conn.record_activity_task_heartbeat( + activity_token, details="some progress details") with freeze_time("2015-01-01 12:05:30"): # => Activity Task Heartbeat timeout reached!! - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] attrs["details"].should.equal("some progress details") diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index 20c44dc5f..b283d3448 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -48,8 +48,10 @@ def test_list_activity_types(): conn.register_activity_type("test-domain", "c-test-activity", "v1.0") all_activity_types = conn.list_activity_types("test-domain", "REGISTERED") - names = [activity_type["activityType"]["name"] for activity_type in all_activity_types["typeInfos"]] - names.should.equal(["a-test-activity", "b-test-activity", "c-test-activity"]) + names = [activity_type["activityType"]["name"] + for activity_type in 
all_activity_types["typeInfos"]] + names.should.equal( + ["a-test-activity", "b-test-activity", "c-test-activity"]) @mock_swf_deprecated @@ -62,8 +64,10 @@ def test_list_activity_types_reverse_order(): all_activity_types = conn.list_activity_types("test-domain", "REGISTERED", reverse_order=True) - names = [activity_type["activityType"]["name"] for activity_type in all_activity_types["typeInfos"]] - names.should.equal(["c-test-activity", "b-test-activity", "a-test-activity"]) + names = [activity_type["activityType"]["name"] + for activity_type in all_activity_types["typeInfos"]] + names.should.equal( + ["c-test-activity", "b-test-activity", "a-test-activity"]) # DeprecateActivityType endpoint @@ -110,7 +114,8 @@ def test_describe_activity_type(): conn.register_activity_type("test-domain", "test-activity", "v1.0", task_list="foo", default_task_heartbeat_timeout="32") - actype = conn.describe_activity_type("test-domain", "test-activity", "v1.0") + actype = conn.describe_activity_type( + "test-domain", "test-activity", "v1.0") actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") infos = actype["typeInfo"] infos["activityType"]["name"].should.equal("test-activity") diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index b552723cb..466e1a2ae 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -12,15 +12,19 @@ from ..utils import setup_workflow def test_poll_for_decision_task_when_one(): conn = setup_workflow() - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) - resp = conn.poll_for_decision_task("test-domain", "queue", identity="srv01") + resp = conn.poll_for_decision_task( + 
"test-domain", "queue", identity="srv01") types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted"]) + types.should.equal(["WorkflowExecutionStarted", + "DecisionTaskScheduled", "DecisionTaskStarted"]) - resp["events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01") + resp[ + "events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01") @mock_swf_deprecated @@ -44,9 +48,11 @@ def test_poll_for_decision_task_on_non_existent_queue(): @mock_swf_deprecated def test_poll_for_decision_task_with_reverse_order(): conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue", reverse_order=True) + resp = conn.poll_for_decision_task( + "test-domain", "queue", reverse_order=True) types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"]) + types.should.equal( + ["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"]) # CountPendingDecisionTasks endpoint @@ -89,7 +95,8 @@ def test_respond_decision_task_completed_with_no_decision(): ) resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal([ "WorkflowExecutionStarted", @@ -104,7 +111,8 @@ def test_respond_decision_task_completed_with_no_decision(): "startedEventId": 3, }) - resp = conn.describe_workflow_execution("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.describe_workflow_execution( + "test-domain", conn.run_id, "uid-abcd1234") resp["latestExecutionContext"].should.equal("free-form context") @@ -123,7 +131,8 @@ def test_respond_decision_task_completed_on_close_workflow_execution(): resp = 
conn.poll_for_decision_task("test-domain", "queue") task_token = resp["taskToken"] - # bad: we're closing workflow execution manually, but endpoints are not coded for now.. + # bad: we're closing workflow execution manually, but endpoints are not + # coded for now.. wfe = swf_backend.domains[0].workflow_executions[-1] wfe.execution_status = "CLOSED" # /bad @@ -155,10 +164,12 @@ def test_respond_decision_task_completed_with_complete_workflow_execution(): "decisionType": "CompleteWorkflowExecution", "completeWorkflowExecutionDecisionAttributes": {"result": "foo bar"} }] - resp = conn.respond_decision_task_completed(task_token, decisions=decisions) + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal([ "WorkflowExecutionStarted", @@ -167,7 +178,8 @@ def test_respond_decision_task_completed_with_complete_workflow_execution(): "DecisionTaskCompleted", "WorkflowExecutionCompleted", ]) - resp["events"][-1]["workflowExecutionCompletedEventAttributes"]["result"].should.equal("foo bar") + resp["events"][-1]["workflowExecutionCompletedEventAttributes"][ + "result"].should.equal("foo bar") @mock_swf_deprecated @@ -255,10 +267,12 @@ def test_respond_decision_task_completed_with_fail_workflow_execution(): "decisionType": "FailWorkflowExecution", "failWorkflowExecutionDecisionAttributes": {"reason": "my rules", "details": "foo"} }] - resp = conn.respond_decision_task_completed(task_token, decisions=decisions) + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, 
"uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal([ "WorkflowExecutionStarted", @@ -294,10 +308,12 @@ def test_respond_decision_task_completed_with_schedule_activity_task(): }, } }] - resp = conn.respond_decision_task_completed(task_token, decisions=decisions) + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal([ "WorkflowExecutionStarted", @@ -320,5 +336,6 @@ def test_respond_decision_task_completed_with_schedule_activity_task(): }, }) - resp = conn.describe_workflow_execution("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.describe_workflow_execution( + "test-domain", conn.run_id, "uid-abcd1234") resp["latestActivityTaskTimestamp"].should.equal(1420113600.0) diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index 1f785095c..3fa12d665 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -102,7 +102,8 @@ def test_describe_domain(): conn.register_domain("test-domain", "60", description="A test domain") domain = conn.describe_domain("test-domain") - domain["configuration"]["workflowExecutionRetentionPeriodInDays"].should.equal("60") + domain["configuration"][ + "workflowExecutionRetentionPeriodInDays"].should.equal("60") domain["domainInfo"]["description"].should.equal("A test domain") domain["domainInfo"]["name"].should.equal("test-domain") domain["domainInfo"]["status"].should.equal("REGISTERED") diff --git a/tests/test_swf/responses/test_timeouts.py b/tests/test_swf/responses/test_timeouts.py index 726410e76..5bd0ead96 100644 --- a/tests/test_swf/responses/test_timeouts.py +++ b/tests/test_swf/responses/test_timeouts.py @@ 
-11,19 +11,23 @@ from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION def test_activity_task_heartbeat_timeout(): with freeze_time("2015-01-01 12:00:00"): conn = setup_workflow() - decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] conn.respond_decision_task_completed(decision_token, decisions=[ SCHEDULE_ACTIVITY_TASK_DECISION ]) - conn.poll_for_activity_task("test-domain", "activity-task-list", identity="surprise") + conn.poll_for_activity_task( + "test-domain", "activity-task-list", identity="surprise") with freeze_time("2015-01-01 12:04:30"): - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") with freeze_time("2015-01-01 12:05:30"): # => Activity Task Heartbeat timeout reached!! - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] @@ -44,7 +48,8 @@ def test_decision_task_start_to_close_timeout(): conn.poll_for_decision_task("test-domain", "queue")["taskToken"] with freeze_time("2015-01-01 12:04:30"): - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") event_types = [evt["eventType"] for evt in resp["events"]] event_types.should.equal( @@ -53,7 +58,8 @@ def test_decision_task_start_to_close_timeout(): with freeze_time("2015-01-01 12:05:30"): # => Decision Task Start to Close timeout reached!! 
- resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") event_types = [evt["eventType"] for evt in resp["events"]] event_types.should.equal( @@ -77,7 +83,8 @@ def test_workflow_execution_start_to_close_timeout(): conn = setup_workflow() with freeze_time("2015-01-01 13:59:30"): - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") event_types = [evt["eventType"] for evt in resp["events"]] event_types.should.equal( @@ -86,11 +93,13 @@ def test_workflow_execution_start_to_close_timeout(): with freeze_time("2015-01-01 14:00:30"): # => Workflow Execution Start to Close timeout reached!! - resp = conn.get_workflow_execution_history("test-domain", conn.run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") event_types = [evt["eventType"] for evt in resp["events"]] event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled", "WorkflowExecutionTimedOut"] + ["WorkflowExecutionStarted", "DecisionTaskScheduled", + "WorkflowExecutionTimedOut"] ) attrs = resp["events"][-1]["workflowExecutionTimedOutEventAttributes"] attrs.should.equal({ diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index d5dc44a38..5c97c778b 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -30,14 +30,16 @@ def setup_swf_environment(): def test_start_workflow_execution(): conn = setup_swf_environment() - wf = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + wf = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") wf.should.contain("runId") @mock_swf_deprecated 
def test_start_already_started_workflow_execution(): conn = setup_swf_environment() - conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") conn.start_workflow_execution.when.called_with( "test-domain", "uid-abcd1234", "test-workflow", "v1.0" @@ -58,11 +60,14 @@ def test_start_workflow_execution_on_deprecated_type(): @mock_swf_deprecated def test_describe_workflow_execution(): conn = setup_swf_environment() - hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") run_id = hsh["runId"] - wfe = conn.describe_workflow_execution("test-domain", run_id, "uid-abcd1234") - wfe["executionInfo"]["execution"]["workflowId"].should.equal("uid-abcd1234") + wfe = conn.describe_workflow_execution( + "test-domain", run_id, "uid-abcd1234") + wfe["executionInfo"]["execution"][ + "workflowId"].should.equal("uid-abcd1234") wfe["executionInfo"]["executionStatus"].should.equal("OPEN") @@ -79,10 +84,12 @@ def test_describe_non_existent_workflow_execution(): @mock_swf_deprecated def test_get_workflow_execution_history(): conn = setup_swf_environment() - hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") run_id = hsh["runId"] - resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", run_id, "uid-abcd1234") types = [evt["eventType"] for evt in resp["events"]] types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) @@ -90,7 +97,8 @@ def test_get_workflow_execution_history(): @mock_swf_deprecated def test_get_workflow_execution_history_with_reverse_order(): conn = setup_swf_environment() - hsh = 
conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") run_id = hsh["runId"] resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234", @@ -191,7 +199,8 @@ def test_terminate_workflow_execution(): run_id=run_id) resp.should.be.none - resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234") + resp = conn.get_workflow_execution_history( + "test-domain", run_id, "uid-abcd1234") evt = resp["events"][-1] evt["eventType"].should.equal("WorkflowExecutionTerminated") attrs = evt["workflowExecutionTerminatedEventAttributes"] diff --git a/tests/test_swf/responses/test_workflow_types.py b/tests/test_swf/responses/test_workflow_types.py index 1e838c2ee..9e097a873 100644 --- a/tests/test_swf/responses/test_workflow_types.py +++ b/tests/test_swf/responses/test_workflow_types.py @@ -49,8 +49,10 @@ def test_list_workflow_types(): conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED") - names = [activity_type["workflowType"]["name"] for activity_type in all_workflow_types["typeInfos"]] - names.should.equal(["a-test-workflow", "b-test-workflow", "c-test-workflow"]) + names = [activity_type["workflowType"]["name"] + for activity_type in all_workflow_types["typeInfos"]] + names.should.equal( + ["a-test-workflow", "b-test-workflow", "c-test-workflow"]) @mock_swf_deprecated @@ -63,8 +65,10 @@ def test_list_workflow_types_reverse_order(): all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED", reverse_order=True) - names = [activity_type["workflowType"]["name"] for activity_type in all_workflow_types["typeInfos"]] - names.should.equal(["c-test-workflow", "b-test-workflow", "a-test-workflow"]) + names = [activity_type["workflowType"]["name"] + for activity_type in all_workflow_types["typeInfos"]] + 
names.should.equal( + ["c-test-workflow", "b-test-workflow", "a-test-workflow"]) # DeprecateWorkflowType endpoint @@ -111,10 +115,12 @@ def test_describe_workflow_type(): conn.register_workflow_type("test-domain", "test-workflow", "v1.0", task_list="foo", default_child_policy="TERMINATE") - actype = conn.describe_workflow_type("test-domain", "test-workflow", "v1.0") + actype = conn.describe_workflow_type( + "test-domain", "test-workflow", "v1.0") actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") actype["configuration"]["defaultChildPolicy"].should.equal("TERMINATE") - actype["configuration"].keys().should_not.contain("defaultTaskStartToCloseTimeout") + actype["configuration"].keys().should_not.contain( + "defaultTaskStartToCloseTimeout") infos = actype["typeInfo"] infos["workflowType"]["name"].should.equal("test-workflow") infos["workflowType"]["version"].should.equal("v1.0") diff --git a/tests/test_swf/utils.py b/tests/test_swf/utils.py index 756d17c27..2197b71df 100644 --- a/tests/test_swf/utils.py +++ b/tests/test_swf/utils.py @@ -29,7 +29,8 @@ SCHEDULE_ACTIVITY_TASK_DECISION = { } } for key, value in ACTIVITY_TASK_TIMEOUTS.items(): - SCHEDULE_ACTIVITY_TASK_DECISION["scheduleActivityTaskDecisionAttributes"][key] = value + SCHEDULE_ACTIVITY_TASK_DECISION[ + "scheduleActivityTaskDecisionAttributes"][key] = value # A test Domain @@ -86,7 +87,8 @@ def setup_workflow(): default_task_schedule_to_start_timeout="600", default_task_start_to_close_timeout="600", ) - wfe = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0") + wfe = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") conn.run_id = wfe["runId"] return conn From 0dda687762d44f58fa643372cb79037bf0c122a5 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 23 Feb 2017 21:41:05 -0500 Subject: [PATCH 058/213] Fix urlparse for py3. 
--- moto/compat.py | 5 ----- moto/emr/responses.py | 2 +- moto/instance_metadata/responses.py | 2 +- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/moto/compat.py b/moto/compat.py index 2dd2d879e..a92a5f67b 100644 --- a/moto/compat.py +++ b/moto/compat.py @@ -3,8 +3,3 @@ try: except ImportError: # python 2.6 or earlier, use backport from ordereddict import OrderedDict # flake8: noqa - -try: - from urlparse import urlparse # flake8: noqa -except ImportError: - from urllib.parse import urlparse # flake8: noqa diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 91dc8cc11..3919d8b3e 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -6,7 +6,7 @@ from functools import wraps import pytz -from moto.compat import urlparse +from six.moves.urllib.parse import urlparse from moto.core.responses import AWSServiceSpec from moto.core.responses import BaseResponse from moto.core.responses import xml_to_json_response diff --git a/moto/instance_metadata/responses.py b/moto/instance_metadata/responses.py index 2ea9aa9a8..460e65aca 100644 --- a/moto/instance_metadata/responses.py +++ b/moto/instance_metadata/responses.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals import datetime import json -from urlparse import urlparse +from six.moves.urllib.parse import urlparse from moto.core.responses import BaseResponse From 3c0c4c29960cf290b346c6dbb11b348776f60373 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 23 Feb 2017 22:28:09 -0500 Subject: [PATCH 059/213] Fix tests for py3. 
--- moto/awslambda/responses.py | 2 +- moto/backends.py | 4 ++-- moto/cloudformation/parsing.py | 6 ++---- moto/emr/responses.py | 2 +- moto/server.py | 4 ++-- tests/test_cloudformation/test_stack_parsing.py | 4 ++-- tests/test_ec2/test_spot_instances.py | 4 ++-- tests/test_emr/test_emr_boto3.py | 3 +-- tests/test_events/test_events.py | 5 ++--- tests/test_s3/test_s3.py | 2 +- 10 files changed, 16 insertions(+), 20 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index b7664c314..d145f4760 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -57,7 +57,7 @@ class LambdaResponse(BaseResponse): def _create_function(self, request, full_url, headers): lambda_backend = self.get_lambda_backend(full_url) - spec = json.loads(self.body.decode('utf-8')) + spec = json.loads(self.body) try: fn = lambda_backend.create_function(spec) except ValueError as e: diff --git a/moto/backends.py b/moto/backends.py index 5b1695e3b..94c7f4849 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -62,10 +62,10 @@ BACKENDS = { } -def get_model(name, region): +def get_model(name, region_name): for backends in BACKENDS.values(): for region, backend in backends.items(): - if region == region: + if region == region_name: models = getattr(backend.__class__, '__models__', {}) if name in models: return list(getattr(backend, models[name])()) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index f2ba08522..9dcbdae29 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -21,9 +21,8 @@ from moto.s3 import models as s3_models from moto.sns import models as sns_models from moto.sqs import models as sqs_models from .utils import random_suffix -from .exceptions import MissingParameterError, UnformattedGetAttTemplateException +from .exceptions import MissingParameterError, UnformattedGetAttTemplateException, ValidationError from boto.cloudformation.stack import Output -from boto.exception 
import BotoServerError MODEL_MAP = { "AWS::AutoScaling::AutoScalingGroup": autoscaling_models.FakeAutoScalingGroup, @@ -137,8 +136,7 @@ def clean_json(resource_json, resources_map): logger.warning(n.message.format( resource_json['Fn::GetAtt'][0])) except UnformattedGetAttTemplateException: - raise BotoServerError( - UnformattedGetAttTemplateException.status_code, + raise ValidationError( 'Bad Request', UnformattedGetAttTemplateException.description.format( resource_json['Fn::GetAtt'][0], resource_json['Fn::GetAtt'][1])) diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 3919d8b3e..8442e4010 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -259,7 +259,7 @@ class ElasticMapReduceResponse(BaseResponse): 'Provided AMI: {0}, release label: {1}.').format( ami_version, release_label) raise EmrError(error_type="ValidationException", - message=message, template='single_error') + message=message, template='error_json') else: if ami_version: kwargs['requested_ami_version'] = ami_version diff --git a/moto/server.py b/moto/server.py index c7e7f18fb..fcc91ac6c 100644 --- a/moto/server.py +++ b/moto/server.py @@ -39,7 +39,7 @@ class DomainDispatcherApplication(object): return host for backend_name, backend in BACKENDS.items(): - for url_base in backend.values()[0].url_bases: + for url_base in list(backend.values())[0].url_bases: if re.match(url_base, 'http://%s' % host): return backend_name @@ -118,7 +118,7 @@ def create_backend_app(service): backend_app.view_functions = {} backend_app.url_map = Map() backend_app.url_map.converters['regex'] = RegexConverter - backend = BACKENDS[service].values()[0] + backend = list(BACKENDS[service].values())[0] for url_path, handler in backend.flask_paths.items(): if handler.__name__ == 'dispatch': endpoint = '{0}.dispatch'.format(handler.__self__.__name__) diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index be459eff1..c2af6363a 100644 --- 
a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -4,12 +4,12 @@ import json from mock import patch import sure # noqa +from moto.cloudformation.exceptions import ValidationError from moto.cloudformation.models import FakeStack from moto.cloudformation.parsing import resource_class_from_type, parse_condition from moto.sqs.models import Queue from moto.s3.models import FakeBucket from boto.cloudformation.stack import Output -from boto.exception import BotoServerError dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -158,7 +158,7 @@ def test_parse_stack_with_get_attribute_outputs(): def test_parse_stack_with_bad_get_attribute_outputs(): FakeStack.when.called_with( - "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(BotoServerError) + "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError) def test_parse_equals_condition(): diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 5c3bdff12..05f8ee88f 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -39,7 +39,7 @@ def test_request_spot_instances(): "ImageId": 'ami-abcd1234', "KeyName": "test", "SecurityGroups": ['group1', 'group2'], - "UserData": b"some test data", + "UserData": "some test data", "InstanceType": 'm1.small', "Placement": { "AvailabilityZone": 'us-east-1c', @@ -67,7 +67,7 @@ def test_request_spot_instances(): "ImageId": 'ami-abcd1234', "KeyName": "test", "SecurityGroups": ['group1', 'group2'], - "UserData": b"some test data", + "UserData": "some test data", "InstanceType": 'm1.small', "Placement": { "AvailabilityZone": 'us-east-1c', diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 4999935c5..b2877c7f5 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -347,8 +347,7 @@ def 
test_run_job_flow_with_invalid_params(): args['AmiVersion'] = '2.4' args['ReleaseLabel'] = 'emr-5.0.0' client.run_job_flow(**args) - ex.exception.response['Error'][ - 'Message'].should.contain('ValidationException') + ex.exception.response['Error']['Code'].should.equal('ValidationException') @mock_emr diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index a2d5a5d47..537b741f2 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -49,7 +49,6 @@ def get_random_rule(): return RULES[random.randint(0, len(RULES) - 1)] -@mock_events def generate_environment(): client = boto3.client('events', 'us-west-2') @@ -115,12 +114,12 @@ def test_list_rule_names_by_target(): client = generate_environment() rules = client.list_rule_names_by_target(TargetArn=test_1_target['Arn']) - assert(len(rules) == len(test_1_target['Rules'])) + assert(len(rules['RuleNames']) == len(test_1_target['Rules'])) for rule in rules['RuleNames']: assert(rule in test_1_target['Rules']) rules = client.list_rule_names_by_target(TargetArn=test_2_target['Arn']) - assert(len(rules) == len(test_2_target['Rules'])) + assert(len(rules['RuleNames']) == len(test_2_target['Rules'])) for rule in rules['RuleNames']: assert(rule in test_2_target['Rules']) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 32b772abe..36d4bdbc4 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -71,7 +71,7 @@ def test_my_model_save(): body = conn.Object('mybucket', 'steve').get()[ 'Body'].read().decode("utf-8") - assert body == b'is awesome' + assert body == 'is awesome' @mock_s3 From b73360c1873612ca1e59e708fc8e42ab6377254b Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 23 Feb 2017 22:34:43 -0500 Subject: [PATCH 060/213] Fix api gateway callback. 
--- moto/apigateway/models.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 6585d19f5..bfcfdbfa6 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -339,9 +339,11 @@ class RestAPI(object): return status_code, {}, response def update_integration_mocks(self, stage_name): - stage_url = STAGE_URL.format(api_id=self.id.upper(), + stage_url = STAGE_URL.format(api_id=self.id, region_name=self.region_name, stage_name=stage_name) - responses.add_callback(responses.GET, stage_url, + responses.add_callback(responses.GET, stage_url.upper(), + callback=self.resource_callback) + responses.add_callback(responses.GET, stage_url.lower(), callback=self.resource_callback) def create_stage(self, name, deployment_id, variables=None, description='', cacheClusterEnabled=None, cacheClusterSize=None): From 5324638573e18e51b895b39ec0d32ac2133a37d0 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 26 Feb 2017 19:55:19 -0500 Subject: [PATCH 061/213] Add docs on contributing and code of conduct. 
--- CODE_OF_CONDUCT.md | 74 ++++++++++++++++++++++++++++++++++++++++++++++ CONTRIBUTING.md | 4 +++ ISSUE_TEMPLATE.md | 33 +++++++++++++++++++++ 3 files changed, 111 insertions(+) create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 ISSUE_TEMPLATE.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..8f2d40361 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project maintainer at spulec@gmail.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..1266d508e --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,4 @@ +### Contributing code + +If you have improvements to Moto, send us your pull requests! For those +just getting started, Github has a [howto](https://help.github.com/articles/using-pull-requests/). diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..c3d7d3f65 --- /dev/null +++ b/ISSUE_TEMPLATE.md @@ -0,0 +1,33 @@ +## Reporting Bugs + +Please be aware of the following things when filing bug reports: + +1. Avoid raising duplicate issues. *Please* use the GitHub issue search feature + to check whether your bug report or feature request has been mentioned in + the past. +2. When filing bug reports about exceptions or tracebacks, please include the + *complete* traceback. Partial tracebacks, or just the exception text, are + not helpful. +3. Make sure you provide a suitable amount of information to work with. This + means you should provide: + + - Guidance on **how to reproduce the issue**. Ideally, this should be a + *small* code sample that can be run immediately by the maintainers. + Failing that, let us know what you're doing, how often it happens, what + environment you're using, etc. Be thorough: it prevents us needing to ask + further questions. + - Tell us **what you expected to happen**. When we run your example code, + what are we expecting to happen? What does "success" look like for your + code? + - Tell us **what actually happens**. It's not helpful for you to say "it + doesn't work" or "it fails". Tell us *how* it fails: do you get an + exception? A hang? 
How was the actual result different from your expected + result? + - Tell us **what version of Moto you're using**, and + **how you installed it**. Tell us whether you're using standalone server + mode or the Python mocks. If you are using the Python mocks, include the + version of boto/boto3/botocore. + + + If you do not provide all of these things, it will take us much longer to + fix your problem. From e5bcafd22f297dd0b5af4b2b840e379819c009b5 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 26 Feb 2017 23:40:54 -0500 Subject: [PATCH 062/213] Cleanup travis. --- .travis.yml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index 35506f2dc..9c867f237 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,15 +3,11 @@ sudo: false python: - 2.6 - 2.7 + - 3.3 env: - matrix: - - BOTO_VERSION=2.45.0 -matrix: - include: - - python: "3.3" - env: BOTO_VERSION=2.45.0 + - TEST_SERVER_MODE=false install: - - travis_retry pip install boto==$BOTO_VERSION + - travis_retry pip install boto==2.45.0 - travis_retry pip install boto3 - travis_retry pip install . - travis_retry pip install -r requirements-dev.txt From 089b2a66d259bbe0b5c162cf2bbdf1f9e64f71fb Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 26 Feb 2017 23:56:50 -0500 Subject: [PATCH 063/213] Add server-mode tests. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 9c867f237..8a2e8ce6f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,7 @@ python: - 3.3 env: - TEST_SERVER_MODE=false + - TEST_SERVER_MODE=true install: - travis_retry pip install boto==2.45.0 - travis_retry pip install boto3 From e841c0d2f5cb04b825097b7966c3a69c135670a0 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 27 Feb 2017 00:02:23 -0500 Subject: [PATCH 064/213] Need to run moto_server... 
--- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index 8a2e8ce6f..bf1bece4b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,10 @@ install: - travis_retry pip install . - travis_retry pip install -r requirements-dev.txt - travis_retry pip install coveralls + - | + if [ "$TEST_SERVER_MODE" = "true" ]; then + moto_server -p 8086& + fi script: - make test after_success: From b63618b97589f2f865142cbdc84ca12aa9fbb6b1 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 27 Feb 2017 00:17:01 -0500 Subject: [PATCH 065/213] Add keys for server mode. --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index bf1bece4b..85180bcab 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,6 +16,8 @@ install: - | if [ "$TEST_SERVER_MODE" = "true" ]; then moto_server -p 8086& + export AWS_SECRET_ACCESS_KEY=foobar_secret + export AWS_ACCESS_KEY_ID=foobar_key fi script: - make test From 5a56b3a049c648a9720ce742bc18a021eb0e3411 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 27 Feb 2017 08:54:33 -0500 Subject: [PATCH 066/213] Set credentials for server too. --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 85180bcab..d0c8ce45b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,7 +15,7 @@ install: - travis_retry pip install coveralls - | if [ "$TEST_SERVER_MODE" = "true" ]; then - moto_server -p 8086& + AWS_SECRET_ACCESS_KEY=server_secret AWS_ACCESS_KEY_ID=server_key moto_server -p 8086& export AWS_SECRET_ACCESS_KEY=foobar_secret export AWS_ACCESS_KEY_ID=foobar_key fi From a22caf27ab698eb13da0f030dc67b108cd9b47ef Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 27 Feb 2017 10:20:53 -0500 Subject: [PATCH 067/213] Cleanup sns default topic. 
--- moto/sns/models.py | 6 +++--- tests/test_sns/test_topics.py | 3 ++- tests/test_sns/test_topics_boto3.py | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 0ad00928d..4fa72a7d0 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -26,7 +26,7 @@ class Topic(object): self.sns_backend = sns_backend self.account_id = DEFAULT_ACCOUNT_ID self.display_name = "" - self.policy = DEFAULT_TOPIC_POLICY + self.policy = json.dumps(DEFAULT_TOPIC_POLICY) self.delivery_policy = "" self.effective_delivery_policy = DEFAULT_EFFECTIVE_DELIVERY_POLICY self.arn = make_arn_for_topic( @@ -288,7 +288,7 @@ for region in boto.sns.regions(): sns_backends[region.name] = SNSBackend(region.name) -DEFAULT_TOPIC_POLICY = json.dumps({ +DEFAULT_TOPIC_POLICY = { "Version": "2008-10-17", "Id": "us-east-1/698519295917/test__default_policy_ID", "Statement": [{ @@ -315,7 +315,7 @@ DEFAULT_TOPIC_POLICY = json.dumps({ } } }] -}) +} DEFAULT_EFFECTIVE_DELIVERY_POLICY = json.dumps({ 'http': { diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py index 79b85f709..cbb4849c8 100644 --- a/tests/test_sns/test_topics.py +++ b/tests/test_sns/test_topics.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import boto +import json import six import sure # noqa @@ -75,7 +76,7 @@ def test_topic_attributes(): .format(conn.region.name) ) attributes["Owner"].should.equal(123456789012) - attributes["Policy"].should.equal(DEFAULT_TOPIC_POLICY) + json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY) attributes["DisplayName"].should.equal("") attributes["SubscriptionsPending"].should.equal(0) attributes["SubscriptionsConfirmed"].should.equal(0) diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 55d03afff..bfa9b5d1f 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -72,7 +72,7 @@ def test_topic_attributes(): 
.format(conn._client_config.region_name) ) attributes["Owner"].should.equal('123456789012') - attributes["Policy"].should.equal(DEFAULT_TOPIC_POLICY) + json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY) attributes["DisplayName"].should.equal("") attributes["SubscriptionsPending"].should.equal('0') attributes["SubscriptionsConfirmed"].should.equal('0') From 1287d53817b83dc6509a1ff5d568267970421175 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 27 Feb 2017 20:53:57 -0500 Subject: [PATCH 068/213] Fix tests for py26 and py3. --- moto/sns/models.py | 6 +++--- tests/test_sns/test_publishing.py | 3 +-- tests/test_sns/test_topics.py | 2 +- tests/test_sns/test_topics_boto3.py | 2 +- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 4fa72a7d0..0ccf60ea9 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -28,7 +28,7 @@ class Topic(object): self.display_name = "" self.policy = json.dumps(DEFAULT_TOPIC_POLICY) self.delivery_policy = "" - self.effective_delivery_policy = DEFAULT_EFFECTIVE_DELIVERY_POLICY + self.effective_delivery_policy = json.dumps(DEFAULT_EFFECTIVE_DELIVERY_POLICY) self.arn = make_arn_for_topic( self.account_id, name, sns_backend.region_name) @@ -317,7 +317,7 @@ DEFAULT_TOPIC_POLICY = { }] } -DEFAULT_EFFECTIVE_DELIVERY_POLICY = json.dumps({ +DEFAULT_EFFECTIVE_DELIVERY_POLICY = { 'http': { 'disableSubscriptionOverrides': False, 'defaultHealthyRetryPolicy': { @@ -330,4 +330,4 @@ DEFAULT_EFFECTIVE_DELIVERY_POLICY = json.dumps({ 'backoffFunction': 'linear' } } -}) +} diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index 718bce5c4..51042675f 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -6,7 +6,7 @@ from freezegun import freeze_time import sure # noqa from moto.packages.responses import responses -from moto import mock_sns, mock_sns_deprecated, mock_sqs_deprecated +from moto import 
mock_sns_deprecated, mock_sqs_deprecated @mock_sqs_deprecated @@ -54,7 +54,6 @@ def test_publish_to_sqs_in_different_region(): @freeze_time("2013-01-01") -@mock_sns @mock_sns_deprecated def test_publish_to_http(): responses.add( diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py index cbb4849c8..1b039c51d 100644 --- a/tests/test_sns/test_topics.py +++ b/tests/test_sns/test_topics.py @@ -82,7 +82,7 @@ def test_topic_attributes(): attributes["SubscriptionsConfirmed"].should.equal(0) attributes["SubscriptionsDeleted"].should.equal(0) attributes["DeliveryPolicy"].should.equal("") - attributes["EffectiveDeliveryPolicy"].should.equal( + json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal( DEFAULT_EFFECTIVE_DELIVERY_POLICY) # boto can't handle prefix-mandatory strings: diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index bfa9b5d1f..4702744c3 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -78,7 +78,7 @@ def test_topic_attributes(): attributes["SubscriptionsConfirmed"].should.equal('0') attributes["SubscriptionsDeleted"].should.equal('0') attributes["DeliveryPolicy"].should.equal("") - attributes["EffectiveDeliveryPolicy"].should.equal( + json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal( DEFAULT_EFFECTIVE_DELIVERY_POLICY) # boto can't handle prefix-mandatory strings: From 3be1b16eb90635dbd2f6e34c7b82b8b0bcf10593 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 27 Feb 2017 21:24:34 -0500 Subject: [PATCH 069/213] Drop py26. --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index d0c8ce45b..87ee121e6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,6 @@ language: python sudo: false python: - - 2.6 - 2.7 - 3.3 env: From d530bcf4a7a2fdce06474b8bec452e07d1f25aed Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 28 Feb 2017 21:29:28 -0500 Subject: [PATCH 070/213] remove py26. 
--- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index ee9c07aed..d34715554 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,6 @@ setup( test_suite="tests", classifiers=[ "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", From bcc3e57949831b31661c1c91d739128e8bbd56d9 Mon Sep 17 00:00:00 2001 From: David Wilcox Date: Sun, 5 Mar 2017 14:26:23 +1100 Subject: [PATCH 071/213] Cloudformation ResourceMaps incorrectly share namespaces for Conditions and Resources (#828) * add tests to check CF's conditions and resources have distinct namespace * separate the resource and condition namespaces for CF --- moto/cloudformation/parsing.py | 14 ++--- .../test_cloudformation_stack_crud.py | 54 +++++++++++++++++++ 2 files changed, 61 insertions(+), 7 deletions(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 521658cee..fdc569dc1 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -143,7 +143,7 @@ def clean_json(resource_json, resources_map): if 'Fn::If' in resource_json: condition_name, true_value, false_value = resource_json['Fn::If'] - if resources_map[condition_name]: + if resources_map.lazy_condition_map[condition_name]: return clean_json(true_value, resources_map) else: return clean_json(false_value, resources_map) @@ -206,7 +206,7 @@ def parse_resource(logical_id, resource_json, resources_map): def parse_and_create_resource(logical_id, resource_json, resources_map, region_name): condition = resource_json.get('Condition') - if condition and not resources_map[condition]: + if condition and not resources_map.lazy_condition_map[condition]: # If this has a False condition, don't create the resource return None @@ -352,13 +352,13 @@ class ResourceMap(collections.Mapping): def load_conditions(self): conditions = 
self._template.get('Conditions', {}) - lazy_condition_map = LazyDict() + self.lazy_condition_map = LazyDict() for condition_name, condition in conditions.items(): - lazy_condition_map[condition_name] = functools.partial(parse_condition, - condition, self._parsed_resources, lazy_condition_map) + self.lazy_condition_map[condition_name] = functools.partial(parse_condition, + condition, self._parsed_resources, self.lazy_condition_map) - for condition_name in lazy_condition_map: - self._parsed_resources[condition_name] = lazy_condition_map[condition_name] + for condition_name in self.lazy_condition_map: + _ = self.lazy_condition_map[condition_name] def create(self): self.load_mapping() diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index e45dafbfa..0696d5ada 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -279,6 +279,60 @@ def test_cloudformation_params(): param.value.should.equal('testing123') +@mock_cloudformation() +def test_cloudformation_params_conditions_and_resources_are_distinct(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Conditions": { + "FooEnabled": { + "Fn::Equals": [ + { + "Ref": "FooEnabled" + }, + "true" + ] + }, + "FooDisabled": { + "Fn::Not": [ + { + "Fn::Equals": [ + { + "Ref": "FooEnabled" + }, + "true" + ] + } + ] + } + }, + "Parameters": { + "FooEnabled": { + "Type": "String", + "AllowedValues": [ + "true", + "false" + ] + } + }, + "Resources": { + "Bar": { + "Properties": { + "CidrBlock": "192.168.0.0/16", + }, + "Condition": "FooDisabled", + "Type": "AWS::EC2::VPC" + } + } + } + dummy_template_json = json.dumps(dummy_template) + cfn = boto.connect_cloudformation() + cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[('FooEnabled', 'true')]) + stack = cfn.describe_stacks('test_stack1')[0] 
+ resources = stack.list_resources() + assert not [resource for resource in resources if resource.logical_resource_id == 'Bar'] + + @mock_cloudformation def test_stack_tags(): conn = boto.connect_cloudformation() From 7d75c3ba189d41f35d23469fc47d1211ff3a3231 Mon Sep 17 00:00:00 2001 From: Guy Templeton Date: Sun, 5 Mar 2017 03:30:36 +0000 Subject: [PATCH 072/213] Feat: ECS container status updating (#831) * Uptick boto3 version to version supporting ECS container instance state changes * Add initial status update * Only place tasks on active instances * PEP8 cleanup --- moto/ecs/models.py | 47 ++++++++++++++++------------ moto/ecs/responses.py | 42 ++++++++++++------------- requirements-dev.txt | 2 +- tests/test_ecs/test_ecs_boto3.py | 53 ++++++++++++++++++++++++++++++++ 4 files changed, 103 insertions(+), 41 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 3ce7be8b5..25fe0ffec 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -61,6 +61,7 @@ class Cluster(BaseObject): # ClusterName is optional in CloudFormation, thus create a random name if necessary cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))), ) + @classmethod def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -126,6 +127,7 @@ class TaskDefinition(BaseObject): # no-op when nothing changed between old and new resources return original_resource + class Task(BaseObject): def __init__(self, cluster, task_definition, container_instance_arn, overrides={}, started_by=''): self.cluster_arn = cluster.arn @@ -227,10 +229,10 @@ class ContainerInstance(BaseObject): self.remainingResources = [] self.runningTaskCount = 0 self.versionInfo = { - 'agentVersion': "1.0.0", - 'agentHash': '4023248', - 'dockerVersion': 'DockerVersion: 1.5.0' - } + 'agentVersion': "1.0.0", + 'agentHash': '4023248', + 'dockerVersion': 'DockerVersion: 1.5.0' + } 
@property def response_object(self): @@ -327,20 +329,6 @@ class EC2ContainerServiceBackend(BaseBackend): task_arns.extend([task_definition.arn for task_definition in task_definition_list]) return task_arns - def describe_task_definition(self, task_definition_str): - task_definition_name = task_definition_str.split('/')[-1] - if ':' in task_definition_name: - family, revision = task_definition_name.split(':') - revision = int(revision) - else: - family = task_definition_name - revision = len(self.task_definitions.get(family, [])) - - if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]): - return self.task_definitions[family][revision-1] - else: - raise Exception("{0} is not a task_definition".format(task_definition_name)) - def deregister_task_definition(self, task_definition_str): task_definition_name = task_definition_str.split('/')[-1] family, revision = task_definition_name.split(':') @@ -363,9 +351,11 @@ class EC2ContainerServiceBackend(BaseBackend): container_instances = list(self.container_instances.get(cluster_name, {}).keys()) if not container_instances: raise Exception("No instances found in cluster {}".format(cluster_name)) + active_container_instances = [x for x in container_instances if + self.container_instances[cluster_name][x].status == 'ACTIVE'] for _ in range(count or 1): container_instance_arn = self.container_instances[cluster_name][ - container_instances[randint(0, len(container_instances) - 1)] + active_container_instances[randint(0, len(active_container_instances) - 1)] ].containerInstanceArn task = Task(cluster, task_definition, container_instance_arn, overrides or {}, started_by or '') tasks.append(task) @@ -537,6 +527,25 @@ class EC2ContainerServiceBackend(BaseBackend): return container_instance_objects, failures + def update_container_instances_state(self, cluster_str, list_container_instance_ids, status): + cluster_name = cluster_str.split('/')[-1] + if cluster_name not in self.clusters: + raise 
Exception("{0} is not a cluster".format(cluster_name)) + status = status.upper() + if status not in ['ACTIVE', 'DRAINING']: + raise Exception("An error occurred (InvalidParameterException) when calling the UpdateContainerInstancesState operation: Container instances status should be one of [ACTIVE,DRAINING]") + failures = [] + container_instance_objects = [] + for container_instance_id in list_container_instance_ids: + container_instance = self.container_instances[cluster_name].get(container_instance_id, None) + if container_instance is not None: + container_instance.status = status + container_instance_objects.append(container_instance) + else: + failures.append(ContainerInstanceFailure('MISSING', container_instance_id)) + + return container_instance_objects, failures + def deregister_container_instance(self, cluster_str, container_instance_str): pass diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index ce90de379..d61b7dd15 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals import json -import uuid from moto.core.responses import BaseResponse from .models import ecs_backends @@ -34,8 +33,8 @@ class EC2ContainerServiceResponse(BaseResponse): cluster_arns = self.ecs_backend.list_clusters() return json.dumps({ 'clusterArns': cluster_arns - #, - #'nextToken': str(uuid.uuid1()) + # , + # 'nextToken': str(uuid.uuid1()) }) def describe_clusters(self): @@ -66,15 +65,8 @@ class EC2ContainerServiceResponse(BaseResponse): task_definition_arns = self.ecs_backend.list_task_definitions() return json.dumps({ 'taskDefinitionArns': task_definition_arns - #, - #'nextToken': str(uuid.uuid1()) - }) - - def describe_task_definition(self): - task_definition_str = self._get_param('taskDefinition') - task_definition = self.ecs_backend.describe_task_definition(task_definition_str) - return json.dumps({ - 'taskDefinition': task_definition.response_object + # , + # 'nextToken': str(uuid.uuid1()) }) def 
deregister_task_definition(self): @@ -94,7 +86,7 @@ class EC2ContainerServiceResponse(BaseResponse): return json.dumps({ 'tasks': [task.response_object for task in tasks], 'failures': [] - }) + }) def describe_tasks(self): cluster = self._get_param('cluster') @@ -123,7 +115,7 @@ class EC2ContainerServiceResponse(BaseResponse): return json.dumps({ 'tasks': [task.response_object for task in tasks], 'failures': [] - }) + }) def list_tasks(self): cluster_str = self._get_param('cluster') @@ -135,8 +127,7 @@ class EC2ContainerServiceResponse(BaseResponse): task_arns = self.ecs_backend.list_tasks(cluster_str, container_instance, family, started_by, service_name, desiredStatus) return json.dumps({ 'taskArns': task_arns - }) - + }) def stop_task(self): cluster_str = self._get_param('cluster') @@ -145,8 +136,7 @@ class EC2ContainerServiceResponse(BaseResponse): task = self.ecs_backend.stop_task(cluster_str, task, reason) return json.dumps({ 'task': task.response_object - }) - + }) def create_service(self): cluster_str = self._get_param('cluster') @@ -201,7 +191,7 @@ class EC2ContainerServiceResponse(BaseResponse): ec2_instance_id = instance_identity_document["instanceId"] container_instance = self.ecs_backend.register_container_instance(cluster_str, ec2_instance_id) return json.dumps({ - 'containerInstance' : container_instance.response_object + 'containerInstance': container_instance.response_object }) def list_container_instances(self): @@ -216,6 +206,16 @@ class EC2ContainerServiceResponse(BaseResponse): list_container_instance_arns = self._get_param('containerInstances') container_instances, failures = self.ecs_backend.describe_container_instances(cluster_str, list_container_instance_arns) return json.dumps({ - 'failures': [ci.response_object for ci in failures], - 'containerInstances': [ci.response_object for ci in container_instances] + 'failures': [ci.response_object for ci in failures], + 'containerInstances': [ci.response_object for ci in container_instances] + }) + 
+ def update_container_instances_state(self): + cluster_str = self._get_param('cluster') + list_container_instance_arns = self._get_param('containerInstances') + status_str = self._get_param('status') + container_instances, failures = self.ecs_backend.update_container_instances_state(cluster_str, list_container_instance_arns, status_str) + return json.dumps({ + 'failures': [ci.response_object for ci in failures], + 'containerInstances': [ci.response_object for ci in container_instances] }) diff --git a/requirements-dev.txt b/requirements-dev.txt index 9bdccc6e4..554834a51 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,6 +5,6 @@ sure==1.2.24 coverage freezegun flask -boto3>=1.3.1 +boto3>=1.4.4 botocore>=1.4.28 six diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index f073628a9..bbb86dbe3 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -573,6 +573,58 @@ def test_describe_container_instances(): for arn in test_instance_arns: response_arns.should.contain(arn) +@mock_ec2 +@mock_ecs +def test_update_container_instances_state(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + instance_to_create = 3 + test_instance_arns = [] + for i in range(0, instance_to_create): + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document) + + test_instance_arns.append(response['containerInstance']['containerInstanceArn']) + + test_instance_ids = list(map((lambda x: x.split('/')[1]), test_instance_arns)) + response = 
ecs_client.update_container_instances_state(cluster=test_cluster_name, containerInstances=test_instance_ids, status='DRAINING') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('DRAINING') + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, containerInstances=test_instance_ids, status='DRAINING') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('DRAINING') + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, containerInstances=test_instance_ids, status='ACTIVE') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('ACTIVE') + ecs_client.update_container_instances_state.when.called_with(cluster=test_cluster_name, containerInstances=test_instance_ids, status='test_status').should.throw(Exception) + + @mock_ec2 @mock_ecs @@ -861,6 +913,7 @@ def describe_task_definition(): task['taskDefinitionArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task2:1') task['volumes'].should.equal([]) + @mock_ec2 @mock_ecs def test_stop_task(): From 8d737eb59d031b2e00a4dd32b5c3a8ac6af3af69 Mon Sep 17 00:00:00 2001 From: David Wilcox Date: Sun, 5 Mar 2017 14:31:45 +1100 Subject: [PATCH 073/213] Route53: allow hosted zone id as well when creating record sets (#833) * add test that creates r53 record set from hosted zone id (not name) * pass test to enable creating record sets by hosted zone ids --- 
moto/route53/models.py | 7 ++- .../test_cloudformation_stack_crud.py | 53 ++++++++++++++++++- 2 files changed, 57 insertions(+), 3 deletions(-) diff --git a/moto/route53/models.py b/moto/route53/models.py index 6b293a1ca..552deebdf 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -213,8 +213,11 @@ class RecordSetGroup(object): def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] - zone_name = properties["HostedZoneName"] - hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name) + zone_name = properties.get("HostedZoneName") + if zone_name: + hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name) + else: + hosted_zone = route53_backend.get_hosted_zone(properties["HostedZoneId"]) record_sets = properties["RecordSets"] for record_set in record_sets: hosted_zone.add_rrset(record_set) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 0696d5ada..a2b5a06f5 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -12,7 +12,7 @@ import sure # noqa import tests.backport_assert_raises # noqa from nose.tools import assert_raises -from moto import mock_cloudformation, mock_s3 +from moto import mock_cloudformation, mock_s3, mock_route53 from moto.cloudformation import cloudformation_backends dummy_template = { @@ -69,6 +69,57 @@ def test_create_stack(): }) +@mock_cloudformation +@mock_route53 +def test_create_stack_hosted_zone_by_id(): + conn = boto.connect_cloudformation() + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Parameters": { + }, + "Resources": { + "Bar": { + "Type" : "AWS::Route53::HostedZone", + "Properties" : { + "Name" : "foo.bar.baz", + } + }, + }, + } + dummy_template2 = { + "AWSTemplateFormatVersion": "2010-09-09", + 
"Description": "Stack 2", + "Parameters": { + "ZoneId": { "Type": "String" } + }, + "Resources": { + "Foo": { + "Properties": { + "HostedZoneId": {"Ref": "ZoneId"}, + "RecordSets": [] + }, + "Type": "AWS::Route53::RecordSetGroup" + } + }, + } + conn.create_stack( + "test_stack", + template_body=json.dumps(dummy_template), + parameters={}.items() + ) + r53_conn = boto.connect_route53() + zone_id = r53_conn.get_zones()[0].id + conn.create_stack( + "test_stack", + template_body=json.dumps(dummy_template2), + parameters={"ZoneId": zone_id}.items() + ) + + stack = conn.describe_stacks()[0] + assert stack.list_resources() + + @mock_cloudformation def test_creating_stacks_across_regions(): west1_conn = boto.cloudformation.connect_to_region("us-west-1") From 1b6007e2b2b8cf44ec8a3bf799cd51925bd9659d Mon Sep 17 00:00:00 2001 From: David Wilcox Date: Sun, 5 Mar 2017 14:36:25 +1100 Subject: [PATCH 074/213] Correct IAM list_server_certs template that was based off incorrect docs (#836) The documentation for this method is here https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListServerCertificates.html The docs say the return type is this ServerCertificateMetadataList.member.N but the sample response incorrectly include a . I've sent feedback to the AWS docs telling them to fix their stuff but this also needs to be fixed. I haven't checked other templates with tags in them, as they may be prone to this same problem. 
--- moto/iam/responses.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 223691e1e..6884c2025 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -578,18 +578,16 @@ LIST_SERVER_CERTIFICATES_TEMPLATE = """ {% for certificate in server_certificates %} - - {{ certificate.cert_name }} - {% if certificate.path %} - {{ certificate.path }} - arn:aws:iam::123456789012:server-certificate/{{ certificate.path }}/{{ certificate.cert_name }} - {% else %} - arn:aws:iam::123456789012:server-certificate/{{ certificate.cert_name }} - {% endif %} - 2010-05-08T01:02:03.004Z - ASCACKCEVSQ6C2EXAMPLE - 2012-05-08T01:02:03.004Z - + {{ certificate.cert_name }} + {% if certificate.path %} + {{ certificate.path }} + arn:aws:iam::123456789012:server-certificate/{{ certificate.path }}/{{ certificate.cert_name }} + {% else %} + arn:aws:iam::123456789012:server-certificate/{{ certificate.cert_name }} + {% endif %} + 2010-05-08T01:02:03.004Z + ASCACKCEVSQ6C2EXAMPLE + 2012-05-08T01:02:03.004Z {% endfor %} From a30ba2b597764d5132c818084933398d9fcf755c Mon Sep 17 00:00:00 2001 From: Andy Freeland Date: Sat, 4 Mar 2017 19:37:53 -0800 Subject: [PATCH 075/213] EC2 tags specified in CloudFormation should be applied to the instances (#840) Fixes #839. 
--- moto/ec2/models.py | 7 +++++-- .../fixtures/vpc_single_instance_in_subnet.py | 4 ++++ .../test_cloudformation_stack_integration.py | 1 + 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 30769fd7e..35c7bd878 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -336,7 +336,7 @@ class NetworkInterfaceBackend(object): return generic_filter(filters, enis) -class Instance(BotoInstance, TaggedEC2Resource): +class Instance(TaggedEC2Resource, BotoInstance): def __init__(self, ec2_backend, image_id, user_data, security_groups, **kwargs): super(Instance, self).__init__() self.ec2_backend = ec2_backend @@ -441,7 +441,10 @@ class Instance(BotoInstance, TaggedEC2Resource): key_name=properties.get("KeyName"), private_ip=properties.get('PrivateIpAddress'), ) - return reservation.instances[0] + instance = reservation.instances[0] + for tag in properties.get("Tags", []): + instance.add_tag(tag["Key"], tag["Value"]) + return instance @property def physical_resource_id(self): diff --git a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py index 1f296cf0c..177da884e 100644 --- a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py +++ b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py @@ -236,6 +236,10 @@ template = { "Ref": "AWS::StackId" }, "Key": "Application" + }, + { + "Value": "Bar", + "Key": "Foo" } ], "SecurityGroupIds": [ diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 4237bee19..f76e02a49 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -676,6 +676,7 @@ def test_vpc_single_instance_in_subnet(): ec2_conn = boto.ec2.connect_to_region("us-west-1") reservation = 
ec2_conn.get_all_instances()[0] instance = reservation.instances[0] + instance.tags["Foo"].should.equal("Bar") # Check that the EIP is attached the the EC2 instance eip = ec2_conn.get_all_addresses()[0] eip.domain.should.equal('vpc') From 783242b696570c641f748ae4f029f86c09332ffe Mon Sep 17 00:00:00 2001 From: Andy Freeland Date: Sat, 4 Mar 2017 19:40:43 -0800 Subject: [PATCH 076/213] Elastic IP PhysicalResourceId should always be its public IP (#841) According to the [CloudFormation `Ref` docs][docs], the `Ref` return value (and physical ID of the resource) for an Elastic IP is its public IP address. [docs]: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html --- moto/ec2/models.py | 2 +- .../test_cloudformation_stack_integration.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 35c7bd878..c18f8b390 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2836,7 +2836,7 @@ class ElasticAddress(object): @property def physical_resource_id(self): - return self.allocation_id if self.allocation_id else self.public_ip + return self.public_ip def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index f76e02a49..c168ff723 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -694,7 +694,7 @@ def test_vpc_single_instance_in_subnet(): subnet_resource.physical_resource_id.should.equal(subnet.id) eip_resource = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] - eip_resource.physical_resource_id.should.equal(eip.allocation_id) + eip_resource.physical_resource_id.should.equal(eip.public_ip) 
@mock_cloudformation() @mock_ec2() @@ -991,7 +991,7 @@ def test_vpc_eip(): stack = conn.describe_stacks()[0] resources = stack.describe_resources() cfn_eip = [resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] - cfn_eip.physical_resource_id.should.equal(eip.allocation_id) + cfn_eip.physical_resource_id.should.equal(eip.public_ip) @mock_ec2() From a9554924df793fe36d11938208617f8e8a3381c7 Mon Sep 17 00:00:00 2001 From: David Wilcox Date: Sun, 5 Mar 2017 14:48:51 +1100 Subject: [PATCH 077/213] make cloudformation update stack use parameters provided (#843) --- moto/cloudformation/models.py | 8 ++--- moto/cloudformation/parsing.py | 4 ++- moto/cloudformation/responses.py | 6 ++++ .../test_cloudformation_stack_crud.py | 36 +++++++++++++++++++ 4 files changed, 49 insertions(+), 5 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 1f091251b..a9dda8fdc 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -78,10 +78,10 @@ class FakeStack(object): def stack_outputs(self): return self.output_map.values() - def update(self, template, role_arn=None): + def update(self, template, role_arn=None, parameters=None): self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") self.template = template - self.resource_map.update(json.loads(template)) + self.resource_map.update(json.loads(template), parameters) self.output_map = self._create_output_map() self._add_stack_event("UPDATE_COMPLETE") self.status = "UPDATE_COMPLETE" @@ -157,9 +157,9 @@ class CloudFormationBackend(BaseBackend): if stack.name == name_or_stack_id: return stack - def update_stack(self, name, template, role_arn=None): + def update_stack(self, name, template, role_arn=None, parameters=None): stack = self.get_stack(name) - stack.update(template, role_arn) + stack.update(template, role_arn, parameters=parameters) return stack def list_stack_resources(self, stack_name_or_id): diff --git 
a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index fdc569dc1..06673bd8c 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -374,7 +374,9 @@ class ResourceMap(collections.Mapping): self.tags['aws:cloudformation:logical-id'] = resource ec2_models.ec2_backends[self._region_name].create_tags([self[resource].physical_resource_id], self.tags) - def update(self, template): + def update(self, template, parameters=None): + if parameters: + self.input_parameters = parameters self.load_mapping() self.load_parameters() self.load_conditions() diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index d16b3560c..06d0bbb00 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -138,6 +138,11 @@ class CloudFormationResponse(BaseResponse): stack_body = self.cloudformation_backend.get_stack(stack_name).template else: stack_body = self._get_param('TemplateBody') + parameters = dict([ + (parameter['parameter_key'], parameter['parameter_value']) + for parameter + in self._get_list_prefix("Parameters.member") + ]) stack = self.cloudformation_backend.get_stack(stack_name) if stack.status == 'ROLLBACK_COMPLETE': @@ -147,6 +152,7 @@ class CloudFormationResponse(BaseResponse): name=stack_name, template=stack_body, role_arn=role_arn, + parameters=parameters ) if self.request_json: stack_body = { diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index a2b5a06f5..e145ea283 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -444,6 +444,42 @@ def test_update_stack(): }) +@mock_cloudformation +def test_update_stack_with_parameters(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack", + "Resources": { + "VPC": { + "Properties": { + "CidrBlock": {"Ref": "Bar"} + }, + 
"Type": "AWS::EC2::VPC" + } + }, + "Parameters": { + "Bar": { + "Type": "String" + } + } + } + dummy_template_json = json.dumps(dummy_template) + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + parameters=[("Bar", "192.168.0.0/16")] + ) + conn.update_stack( + "test_stack", + template_body=dummy_template_json, + parameters=[("Bar", "192.168.0.1/16")] + ) + + stack = conn.describe_stacks()[0] + assert stack.parameters[0].value == "192.168.0.1/16" + + @mock_cloudformation def test_update_stack_when_rolled_back(): conn = boto.connect_cloudformation() From f46a24180f9a2a84b61b05efdb57126cffaca113 Mon Sep 17 00:00:00 2001 From: William Richard Date: Sat, 4 Mar 2017 22:51:01 -0500 Subject: [PATCH 078/213] Cast desired capacity for cloudformation asg to int (#846) Cloudformation passes MaxSize, MinSize and DesiredCapacity as strings, but we want to store them as ints. Also includes tests of this fix, to help avoid regression. --- moto/autoscaling/models.py | 1 + .../test_cloudformation_stack_integration.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 53a0f62df..2b5a07c15 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -324,6 +324,7 @@ class AutoScalingBackend(BaseBackend): max_size = make_int(max_size) min_size = make_int(min_size) + desired_capacity = make_int(desired_capacity) default_cooldown = make_int(default_cooldown) if health_check_period is None: health_check_period = 300 diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index c168ff723..f842ffe70 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -535,6 +535,7 @@ def test_autoscaling_group_with_elb(): "LaunchConfigurationName": {"Ref": 
"my-launch-config"}, "MinSize": "2", "MaxSize": "2", + "DesiredCapacity": "2", "LoadBalancerNames": [{"Ref": "my-elb"}] }, }, @@ -614,6 +615,7 @@ def test_autoscaling_group_update(): "LaunchConfigurationName": {"Ref": "my-launch-config"}, "MinSize": "2", "MaxSize": "2", + "DesiredCapacity": "2" }, }, @@ -638,6 +640,7 @@ def test_autoscaling_group_update(): asg = autoscale_conn.get_all_groups()[0] asg.min_size.should.equal(2) asg.max_size.should.equal(2) + asg.desired_capacity.should.equal(2) asg_template['Resources']['my-as-group']['Properties']['MaxSize'] = 3 asg_template_json = json.dumps(asg_template) @@ -648,6 +651,7 @@ def test_autoscaling_group_update(): asg = autoscale_conn.get_all_groups()[0] asg.min_size.should.equal(2) asg.max_size.should.equal(3) + asg.desired_capacity.should.equal(2) @mock_ec2() From 56f9409ca995b73870316a2ac4ff1d024b3a5cab Mon Sep 17 00:00:00 2001 From: Chris LaRose Date: Sat, 4 Mar 2017 19:53:14 -0800 Subject: [PATCH 079/213] Use request URL to generate SQS queue URLs; fixes #626 (#827) --- moto/sqs/models.py | 5 ++--- moto/sqs/responses.py | 16 ++++++++++------ tests/test_sqs/test_sqs.py | 4 ++-- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 13b8c34b6..efe9f9517 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -180,9 +180,8 @@ class Queue(object): result[attribute] = getattr(self, camelcase_to_underscores(attribute)) return result - @property - def url(self): - return "http://sqs.{0}.amazonaws.com/123456789012/{1}".format(self.region, self.name) + def url(self, request_url): + return "{0}://{1}/123456789012/{2}".format(request_url.scheme, request_url.netloc, self.name) @property def messages(self): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 15c067613..6720a09bd 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +from six.moves.urllib.parse import urlparse from 
moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores @@ -57,26 +58,29 @@ class SQSResponse(BaseResponse): return status_code, headers, body def create_queue(self): + request_url = urlparse(self.uri) queue_name = self.querystring.get("QueueName")[0] queue = self.sqs_backend.create_queue(queue_name, visibility_timeout=self.attribute.get('VisibilityTimeout'), wait_time_seconds=self.attribute.get('WaitTimeSeconds')) template = self.response_template(CREATE_QUEUE_RESPONSE) - return template.render(queue=queue) + return template.render(queue=queue, request_url=request_url) def get_queue_url(self): + request_url = urlparse(self.uri) queue_name = self.querystring.get("QueueName")[0] queue = self.sqs_backend.get_queue(queue_name) if queue: template = self.response_template(GET_QUEUE_URL_RESPONSE) - return template.render(queue=queue) + return template.render(queue=queue, request_url=request_url) else: return "", dict(status=404) def list_queues(self): + request_url = urlparse(self.uri) queue_name_prefix = self.querystring.get("QueueNamePrefix", [None])[0] queues = self.sqs_backend.list_queues(queue_name_prefix) template = self.response_template(LIST_QUEUES_RESPONSE) - return template.render(queues=queues) + return template.render(queues=queues, request_url=request_url) def change_message_visibility(self): queue_name = self._get_queue_name() @@ -265,7 +269,7 @@ class SQSResponse(BaseResponse): CREATE_QUEUE_RESPONSE = """ - {{ queue.url }} + {{ queue.url(request_url) }} {{ queue.visibility_timeout }} @@ -275,7 +279,7 @@ CREATE_QUEUE_RESPONSE = """ GET_QUEUE_URL_RESPONSE = """ - {{ queue.url }} + {{ queue.url(request_url) }} 470a6f13-2ed9-4181-ad8a-2fdea142988e @@ -285,7 +289,7 @@ GET_QUEUE_URL_RESPONSE = """ LIST_QUEUES_RESPONSE = """ {% for queue in queues %} - {{ queue.url }} + {{ queue.url(request_url) }} {% endfor %} diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 32b026a46..2ad5f1af1 100644 --- 
a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -71,7 +71,7 @@ def test_create_queues_in_multiple_region(): list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) - west1_conn.list_queues()['QueueUrls'][0].should.equal('http://sqs.us-west-1.amazonaws.com/123456789012/blah') + west1_conn.list_queues()['QueueUrls'][0].should.equal('https://us-west-1.queue.amazonaws.com/123456789012/blah') @mock_sqs @@ -85,7 +85,7 @@ def test_get_queue_with_prefix(): queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] queue.should.have.length_of(1) - queue[0].should.equal("http://sqs.us-west-1.amazonaws.com/123456789012/test-queue") + queue[0].should.equal("https://us-west-1.queue.amazonaws.com/123456789012/test-queue") @mock_sqs From 9b6d3983d2aa4ae41ea67595a8d744c108796b79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20Cavaill=C3=A9?= Date: Sun, 5 Mar 2017 04:56:36 +0100 Subject: [PATCH 080/213] iam: add group policy methods (#849) Implemented mocks for: * get_all_group_policies * list_group_policies (boto3) * get_group_policy * put_group_policy --- moto/iam/models.py | 31 +++++++++++++++++ moto/iam/responses.py | 56 +++++++++++++++++++++++++++++++ tests/test_iam/test_iam_groups.py | 38 +++++++++++++++++++++ 3 files changed, 125 insertions(+) diff --git a/moto/iam/models.py b/moto/iam/models.py index d27722f33..15a26f663 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -167,6 +167,7 @@ class Group(object): ) self.users = [] + self.policies = {} def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException @@ -174,6 +175,24 @@ class Group(object): raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"') raise UnformattedGetAttTemplateException() + def get_policy(self, policy_name): + try: + policy_json = self.policies[policy_name] + except KeyError: + raise IAMNotFoundException("Policy {0} not 
found".format(policy_name)) + + return { + 'policy_name': policy_name, + 'policy_document': policy_json, + 'group_name': self.name, + } + + def put_policy(self, policy_name, policy_json): + self.policies[policy_name] = policy_json + + def list_policies(self): + return self.policies.keys() + class User(object): def __init__(self, name, path=None): @@ -573,6 +592,18 @@ class IAMBackend(BaseBackend): return groups + def put_group_policy(self, group_name, policy_name, policy_json): + group = self.get_group(group_name) + group.put_policy(policy_name, policy_json) + + def list_group_policies(self, group_name, marker=None, max_items=None): + group = self.get_group(group_name) + return group.list_policies() + + def get_group_policy(self, group_name, policy_name): + group = self.get_group(group_name) + return group.get_policy(policy_name) + def create_user(self, user_name, path='/'): if user_name in self.users: raise IAMConflictException("EntityAlreadyExists", "User {0} already exists".format(user_name)) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 6884c2025..c23e9bd8e 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -186,6 +186,32 @@ class IamResponse(BaseResponse): template = self.response_template(LIST_GROUPS_FOR_USER_TEMPLATE) return template.render(groups=groups) + def put_group_policy(self): + group_name = self._get_param('GroupName') + policy_name = self._get_param('PolicyName') + policy_document = self._get_param('PolicyDocument') + iam_backend.put_group_policy(group_name, policy_name, policy_document) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name="PutGroupPolicyResponse") + + def list_group_policies(self): + group_name = self._get_param('GroupName') + marker = self._get_param('Marker') + max_items = self._get_param('MaxItems') + policies = iam_backend.list_group_policies(group_name, + marker=marker, max_items=max_items) + template = self.response_template(LIST_GROUP_POLICIES_TEMPLATE) 
+ return template.render(name="ListGroupPoliciesResponse", + policies=policies, + marker=marker) + + def get_group_policy(self): + group_name = self._get_param('GroupName') + policy_name = self._get_param('PolicyName') + policy_result = iam_backend.get_group_policy(group_name, policy_name) + template = self.response_template(GET_GROUP_POLICY_TEMPLATE) + return template.render(name="GetGroupPolicyResponse", **policy_result) + def create_user(self): user_name = self._get_param('UserName') path = self._get_param('Path') @@ -194,6 +220,7 @@ class IamResponse(BaseResponse): template = self.response_template(USER_TEMPLATE) return template.render(action='Create', user=user) + def get_user(self): user_name = self._get_param('UserName') user = iam_backend.get_user(user_name) @@ -699,6 +726,35 @@ LIST_GROUPS_FOR_USER_TEMPLATE = """ """ +LIST_GROUP_POLICIES_TEMPLATE = """ + + {% if marker is none %} + false + {% else %} + true + {{ marker }} + {% endif %} + + {% for policy in policies %} + {{ policy }} + {% endfor %} + + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + +GET_GROUP_POLICY_TEMPLATE = """ + + {{ policy_name }} + {{ group_name }} + {{ policy_document }} + + + 7e7cd8bc-99ef-11e1-a4c3-27EXAMPLE804 + +""" USER_TEMPLATE = """<{{ action }}UserResponse> <{{ action }}UserResult> diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 412484a70..ccc802283 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import boto +import boto3 import sure # noqa from nose.tools import assert_raises @@ -70,3 +71,40 @@ def test_get_groups_for_user(): groups = conn.get_groups_for_user('my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups'] groups.should.have.length_of(2) + + +@mock_iam() +def test_put_group_policy(): + conn = boto.connect_iam() + conn.create_group('my-group') + conn.put_group_policy('my-group', 'my-policy', '{"some": 
"json"}') + + +@mock_iam() +def test_get_group_policy(): + conn = boto.connect_iam() + conn.create_group('my-group') + with assert_raises(BotoServerError): + conn.get_group_policy('my-group', 'my-policy') + + conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') + policy = conn.get_group_policy('my-group', 'my-policy') + +@mock_iam() +def test_get_all_group_policies(): + conn = boto.connect_iam() + conn.create_group('my-group') + policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] + assert policies == [] + conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') + policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] + assert policies == ['my-policy'] + + +@mock_iam() +def test_list_group_policies(): + conn = boto3.client('iam') + conn.create_group(GroupName='my-group') + policies = conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty + conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument='{"some": "json"}') + policies = conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.equal(['my-policy']) From f6465df63077ad2b738c0dd94baaa3cca1a4f2c9 Mon Sep 17 00:00:00 2001 From: Andrew Garrett <2rs2ts@users.noreply.github.com> Date: Sat, 4 Mar 2017 20:00:25 -0800 Subject: [PATCH 081/213] Return CF Stack events in reverse chronological order (#853) This is how the AWS API works: http://boto3.readthedocs.io/en/latest/reference/services/cloudformation.html#CloudFormation.Client.describe_stack_events --- moto/cloudformation/responses.py | 2 +- .../test_cloudformation_stack_crud.py | 11 ++++++++--- .../test_cloudformation_stack_crud_boto3.py | 11 ++++++++--- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 06d0bbb00..695118319 100644 --- 
a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -292,7 +292,7 @@ DESCRIBE_STACK_RESOURCES_RESPONSE = """ DESCRIBE_STACK_EVENTS_RESPONSE = """ - {% for event in stack.events %} + {% for event in stack.events[::-1] %} {{ event.timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ') }} {{ event.resource_status }} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index e145ea283..7eb563c42 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -509,10 +509,15 @@ def test_describe_stack_events_shows_create_update_and_delete(): events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") # testing ordering of stack events without assuming resource events will not exist + # the AWS API returns events in reverse chronological order stack_events_to_look_for = iter([ - ("CREATE_IN_PROGRESS", "User Initiated"), ("CREATE_COMPLETE", None), - ("UPDATE_IN_PROGRESS", "User Initiated"), ("UPDATE_COMPLETE", None), - ("DELETE_IN_PROGRESS", "User Initiated"), ("DELETE_COMPLETE", None)]) + ("DELETE_COMPLETE", None), + ("DELETE_IN_PROGRESS", "User Initiated"), + ("UPDATE_COMPLETE", None), + ("UPDATE_IN_PROGRESS", "User Initiated"), + ("CREATE_COMPLETE", None), + ("CREATE_IN_PROGRESS", "User Initiated"), + ]) try: for event in events: event.stack_id.should.equal(stack_id) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 97c3e864a..98ed213e5 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -345,10 +345,15 @@ def test_stack_events(): events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") # testing ordering of stack events without assuming resource events will not exist + # the 
AWS API returns events in reverse chronological order stack_events_to_look_for = iter([ - ("CREATE_IN_PROGRESS", "User Initiated"), ("CREATE_COMPLETE", None), - ("UPDATE_IN_PROGRESS", "User Initiated"), ("UPDATE_COMPLETE", None), - ("DELETE_IN_PROGRESS", "User Initiated"), ("DELETE_COMPLETE", None)]) + ("DELETE_COMPLETE", None), + ("DELETE_IN_PROGRESS", "User Initiated"), + ("UPDATE_COMPLETE", None), + ("UPDATE_IN_PROGRESS", "User Initiated"), + ("CREATE_COMPLETE", None), + ("CREATE_IN_PROGRESS", "User Initiated"), + ]) try: for event in events: event.stack_id.should.equal(stack.stack_id) From e7ea6b350c848c3ecbd6990db4c224250a0a7511 Mon Sep 17 00:00:00 2001 From: Andrew Garrett <2rs2ts@users.noreply.github.com> Date: Sat, 4 Mar 2017 20:01:50 -0800 Subject: [PATCH 082/213] Fix lambda stdout/stderr mocking (#851) Originally, the code was setting sys.stdout and sys.stderr back to the original, official forms, but this breaks idioms like mocking stdout to capture printing output for tests. So instead, we will reset sys.stdout and sys.stderr to what they were before running the lambda function, so that in case someone is mocking stdout or stderr, their tests won't break. 
--- moto/awslambda/models.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 069717ca4..c0593bcde 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -135,6 +135,8 @@ class LambdaFunction(object): print("Exception %s", ex) try: + original_stdout = sys.stdout + original_stderr = sys.stderr codeOut = StringIO() codeErr = StringIO() sys.stdout = codeOut @@ -150,8 +152,8 @@ class LambdaFunction(object): finally: codeErr.close() codeOut.close() - sys.stdout = sys.__stdout__ - sys.stderr = sys.__stderr__ + sys.stdout = original_stdout + sys.stderr = original_stderr return self.convert(result) def invoke(self, request, headers): From e7735c3ee1661ad4269ac9862c61600714668ff0 Mon Sep 17 00:00:00 2001 From: Andrew Garrett <2rs2ts@users.noreply.github.com> Date: Sat, 4 Mar 2017 20:12:55 -0800 Subject: [PATCH 083/213] Add event IDs to CF Stack events (#852) So that events can be uniquely identified. 
I tried to match the format documented here: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-listing-event-history.html --- moto/cloudformation/models.py | 2 ++ tests/test_cloudformation/test_cloudformation_stack_crud.py | 1 + .../test_cloudformation/test_cloudformation_stack_crud_boto3.py | 1 + 3 files changed, 4 insertions(+) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index a9dda8fdc..e9493b2b6 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from datetime import datetime import json +import uuid import boto.cloudformation from moto.core import BaseBackend @@ -105,6 +106,7 @@ class FakeEvent(object): self.resource_status_reason = resource_status_reason self.resource_properties = resource_properties self.timestamp = datetime.utcnow() + self.event_id = uuid.uuid4() class CloudFormationBackend(BaseBackend): diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 7eb563c42..1a2d16e94 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -522,6 +522,7 @@ def test_describe_stack_events_shows_create_update_and_delete(): for event in events: event.stack_id.should.equal(stack_id) event.stack_name.should.equal("test_stack") + event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") if event.resource_type == "AWS::CloudFormation::Stack": event.logical_resource_id.should.equal("test_stack") diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 98ed213e5..112b8bd04 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -358,6 +358,7 @@ def 
test_stack_events(): for event in events: event.stack_id.should.equal(stack.stack_id) event.stack_name.should.equal("test_stack") + event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") if event.resource_type == "AWS::CloudFormation::Stack": event.logical_resource_id.should.equal("test_stack") From 0393c384adec93c8808dbb6503dae5936e3f26e8 Mon Sep 17 00:00:00 2001 From: Matt Chamberlin Date: Sat, 4 Mar 2017 20:17:18 -0800 Subject: [PATCH 084/213] fix etag metadata field name in key response dict (etag --> ETag) (#855) --- moto/s3/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index c41ff3901..c60c49b72 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -90,7 +90,7 @@ class FakeKey(object): @property def response_dict(self): r = { - 'etag': self.etag, + 'Etag': self.etag, 'last-modified': self.last_modified_RFC1123, } if self._storage_class != 'STANDARD': From 896f040fca975865c1d441d90016446c72db3ed4 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 5 Mar 2017 10:09:19 -0500 Subject: [PATCH 085/213] Fix sqs tests for server mode. 
--- tests/test_iam/test_iam_groups.py | 2 +- tests/test_sqs/test_sqs.py | 17 ++++++++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 7ca8d4ac0..9d5095884 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -105,7 +105,7 @@ def test_get_all_group_policies(): @mock_iam() def test_list_group_policies(): - conn = boto3.client('iam') + conn = boto3.client('iam', region_name='us-east-1') conn.create_group(GroupName='my-group') policies = conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument='{"some": "json"}') diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 19aa6d855..30e3e017b 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -10,7 +10,7 @@ import requests import sure # noqa import time -from moto import mock_sqs, mock_sqs_deprecated +from moto import settings, mock_sqs, mock_sqs_deprecated from tests.helpers import requires_boto_gte import tests.backport_assert_raises # noqa from nose.tools import assert_raises @@ -76,8 +76,13 @@ def test_create_queues_in_multiple_region(): list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) + if settings.TEST_SERVER_MODE: + base_url = 'http://localhost:8086' + else: + base_url = 'https://us-west-1.queue.amazonaws.com' + west1_conn.list_queues()['QueueUrls'][0].should.equal( - 'https://us-west-1.queue.amazonaws.com/123456789012/blah') + '{base_url}/123456789012/blah'.format(base_url=base_url)) @mock_sqs @@ -91,8 +96,14 @@ def test_get_queue_with_prefix(): queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] queue.should.have.length_of(1) + + if settings.TEST_SERVER_MODE: + base_url = 'http://localhost:8086' + else: + base_url = 
'https://us-west-1.queue.amazonaws.com' + queue[0].should.equal( - "https://us-west-1.queue.amazonaws.com/123456789012/test-queue") + "{base_url}/123456789012/test-queue".format(base_url=base_url)) @mock_sqs From cf771d7f14dc3c3758072878a6b670b747e7cd21 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 6 Mar 2017 21:22:37 -0500 Subject: [PATCH 086/213] Add py26 deprecation to changelog --- CHANGELOG.md | 1 + setup.py | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 912659875..5938acbb6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Latest BACKWARDS INCOMPATIBLE * The normal @mock_ decorators will no longer work with boto. It is suggested that you upgrade to boto3 or use the standalone-server mode. If you would still like to use boto, you must use the @mock__deprecated decorators which will be removed in a future release. * The @mock_s3bucket_path decorator is now deprecated. Use the @mock_s3 decorator instead. + * Drop support for Python 2.6 Added * Reset API: a reset API has been added to flush all of the current data ex: `requests.post("http://motoapi.amazonaws.com/moto-api/reset")` diff --git a/setup.py b/setup.py index d34715554..a09438d69 100644 --- a/setup.py +++ b/setup.py @@ -15,9 +15,6 @@ install_requires = [ ] extras_require = { - # No builtin OrderedDict before 2.7 - ':python_version=="2.6"': ['ordereddict'], - 'server': ['flask'], } From 1068e26e66840d177a296fc9db711f9b03d25d86 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 6 Mar 2017 21:48:22 -0500 Subject: [PATCH 087/213] Bump travis to python 3.6 --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 87ee121e6..c58ed85f8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ language: python sudo: false python: - 2.7 - - 3.3 + - 3.6 env: - TEST_SERVER_MODE=false - TEST_SERVER_MODE=true From cdd6e476cca75e401aa778bd2e777580e757ca4a Mon Sep 17 00:00:00 2001 From: 
Steve Pulec Date: Sat, 11 Mar 2017 20:50:24 -0500 Subject: [PATCH 088/213] If using newer dynamodb api, use version 2. --- moto/server.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/moto/server.py b/moto/server.py index fcc91ac6c..b028f62bc 100644 --- a/moto/server.py +++ b/moto/server.py @@ -62,8 +62,14 @@ class DomainDispatcherApplication(object): except ValueError: region = 'us-east-1' service = 's3' - host = "{service}.{region}.amazonaws.com".format( - service=service, region=region) + if service == 'dynamodb': + dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0] + # If Newer API version, use dynamodb2 + if dynamo_api_version > "20111205": + host = "dynamodb2" + else: + host = "{service}.{region}.amazonaws.com".format( + service=service, region=region) with self.lock: backend = self.get_backend_for_host(host) From b2a360aaf78a42d63f4d54d34a97961bef402919 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 11 Mar 2017 21:03:03 -0500 Subject: [PATCH 089/213] Remove old boto sns test in favor of boto3 test. 
--- tests/test_sns/test_publishing.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index 51042675f..dd75ff4be 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -51,24 +51,3 @@ def test_publish_to_sqs_in_different_region(): queue = sqs_conn.get_queue("test-queue") message = queue.read(1) message.get_body().should.equal('my message') - - -@freeze_time("2013-01-01") -@mock_sns_deprecated -def test_publish_to_http(): - responses.add( - method="POST", - url="http://example.com/foobar", - ) - - conn = boto.connect_sns() - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - conn.subscribe(topic_arn, "http", "http://example.com/foobar") - - response = conn.publish( - topic=topic_arn, message="my message", subject="my subject") - message_id = response['PublishResponse']['PublishResult']['MessageId'] From 1709208872b59e937dee4b82f25c7d1f9bb9c66d Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 11 Mar 2017 22:45:42 -0500 Subject: [PATCH 090/213] First version of dashboard. 
--- moto/core/models.py | 28 +++++ moto/core/responses.py | 27 +++++ moto/core/urls.py | 2 + moto/core/utils.py | 5 +- moto/ec2/models.py | 6 +- moto/server.py | 2 +- moto/sqs/models.py | 5 +- moto/templates/dashboard.html | 169 +++++++++++++++++++++++++++++++ tests/test_core/test_moto_api.py | 12 +++ 9 files changed, 249 insertions(+), 7 deletions(-) create mode 100644 moto/templates/dashboard.html diff --git a/moto/core/models.py b/moto/core/models.py index 492a0e2ff..055cbbd7e 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -1,9 +1,11 @@ from __future__ import unicode_literals from __future__ import absolute_import +from collections import defaultdict import functools import inspect import re +import six from moto import settings from moto.packages.responses import responses @@ -208,12 +210,38 @@ class Model(type): return dec +model_data = defaultdict(dict) +class InstanceTrackerMeta(type): + def __new__(meta, name, bases, dct): + cls = super(InstanceTrackerMeta, meta).__new__(meta, name, bases, dct) + if name == 'BaseModel': + return cls + + service = cls.__module__.split(".")[1] + if name not in model_data[service]: + model_data[service][name] = cls + cls.instances = [] + return cls + +@six.add_metaclass(InstanceTrackerMeta) +class BaseModel(object): + def __new__(cls, *args, **kwargs): + instance = super(BaseModel, cls).__new__(cls, *args, **kwargs) + cls.instances.append(instance) + return instance + + class BaseBackend(object): def reset(self): self.__dict__ = {} self.__init__() + def get_models(self): + import pdb;pdb.set_trace() + models = getattr(backend.__class__, '__models__', {}) + + @property def _url_module(self): backend_module = self.__class__.__module__ diff --git a/moto/core/responses.py b/moto/core/responses.py index 00e3ba742..ebc4e1743 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -12,6 +12,7 @@ from jinja2 import Environment, DictLoader, TemplateNotFound import six from six.moves.urllib.parse import 
parse_qs, urlparse +from flask import render_template import xmltodict from pkg_resources import resource_filename from werkzeug.exceptions import HTTPException @@ -350,6 +351,32 @@ class MotoAPIResponse(BaseResponse): return 200, {}, json.dumps({"status": "ok"}) return 400, {}, json.dumps({"Error": "Need to POST to reset Moto"}) + def model_data(self, request, full_url, headers): + from moto.core.models import model_data + + results = {} + for service in sorted(model_data): + models = model_data[service] + results[service] = {} + for name in sorted(models): + model = models[name] + results[service][name] = [] + for instance in model.instances: + inst_result = {} + for attr in dir(instance): + if not attr.startswith("_"): + try: + json.dumps(getattr(instance, attr)) + except TypeError: + pass + else: + inst_result[attr] = getattr(instance, attr) + results[service][name].append(inst_result) + return 200, {"Content-Type": "application/javascript"}, json.dumps(results) + + def dashboard(self, request, full_url, headers): + return render_template('dashboard.html') + class _RecursiveDictRef(object): """Store a recursive reference to dict.""" diff --git a/moto/core/urls.py b/moto/core/urls.py index ece486058..4d4906d77 100644 --- a/moto/core/urls.py +++ b/moto/core/urls.py @@ -8,5 +8,7 @@ url_bases = [ response_instance = MotoAPIResponse() url_paths = { + '{0}/moto-api/$': response_instance.dashboard, + '{0}/moto-api/data.json': response_instance.model_data, '{0}/moto-api/reset': response_instance.reset_response, } diff --git a/moto/core/utils.py b/moto/core/utils.py index d26694014..54622d0d7 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -122,7 +122,10 @@ class convert_flask_to_httpretty_response(object): result = self.callback(request, request.url, {}) # result is a status, headers, response tuple - status, headers, content = result + if len(result) == 3: + status, headers, content = result + else: + status, headers, content = 200, {}, result response = 
Response(response=content, status=status, headers=headers) if request.method == "HEAD" and 'content-length' in headers: diff --git a/moto/ec2/models.py b/moto/ec2/models.py index c7467feee..a26aac6a4 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -13,7 +13,7 @@ from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest from boto.ec2.launchspecification import LaunchSpecification from moto.core import BaseBackend -from moto.core.models import Model +from moto.core.models import Model, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, camelcase_to_underscores from .exceptions import ( EC2ClientError, @@ -129,7 +129,7 @@ class StateReason(object): self.code = code -class TaggedEC2Resource(object): +class TaggedEC2Resource(BaseModel): def get_tags(self, *args, **kwargs): tags = self.ec2_backend.describe_tags( @@ -2612,7 +2612,7 @@ class InternetGatewayBackend(object): return self.describe_internet_gateways(internet_gateway_ids=igw_ids)[0] -class VPCGatewayAttachment(object): +class VPCGatewayAttachment(BaseModel): def __init__(self, gateway_id, vpc_id): self.gateway_id = gateway_id diff --git a/moto/server.py b/moto/server.py index fcc91ac6c..b70bb99bd 100644 --- a/moto/server.py +++ b/moto/server.py @@ -47,7 +47,7 @@ class DomainDispatcherApplication(object): def get_application(self, environ): path_info = environ.get('PATH_INFO', '') - if path_info.startswith("/moto-api"): + if path_info.startswith("/moto-api") or path_info == "/favicon.ico": host = "moto_api" elif path_info.startswith("/latest/meta-data/"): host = "instance_metadata" diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 2a6fc19b1..60258972b 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -7,6 +7,7 @@ from xml.sax.saxutils import escape import boto.sqs from moto.core import BaseBackend +from moto.core.models import BaseModel from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, 
unix_time_millis from .utils import generate_receipt_handle from .exceptions import ( @@ -18,7 +19,7 @@ DEFAULT_ACCOUNT_ID = 123456789012 DEFAULT_SENDER_ID = "AIDAIT2UOQQY3AUEKVGXU" -class Message(object): +class Message(BaseModel): def __init__(self, message_id, body): self.id = message_id @@ -93,7 +94,7 @@ class Message(object): return False -class Queue(object): +class Queue(BaseModel): camelcase_attributes = ['ApproximateNumberOfMessages', 'ApproximateNumberOfMessagesDelayed', 'ApproximateNumberOfMessagesNotVisible', diff --git a/moto/templates/dashboard.html b/moto/templates/dashboard.html new file mode 100644 index 000000000..dc0fd880d --- /dev/null +++ b/moto/templates/dashboard.html @@ -0,0 +1,169 @@ + + + + + + + + + Moto + + + + + + + + + + + +
+ +
+

Moto Dashboard

+
+ +
+ + + + + + + + {% raw %} + + + {% endraw %} + + + diff --git a/tests/test_core/test_moto_api.py b/tests/test_core/test_moto_api.py index 3b441a3f1..e65a92ac8 100644 --- a/tests/test_core/test_moto_api.py +++ b/tests/test_core/test_moto_api.py @@ -19,3 +19,15 @@ def test_reset_api(): res.content.should.equal(b'{"status": "ok"}') conn.list_queues().shouldnt.contain('QueueUrls') # No more queues + + +@mock_sqs +def test_data_api(): + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="queue1") + + res = requests.post("{base_url}/moto-api/data.json".format(base_url=base_url)) + queues = res.json()['sqs']['Queue'] + len(queues).should.equal(1) + queue = queues[0] + queue['name'].should.equal("queue1") From caea5f441daf63191132d522f177fb1c7dfcee55 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 11 Mar 2017 23:18:58 -0500 Subject: [PATCH 091/213] Fix resetting backends. --- moto/core/models.py | 8 +++----- moto/ec2/models.py | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index 055cbbd7e..fd90493b2 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -234,14 +234,12 @@ class BaseModel(object): class BaseBackend(object): def reset(self): + for service, models in model_data.items(): + for model_name, model in models.items(): + model.instances = [] self.__dict__ = {} self.__init__() - def get_models(self): - import pdb;pdb.set_trace() - models = getattr(backend.__class__, '__models__', {}) - - @property def _url_module(self): backend_module = self.__class__.__module__ diff --git a/moto/ec2/models.py b/moto/ec2/models.py index a26aac6a4..0c72ac648 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2633,7 +2633,7 @@ class VPCGatewayAttachment(BaseModel): @property def physical_resource_id(self): - return self.id + return self.vpc_id class VPCGatewayAttachmentBackend(object): From 6d422d1f37d8734dd006d6e3411b4fdb41cb0ccb Mon Sep 17 00:00:00 2001 From: Steve 
Pulec Date: Sat, 11 Mar 2017 23:41:12 -0500 Subject: [PATCH 092/213] Add BaseModel to all models. --- moto/apigateway/models.py | 18 +++++++++--------- moto/autoscaling/models.py | 8 ++++---- moto/awslambda/models.py | 4 ++-- moto/cloudformation/models.py | 6 +++--- moto/cloudwatch/models.py | 6 +++--- moto/core/__init__.py | 2 +- moto/datapipeline/models.py | 6 +++--- moto/dynamodb/models.py | 6 +++--- moto/dynamodb2/models.py | 6 +++--- moto/ecs/models.py | 4 ++-- moto/elb/models.py | 10 +++++----- moto/emr/models.py | 12 ++++++------ moto/events/models.py | 4 ++-- moto/glacier/models.py | 6 +++--- moto/iam/models.py | 16 ++++++++-------- moto/kinesis/models.py | 12 ++++++------ moto/kms/models.py | 4 ++-- moto/opsworks/models.py | 8 ++++---- moto/rds/models.py | 8 ++++---- moto/rds2/models.py | 8 ++++---- moto/redshift/models.py | 10 +++++----- moto/route53/models.py | 10 +++++----- moto/s3/models.py | 16 ++++++++-------- moto/ses/models.py | 8 ++++---- moto/sns/models.py | 10 +++++----- moto/sqs/models.py | 3 +-- moto/sts/models.py | 6 +++--- moto/swf/models/activity_task.py | 3 ++- moto/swf/models/decision_task.py | 3 ++- moto/swf/models/domain.py | 3 ++- moto/swf/models/generic_type.py | 3 ++- moto/swf/models/history_event.py | 3 ++- moto/swf/models/timeout.py | 3 ++- moto/swf/models/workflow_execution.py | 3 ++- 34 files changed, 122 insertions(+), 116 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index d5564fa61..e7ff98119 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -5,7 +5,7 @@ import datetime import requests from moto.packages.responses import responses -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from .utils import create_id from .exceptions import StageNotFoundException @@ -13,7 +13,7 @@ from .exceptions import StageNotFoundException STAGE_URL = 
"https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}" -class Deployment(dict): +class Deployment(BaseModel, dict): def __init__(self, deployment_id, name, description=""): super(Deployment, self).__init__() @@ -24,7 +24,7 @@ class Deployment(dict): datetime.datetime.now()) -class IntegrationResponse(dict): +class IntegrationResponse(BaseModel, dict): def __init__(self, status_code, selection_pattern=None): self['responseTemplates'] = {"application/json": None} @@ -33,7 +33,7 @@ class IntegrationResponse(dict): self['selectionPattern'] = selection_pattern -class Integration(dict): +class Integration(BaseModel, dict): def __init__(self, integration_type, uri, http_method, request_templates=None): super(Integration, self).__init__() @@ -58,14 +58,14 @@ class Integration(dict): return self["integrationResponses"].pop(status_code) -class MethodResponse(dict): +class MethodResponse(BaseModel, dict): def __init__(self, status_code): super(MethodResponse, self).__init__() self['statusCode'] = status_code -class Method(dict): +class Method(BaseModel, dict): def __init__(self, method_type, authorization_type): super(Method, self).__init__() @@ -92,7 +92,7 @@ class Method(dict): return self.method_responses.pop(response_code) -class Resource(object): +class Resource(BaseModel): def __init__(self, id, region_name, api_id, path_part, parent_id): self.id = id @@ -165,7 +165,7 @@ class Resource(object): return self.resource_methods[method_type].pop('methodIntegration') -class Stage(dict): +class Stage(BaseModel, dict): def __init__(self, name=None, deployment_id=None, variables=None, description='', cacheClusterEnabled=False, cacheClusterSize=None): @@ -293,7 +293,7 @@ class Stage(dict): raise Exception('Patch operation "%s" not implemented' % op['op']) -class RestAPI(object): +class RestAPI(BaseModel): def __init__(self, id, region_name, name, description): self.id = id diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 
3b3a618e2..0fdd82ddb 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends from moto.elb import elb_backends from moto.elb.exceptions import LoadBalancerNotFoundError @@ -16,7 +16,7 @@ class InstanceState(object): self.lifecycle_state = lifecycle_state -class FakeScalingPolicy(object): +class FakeScalingPolicy(BaseModel): def __init__(self, name, policy_type, adjustment_type, as_name, scaling_adjustment, cooldown, autoscaling_backend): @@ -43,7 +43,7 @@ class FakeScalingPolicy(object): self.as_name, self.scaling_adjustment) -class FakeLaunchConfiguration(object): +class FakeLaunchConfiguration(BaseModel): def __init__(self, name, image_id, key_name, ramdisk_id, kernel_id, security_groups, user_data, instance_type, instance_monitoring, instance_profile_name, @@ -142,7 +142,7 @@ class FakeLaunchConfiguration(object): return block_device_map -class FakeAutoScalingGroup(object): +class FakeAutoScalingGroup(BaseModel): def __init__(self, name, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 7d21ccbe0..477537d10 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -14,12 +14,12 @@ except: from io import StringIO import boto.awslambda -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.s3.models import s3_backend from moto.s3.exceptions import MissingBucket -class LambdaFunction(object): +class LambdaFunction(BaseModel): def __init__(self, spec): # required diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index a565c289c..df9f4a139 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py 
@@ -4,14 +4,14 @@ import json import uuid import boto.cloudformation -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from .parsing import ResourceMap, OutputMap from .utils import generate_stack_id from .exceptions import ValidationError -class FakeStack(object): +class FakeStack(BaseModel): def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None): self.stack_id = stack_id @@ -99,7 +99,7 @@ class FakeStack(object): self.status = "DELETE_COMPLETE" -class FakeEvent(object): +class FakeEvent(BaseModel): def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None): self.stack_id = stack_id diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 7257286ba..dd97ddcbb 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -1,4 +1,4 @@ -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel import boto.ec2.cloudwatch import datetime @@ -10,7 +10,7 @@ class Dimension(object): self.value = value -class FakeAlarm(object): +class FakeAlarm(BaseModel): def __init__(self, name, namespace, metric_name, comparison_operator, evaluation_periods, period, threshold, statistic, description, dimensions, alarm_actions, @@ -34,7 +34,7 @@ class FakeAlarm(object): self.configuration_updated_timestamp = datetime.datetime.utcnow() -class MetricDatum(object): +class MetricDatum(BaseModel): def __init__(self, namespace, name, value, dimensions): self.namespace = namespace diff --git a/moto/core/__init__.py b/moto/core/__init__.py index 4f783d46c..9e2c1e70f 100644 --- a/moto/core/__init__.py +++ b/moto/core/__init__.py @@ -1,4 +1,4 @@ from __future__ import unicode_literals -from .models import BaseBackend, moto_api_backend # flake8: noqa +from .models import BaseModel, BaseBackend, moto_api_backend # flake8: noqa moto_api_backends = 
{"global": moto_api_backend} diff --git a/moto/datapipeline/models.py b/moto/datapipeline/models.py index 0cb33e4ed..77c84924d 100644 --- a/moto/datapipeline/models.py +++ b/moto/datapipeline/models.py @@ -2,11 +2,11 @@ from __future__ import unicode_literals import datetime import boto.datapipeline -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from .utils import get_random_pipeline_id, remove_capitalization_of_dict_keys -class PipelineObject(object): +class PipelineObject(BaseModel): def __init__(self, object_id, name, fields): self.object_id = object_id @@ -21,7 +21,7 @@ class PipelineObject(object): } -class Pipeline(object): +class Pipeline(BaseModel): def __init__(self, name, unique_id): self.name = name diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py index db50dbcc6..39bf15fca 100644 --- a/moto/dynamodb/models.py +++ b/moto/dynamodb/models.py @@ -4,7 +4,7 @@ import datetime import json from moto.compat import OrderedDict -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time from .comparisons import get_comparison_func @@ -53,7 +53,7 @@ class DynamoType(object): return comparison_func(self.value, *range_values) -class Item(object): +class Item(BaseModel): def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs): self.hash_key = hash_key @@ -90,7 +90,7 @@ class Item(object): } -class Table(object): +class Table(BaseModel): def __init__(self, name, hash_key_attr, hash_key_type, range_key_attr=None, range_key_type=None, read_capacity=None, diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 15c30e590..2ee5da203 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -5,7 +5,7 @@ import decimal import json from moto.compat import OrderedDict -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time from .comparisons import 
get_comparison_func @@ -76,7 +76,7 @@ class DynamoType(object): return comparison_func(self.cast_value, *range_values) -class Item(object): +class Item(BaseModel): def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs): self.hash_key = hash_key @@ -173,7 +173,7 @@ class Item(object): 'ADD not supported for %s' % ', '.join(update_action['Value'].keys())) -class Table(object): +class Table(BaseModel): def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None): self.name = table_name diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 7efefdbaa..e5a2e9f96 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -2,12 +2,12 @@ from __future__ import unicode_literals import uuid from random import randint, random -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends from copy import copy -class BaseObject(object): +class BaseObject(BaseModel): def camelCase(self, key): words = [] diff --git a/moto/elb/models.py b/moto/elb/models.py index 11559c2e7..41df8a649 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -11,7 +11,7 @@ from boto.ec2.elb.policies import ( Policies, OtherPolicy, ) -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.ec2.models import ec2_backends from .exceptions import ( LoadBalancerNotFoundError, @@ -21,7 +21,7 @@ from .exceptions import ( ) -class FakeHealthCheck(object): +class FakeHealthCheck(BaseModel): def __init__(self, timeout, healthy_threshold, unhealthy_threshold, interval, target): @@ -34,7 +34,7 @@ class FakeHealthCheck(object): raise BadHealthCheckDefinition -class FakeListener(object): +class FakeListener(BaseModel): def __init__(self, load_balancer_port, instance_port, protocol, ssl_certificate_id): self.load_balancer_port = load_balancer_port @@ -47,7 +47,7 @@ class FakeListener(object): return "FakeListener(lbp: %s, inp: %s, pro: %s, cid: %s, 
policies: %s)" % (self.load_balancer_port, self.instance_port, self.protocol, self.ssl_certificate_id, self.policy_names) -class FakeBackend(object): +class FakeBackend(BaseModel): def __init__(self, instance_port): self.instance_port = instance_port @@ -57,7 +57,7 @@ class FakeBackend(object): return "FakeBackend(inp: %s, policies: %s)" % (self.instance_port, self.policy_names) -class FakeLoadBalancer(object): +class FakeLoadBalancer(BaseModel): def __init__(self, name, zones, ports, scheme='internet-facing', vpc_id=None, subnets=None): self.name = name diff --git a/moto/emr/models.py b/moto/emr/models.py index 94bc45ecc..78bedf574 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -5,12 +5,12 @@ from datetime import timedelta import boto.emr import pytz from dateutil.parser import parse as dtparse -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from .utils import random_instance_group_id, random_cluster_id, random_step_id -class FakeApplication(object): +class FakeApplication(BaseModel): def __init__(self, name, version, args=None, additional_info=None): self.additional_info = additional_info or {} @@ -19,7 +19,7 @@ class FakeApplication(object): self.version = version -class FakeBootstrapAction(object): +class FakeBootstrapAction(BaseModel): def __init__(self, args, name, script_path): self.args = args or [] @@ -27,7 +27,7 @@ class FakeBootstrapAction(object): self.script_path = script_path -class FakeInstanceGroup(object): +class FakeInstanceGroup(BaseModel): def __init__(self, instance_count, instance_role, instance_type, market='ON_DEMAND', name=None, id=None, bid_price=None): @@ -57,7 +57,7 @@ class FakeInstanceGroup(object): self.num_instances = instance_count -class FakeStep(object): +class FakeStep(BaseModel): def __init__(self, state, @@ -81,7 +81,7 @@ class FakeStep(object): self.state = state -class FakeCluster(object): +class FakeCluster(BaseModel): def __init__(self, emr_backend, diff --git 
a/moto/events/models.py b/moto/events/models.py index 3cf2c3d7a..faec7b434 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -1,10 +1,10 @@ import os import re -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel -class Rule(object): +class Rule(BaseModel): def _generate_arn(self, name): return 'arn:aws:events:us-west-2:111111111111:rule/' + name diff --git a/moto/glacier/models.py b/moto/glacier/models.py index 8e3286887..1afb1241a 100644 --- a/moto/glacier/models.py +++ b/moto/glacier/models.py @@ -3,12 +3,12 @@ from __future__ import unicode_literals import hashlib import boto.glacier -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from .utils import get_job_id -class ArchiveJob(object): +class ArchiveJob(BaseModel): def __init__(self, job_id, archive_id): self.job_id = job_id @@ -35,7 +35,7 @@ class ArchiveJob(object): } -class Vault(object): +class Vault(BaseModel): def __init__(self, vault_name, region): self.vault_name = vault_name diff --git a/moto/iam/models.py b/moto/iam/models.py index f00e02052..ba6985895 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -3,13 +3,13 @@ import base64 from datetime import datetime import pytz -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException from .utils import random_access_key, random_alphanumeric, random_resource_id, random_policy_id -class Policy(object): +class Policy(BaseModel): is_attachable = False @@ -54,7 +54,7 @@ class InlinePolicy(Policy): """TODO: is this needed?""" -class Role(object): +class Role(BaseModel): def __init__(self, role_id, name, assume_role_policy_document, path): self.id = role_id @@ -96,7 +96,7 @@ class Role(object): raise UnformattedGetAttTemplateException() -class InstanceProfile(object): +class InstanceProfile(BaseModel): def __init__(self, instance_profile_id, name, path, 
roles): self.id = instance_profile_id @@ -126,7 +126,7 @@ class InstanceProfile(object): raise UnformattedGetAttTemplateException() -class Certificate(object): +class Certificate(BaseModel): def __init__(self, cert_name, cert_body, private_key, cert_chain=None, path=None): self.cert_name = cert_name @@ -140,7 +140,7 @@ class Certificate(object): return self.name -class AccessKey(object): +class AccessKey(BaseModel): def __init__(self, user_name): self.user_name = user_name @@ -159,7 +159,7 @@ class AccessKey(object): raise UnformattedGetAttTemplateException() -class Group(object): +class Group(BaseModel): def __init__(self, name, path='/'): self.name = name @@ -198,7 +198,7 @@ class Group(object): return self.policies.keys() -class User(object): +class User(BaseModel): def __init__(self, name, path=None): self.name = name diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index 5d80426ae..84cbbb73a 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -11,13 +11,13 @@ from operator import attrgetter from hashlib import md5 from moto.compat import OrderedDict -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from .exceptions import StreamNotFoundError, ShardNotFoundError, ResourceInUseError, \ ResourceNotFoundError, InvalidArgumentError from .utils import compose_shard_iterator, compose_new_shard_iterator, decompose_shard_iterator -class Record(object): +class Record(BaseModel): def __init__(self, partition_key, data, sequence_number, explicit_hash_key): self.partition_key = partition_key @@ -33,7 +33,7 @@ class Record(object): } -class Shard(object): +class Shard(BaseModel): def __init__(self, shard_id, starting_hash, ending_hash): self._shard_id = shard_id @@ -94,7 +94,7 @@ class Shard(object): } -class Stream(object): +class Stream(BaseModel): def __init__(self, stream_name, shard_count, region): self.stream_name = stream_name @@ -173,14 +173,14 @@ class Stream(object): } -class FirehoseRecord(object): +class 
FirehoseRecord(BaseModel): def __init__(self, record_data): self.record_id = 12345678 self.record_data = record_data -class DeliveryStream(object): +class DeliveryStream(BaseModel): def __init__(self, stream_name, **stream_kwargs): self.name = stream_name diff --git a/moto/kms/models.py b/moto/kms/models.py index 37fde9eb8..be8c52162 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -1,12 +1,12 @@ from __future__ import unicode_literals import boto.kms -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from .utils import generate_key_id from collections import defaultdict -class Key(object): +class Key(BaseModel): def __init__(self, policy, key_usage, description, region): self.id = generate_key_id() diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index a1b8370dd..3adfd3323 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends import uuid import datetime @@ -8,7 +8,7 @@ from random import choice from .exceptions import ResourceNotFoundException, ValidationException -class OpsworkInstance(object): +class OpsworkInstance(BaseModel): """ opsworks maintains its own set of ec2 instance metadata. 
This metadata exists before any instance reservations are made, and is @@ -166,7 +166,7 @@ class OpsworkInstance(object): return d -class Layer(object): +class Layer(BaseModel): def __init__(self, stack_id, type, name, shortname, attributes=None, @@ -292,7 +292,7 @@ class Layer(object): return d -class Stack(object): +class Stack(BaseModel): def __init__(self, name, region, service_role_arn, default_instance_profile_arn, vpcid="vpc-1f99bf7a", diff --git a/moto/rds/models.py b/moto/rds/models.py index 4334a9f72..670b0a808 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -6,13 +6,13 @@ import boto.rds from jinja2 import Template from moto.cloudformation.exceptions import UnformattedGetAttTemplateException -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.core.utils import get_random_hex from moto.ec2.models import ec2_backends from moto.rds2.models import rds2_backends -class Database(object): +class Database(BaseModel): def __init__(self, **kwargs): self.status = "available" @@ -239,7 +239,7 @@ class Database(object): backend.delete_database(self.db_instance_identifier) -class SecurityGroup(object): +class SecurityGroup(BaseModel): def __init__(self, group_name, description): self.group_name = group_name @@ -317,7 +317,7 @@ class SecurityGroup(object): backend.delete_security_group(self.group_name) -class SubnetGroup(object): +class SubnetGroup(BaseModel): def __init__(self, subnet_name, description, subnets): self.subnet_name = subnet_name diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 52cb298cd..f03cf4ad1 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -7,7 +7,7 @@ import boto.rds2 from jinja2 import Template from re import compile as re_compile from moto.cloudformation.exceptions import UnformattedGetAttTemplateException -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.core.utils import get_random_hex from moto.ec2.models import ec2_backends 
from .exceptions import (RDSClientError, @@ -17,7 +17,7 @@ from .exceptions import (RDSClientError, DBParameterGroupNotFoundError) -class Database(object): +class Database(BaseModel): def __init__(self, **kwargs): self.status = "available" @@ -372,7 +372,7 @@ class Database(object): backend.delete_database(self.db_instance_identifier) -class SecurityGroup(object): +class SecurityGroup(BaseModel): def __init__(self, group_name, description, tags): self.group_name = group_name @@ -481,7 +481,7 @@ class SecurityGroup(object): backend.delete_security_group(self.group_name) -class SubnetGroup(object): +class SubnetGroup(BaseModel): def __init__(self, subnet_name, description, subnets, tags): self.subnet_name = subnet_name diff --git a/moto/redshift/models.py b/moto/redshift/models.py index af6c6f643..5e64f7a16 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals import boto.redshift -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends from .exceptions import ( ClusterNotFoundError, @@ -12,7 +12,7 @@ from .exceptions import ( ) -class Cluster(object): +class Cluster(BaseModel): def __init__(self, redshift_backend, cluster_identifier, node_type, master_username, master_user_password, db_name, cluster_type, cluster_security_groups, @@ -174,7 +174,7 @@ class Cluster(object): } -class SubnetGroup(object): +class SubnetGroup(BaseModel): def __init__(self, ec2_backend, cluster_subnet_group_name, description, subnet_ids): self.ec2_backend = ec2_backend @@ -220,7 +220,7 @@ class SubnetGroup(object): } -class SecurityGroup(object): +class SecurityGroup(BaseModel): def __init__(self, cluster_security_group_name, description): self.cluster_security_group_name = cluster_security_group_name @@ -235,7 +235,7 @@ class SecurityGroup(object): } -class ParameterGroup(object): +class ParameterGroup(BaseModel): def __init__(self, cluster_parameter_group_name, 
group_family, description): self.cluster_parameter_group_name = cluster_parameter_group_name diff --git a/moto/route53/models.py b/moto/route53/models.py index 6e0ad35c0..15679f0e3 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -5,11 +5,11 @@ from collections import defaultdict import uuid from jinja2 import Template -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.core.utils import get_random_hex -class HealthCheck(object): +class HealthCheck(BaseModel): def __init__(self, health_check_id, health_check_args): self.id = health_check_id @@ -63,7 +63,7 @@ class HealthCheck(object): return template.render(health_check=self) -class RecordSet(object): +class RecordSet(BaseModel): def __init__(self, kwargs): self.name = kwargs.get('Name') @@ -154,7 +154,7 @@ class RecordSet(object): hosted_zone.delete_rrset_by_name(self.name) -class FakeZone(object): +class FakeZone(BaseModel): def __init__(self, name, id_, private_zone, comment=None): self.name = name @@ -212,7 +212,7 @@ class FakeZone(object): return hosted_zone -class RecordSetGroup(object): +class RecordSetGroup(BaseModel): def __init__(self, hosted_zone_id, record_sets): self.hosted_zone_id = hosted_zone_id diff --git a/moto/s3/models.py b/moto/s3/models.py index 77c6e1a00..04220c142 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -9,7 +9,7 @@ import codecs import six from bisect import insort -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime from .exceptions import BucketAlreadyExists, MissingBucket, MissingKey, InvalidPart, EntityTooSmall from .utils import clean_key_name, _VersionedKeyStore @@ -18,7 +18,7 @@ UPLOAD_ID_BYTES = 43 UPLOAD_PART_MIN_SIZE = 5242880 -class FakeKey(object): +class FakeKey(BaseModel): def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0): self.name = name @@ 
-119,7 +119,7 @@ class FakeKey(object): return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT") -class FakeMultipart(object): +class FakeMultipart(BaseModel): def __init__(self, key_name, metadata): self.key_name = key_name @@ -167,7 +167,7 @@ class FakeMultipart(object): yield self.parts[part_id] -class FakeGrantee(object): +class FakeGrantee(BaseModel): def __init__(self, id='', uri='', display_name=''): self.id = id @@ -193,14 +193,14 @@ PERMISSION_WRITE_ACP = 'WRITE_ACP' PERMISSION_READ_ACP = 'READ_ACP' -class FakeGrant(object): +class FakeGrant(BaseModel): def __init__(self, grantees, permissions): self.grantees = grantees self.permissions = permissions -class FakeAcl(object): +class FakeAcl(BaseModel): def __init__(self, grants=[]): self.grants = grants @@ -234,7 +234,7 @@ def get_canned_acl(acl): return FakeAcl(grants=grants) -class LifecycleRule(object): +class LifecycleRule(BaseModel): def __init__(self, id=None, prefix=None, status=None, expiration_days=None, expiration_date=None, transition_days=None, @@ -249,7 +249,7 @@ class LifecycleRule(object): self.storage_class = storage_class -class FakeBucket(object): +class FakeBucket(BaseModel): def __init__(self, name, region_name): self.name = name diff --git a/moto/ses/models.py b/moto/ses/models.py index 3502d6bc7..2f51d1473 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals import email -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from .exceptions import MessageRejectedError from .utils import get_random_message_id @@ -10,19 +10,19 @@ from .utils import get_random_message_id RECIPIENT_LIMIT = 50 -class Message(object): +class Message(BaseModel): def __init__(self, message_id): self.id = message_id -class RawMessage(object): +class RawMessage(BaseModel): def __init__(self, message_id): self.id = message_id -class SESQuota(object): +class SESQuota(BaseModel): def __init__(self, sent): self.sent = sent diff 
--git a/moto/sns/models.py b/moto/sns/models.py index 0ccf60ea9..64352d545 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -9,7 +9,7 @@ import requests import six from moto.compat import OrderedDict -from moto.core import BaseBackend +from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.sqs import sqs_backends from .exceptions import SNSNotFoundError @@ -19,7 +19,7 @@ DEFAULT_ACCOUNT_ID = 123456789012 DEFAULT_PAGE_SIZE = 100 -class Topic(object): +class Topic(BaseModel): def __init__(self, name, sns_backend): self.name = name @@ -67,7 +67,7 @@ class Topic(object): return topic -class Subscription(object): +class Subscription(BaseModel): def __init__(self, topic, endpoint, protocol): self.topic = topic @@ -99,7 +99,7 @@ class Subscription(object): } -class PlatformApplication(object): +class PlatformApplication(BaseModel): def __init__(self, region, name, platform, attributes): self.region = region @@ -116,7 +116,7 @@ class PlatformApplication(object): ) -class PlatformEndpoint(object): +class PlatformEndpoint(BaseModel): def __init__(self, region, application, custom_user_data, token, attributes): self.region = region diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 60258972b..62b79fdc1 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -6,8 +6,7 @@ from xml.sax.saxutils import escape import boto.sqs -from moto.core import BaseBackend -from moto.core.models import BaseModel +from moto.core import BaseBackend, BaseModel from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis from .utils import generate_receipt_handle from .exceptions import ( diff --git a/moto/sts/models.py b/moto/sts/models.py index f1c6401d2..c7163a335 100644 --- a/moto/sts/models.py +++ b/moto/sts/models.py @@ -1,10 +1,10 @@ from __future__ import unicode_literals import datetime -from moto.core import BaseBackend +from moto.core import BaseBackend, 
BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds -class Token(object): +class Token(BaseModel): def __init__(self, duration, name=None, policy=None): now = datetime.datetime.utcnow() @@ -17,7 +17,7 @@ class Token(object): return iso_8601_datetime_with_milliseconds(self.expiration) -class AssumedRole(object): +class AssumedRole(BaseModel): def __init__(self, role_session_name, role_arn, policy, duration, external_id): self.session_name = role_session_name diff --git a/moto/swf/models/activity_task.py b/moto/swf/models/activity_task.py index e205cc07a..0c1f283ca 100644 --- a/moto/swf/models/activity_task.py +++ b/moto/swf/models/activity_task.py @@ -2,13 +2,14 @@ from __future__ import unicode_literals from datetime import datetime import uuid +from moto.core import BaseModel from moto.core.utils import unix_time from ..exceptions import SWFWorkflowExecutionClosedError from .timeout import Timeout -class ActivityTask(object): +class ActivityTask(BaseModel): def __init__(self, activity_id, activity_type, scheduled_event_id, workflow_execution, timeouts, input=None): diff --git a/moto/swf/models/decision_task.py b/moto/swf/models/decision_task.py index 13bddfd7a..9255dd6f2 100644 --- a/moto/swf/models/decision_task.py +++ b/moto/swf/models/decision_task.py @@ -2,13 +2,14 @@ from __future__ import unicode_literals from datetime import datetime import uuid +from moto.core import BaseModel from moto.core.utils import unix_time from ..exceptions import SWFWorkflowExecutionClosedError from .timeout import Timeout -class DecisionTask(object): +class DecisionTask(BaseModel): def __init__(self, workflow_execution, scheduled_event_id): self.workflow_execution = workflow_execution diff --git a/moto/swf/models/domain.py b/moto/swf/models/domain.py index ed7154067..0aa62f4f0 100644 --- a/moto/swf/models/domain.py +++ b/moto/swf/models/domain.py @@ -1,13 +1,14 @@ from __future__ import unicode_literals from collections import defaultdict +from moto.core 
import BaseModel from ..exceptions import ( SWFUnknownResourceFault, SWFWorkflowExecutionAlreadyStartedFault, ) -class Domain(object): +class Domain(BaseModel): def __init__(self, name, retention, description=None): self.name = name diff --git a/moto/swf/models/generic_type.py b/moto/swf/models/generic_type.py index 2ae98bb53..a56220ed6 100644 --- a/moto/swf/models/generic_type.py +++ b/moto/swf/models/generic_type.py @@ -1,9 +1,10 @@ from __future__ import unicode_literals +from moto.core import BaseModel from moto.core.utils import camelcase_to_underscores -class GenericType(object): +class GenericType(BaseModel): def __init__(self, name, version, **kwargs): self.name = name diff --git a/moto/swf/models/history_event.py b/moto/swf/models/history_event.py index e841ca38e..0dc21a09a 100644 --- a/moto/swf/models/history_event.py +++ b/moto/swf/models/history_event.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals +from moto.core import BaseModel from moto.core.utils import underscores_to_camelcase, unix_time from ..utils import decapitalize @@ -27,7 +28,7 @@ SUPPORTED_HISTORY_EVENT_TYPES = ( ) -class HistoryEvent(object): +class HistoryEvent(BaseModel): def __init__(self, event_id, event_type, event_timestamp=None, **kwargs): if event_type not in SUPPORTED_HISTORY_EVENT_TYPES: diff --git a/moto/swf/models/timeout.py b/moto/swf/models/timeout.py index 09e0f6772..f26c8a38b 100644 --- a/moto/swf/models/timeout.py +++ b/moto/swf/models/timeout.py @@ -1,7 +1,8 @@ +from moto.core import BaseModel from moto.core.utils import unix_time -class Timeout(object): +class Timeout(BaseModel): def __init__(self, obj, timestamp, kind): self.obj = obj diff --git a/moto/swf/models/workflow_execution.py b/moto/swf/models/workflow_execution.py index 8b8acda4e..2f41c287f 100644 --- a/moto/swf/models/workflow_execution.py +++ b/moto/swf/models/workflow_execution.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import uuid +from moto.core import BaseModel from 
moto.core.utils import camelcase_to_underscores, unix_time from ..constants import ( @@ -20,7 +21,7 @@ from .timeout import Timeout # TODO: extract decision related logic into a Decision class -class WorkflowExecution(object): +class WorkflowExecution(BaseModel): # NB: the list is ordered exactly as in SWF validation exceptions so we can # mimic error messages closely ; don't reorder it without checking SWF. From 09ac3539b752913e64a85f4f95726d7c0a945997 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 12 Mar 2017 12:34:54 -0400 Subject: [PATCH 093/213] Sort dashboard attributes. --- moto/templates/dashboard.html | 93 ++++++++++++++++++----------------- 1 file changed, 47 insertions(+), 46 deletions(-) diff --git a/moto/templates/dashboard.html b/moto/templates/dashboard.html index dc0fd880d..9c49904d0 100644 --- a/moto/templates/dashboard.html +++ b/moto/templates/dashboard.html @@ -50,15 +50,7 @@ + @@ -82,13 +74,13 @@
{{#each data}} -
+
{{#each this}} @@ -72,7 +67,7 @@ {% raw %} {% endraw %} + + + From 0e2fdf94f9d5d5b864253c621ba131a6f03a3eb3 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 12 Mar 2017 20:18:49 -0400 Subject: [PATCH 098/213] Cleanup lints. --- Makefile | 3 +++ moto/cloudformation/parsing.py | 2 +- moto/ec2/models.py | 1 - moto/iam/responses.py | 1 - 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 58b74b2fb..82aef0cd1 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,9 @@ init: @python setup.py develop @pip install -r requirements.txt +lint: + flake8 moto + test: rm -f .coverage rm -rf cover diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 337de2f2d..fbf34b6f1 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -365,7 +365,7 @@ class ResourceMap(collections.Mapping): condition, self._parsed_resources, self.lazy_condition_map) for condition_name in self.lazy_condition_map: - _ = self.lazy_condition_map[condition_name] + self.lazy_condition_map[condition_name] def create(self): self.load_mapping() diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 6ed6e9af0..87ce61c5b 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -349,7 +349,6 @@ class NetworkInterfaceBackend(object): return generic_filter(filters, enis) - class Instance(TaggedEC2Resource, BotoInstance): def __init__(self, ec2_backend, image_id, user_data, security_groups, **kwargs): super(Instance, self).__init__() diff --git a/moto/iam/responses.py b/moto/iam/responses.py index cd9ddbf75..318c04f3a 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -232,7 +232,6 @@ class IamResponse(BaseResponse): template = self.response_template(USER_TEMPLATE) return template.render(action='Create', user=user) - def get_user(self): user_name = self._get_param('UserName') user = iam_backend.get_user(user_name) From d2c56619cd770a47732fb53642cacd72ad5cdfd8 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: 
Sun, 12 Mar 2017 20:35:45 -0400 Subject: [PATCH 099/213] Add lint to Travis. --- .travis.yml | 1 + moto/core/models.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index c58ed85f8..bdda2b402 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,6 +19,7 @@ install: export AWS_ACCESS_KEY_ID=foobar_key fi script: + - make lint - make test after_success: - coveralls diff --git a/moto/core/models.py b/moto/core/models.py index 82e78fdb8..9c2fc6d6b 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -211,6 +211,8 @@ class Model(type): model_data = defaultdict(dict) + + class InstanceTrackerMeta(type): def __new__(meta, name, bases, dct): cls = super(InstanceTrackerMeta, meta).__new__(meta, name, bases, dct) @@ -223,6 +225,7 @@ class InstanceTrackerMeta(type): cls.instances = [] return cls + @six.add_metaclass(InstanceTrackerMeta) class BaseModel(object): def __new__(cls, *args, **kwargs): From 1664e4412fac9efaf6e23ed8ea38e61fae1c5b86 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 12 Mar 2017 20:37:23 -0400 Subject: [PATCH 100/213] Add lint to make test instead. --- .travis.yml | 1 - Makefile | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index bdda2b402..c58ed85f8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,7 +19,6 @@ install: export AWS_ACCESS_KEY_ID=foobar_key fi script: - - make lint - make test after_success: - coveralls diff --git a/Makefile b/Makefile index 82aef0cd1..300067296 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ init: lint: flake8 moto -test: +test: lint rm -f .coverage rm -rf cover @nosetests -sv --with-coverage --cover-html ./tests/ From 689adf7dbc9ded65e34090f9de23d913e5815858 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 12 Mar 2017 20:41:08 -0400 Subject: [PATCH 101/213] Add flake8 to dev dependencies. 
--- requirements-dev.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements-dev.txt b/requirements-dev.txt index 554834a51..52def6ed0 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,6 +3,7 @@ mock nose sure==1.2.24 coverage +flake8 freezegun flask boto3>=1.4.4 From cda553abfb84ca575f9fe24d546fbd2117f302a8 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 12 Mar 2017 21:04:19 -0400 Subject: [PATCH 102/213] Change tests to use default server port of 5000. --- .travis.yml | 2 +- moto/core/models.py | 6 +++--- other_langs/sqsSample.java | 2 +- other_langs/test.js | 2 +- other_langs/test.rb | 2 +- tests/test_awslambda/test_lambda.py | 2 +- tests/test_core/test_instance_metadata.py | 2 +- tests/test_core/test_moto_api.py | 2 +- tests/test_sqs/test_sqs.py | 4 ++-- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.travis.yml b/.travis.yml index c58ed85f8..4783e13c2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,7 +14,7 @@ install: - travis_retry pip install coveralls - | if [ "$TEST_SERVER_MODE" = "true" ]; then - AWS_SECRET_ACCESS_KEY=server_secret AWS_ACCESS_KEY_ID=server_key moto_server -p 8086& + AWS_SECRET_ACCESS_KEY=server_secret AWS_ACCESS_KEY_ID=server_key moto_server -p 5000& export AWS_SECRET_ACCESS_KEY=foobar_secret export AWS_ACCESS_KEY_ID=foobar_key fi diff --git a/moto/core/models.py b/moto/core/models.py index 9c2fc6d6b..a3a343aa7 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -157,7 +157,7 @@ class ServerModeMockAWS(BaseMockAWS): def reset(self): import requests - requests.post("http://localhost:8086/moto-api/reset") + requests.post("http://localhost:5000/moto-api/reset") def enable_patching(self): if self.__class__.nested_count == 1: @@ -169,12 +169,12 @@ class ServerModeMockAWS(BaseMockAWS): def fake_boto3_client(*args, **kwargs): if 'endpoint_url' not in kwargs: - kwargs['endpoint_url'] = "http://localhost:8086" + kwargs['endpoint_url'] = "http://localhost:5000" return 
real_boto3_client(*args, **kwargs) def fake_boto3_resource(*args, **kwargs): if 'endpoint_url' not in kwargs: - kwargs['endpoint_url'] = "http://localhost:8086" + kwargs['endpoint_url'] = "http://localhost:5000" return real_boto3_resource(*args, **kwargs) self._client_patcher = mock.patch('boto3.client', fake_boto3_client) self._resource_patcher = mock.patch( diff --git a/other_langs/sqsSample.java b/other_langs/sqsSample.java index 23368272c..d303a4d27 100644 --- a/other_langs/sqsSample.java +++ b/other_langs/sqsSample.java @@ -36,7 +36,7 @@ public class S3Sample { AmazonSQS sqs = new AmazonSQSClient(); Region usWest2 = Region.getRegion(Regions.US_WEST_2); sqs.setRegion(usWest2); - sqs.setEndpoint("http://localhost:8086"); + sqs.setEndpoint("http://localhost:5000"); String queueName = "my-first-queue"; sqs.createQueue(queueName); diff --git a/other_langs/test.js b/other_langs/test.js index 65d65ae70..adc738a2d 100644 --- a/other_langs/test.js +++ b/other_langs/test.js @@ -1,6 +1,6 @@ var AWS = require('aws-sdk'); -var s3 = new AWS.S3({endpoint: "http://localhost:8086"}); +var s3 = new AWS.S3({endpoint: "http://localhost:5000"}); var myBucket = 'my.unique.bucket.name'; var myKey = 'myBucketKey'; diff --git a/other_langs/test.rb b/other_langs/test.rb index dc5b7914b..f7d84eb1f 100644 --- a/other_langs/test.rb +++ b/other_langs/test.rb @@ -1,6 +1,6 @@ require 'aws-sdk' -sqs = Aws::SQS::Resource.new(region: 'us-west-2', endpoint: 'http://localhost:8086') +sqs = Aws::SQS::Resource.new(region: 'us-west-2', endpoint: 'http://localhost:5000') my_queue = sqs.create_queue(queue_name: 'my-bucket') puts sqs.client.list_queues() diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 84e8a8f2b..d967c8bad 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -40,7 +40,7 @@ def lambda_handler(event, context): vol = ec2.Volume(volume_id) print('Volume - %s state=%s, size=%s' % (volume_id, vol.state, 
vol.size)) return event -""".format(base_url="localhost:8086" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com") +""".format(base_url="localhost:5000" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com") return _process_lamda(pfunc) diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py index 69b9052e9..f8bf24814 100644 --- a/tests/test_core/test_instance_metadata.py +++ b/tests/test_core/test_instance_metadata.py @@ -6,7 +6,7 @@ import requests from moto import mock_ec2, settings if settings.TEST_SERVER_MODE: - BASE_URL = 'http://localhost:8086' + BASE_URL = 'http://localhost:5000' else: BASE_URL = 'http://169.254.169.254' diff --git a/tests/test_core/test_moto_api.py b/tests/test_core/test_moto_api.py index e65a92ac8..cb0ca8939 100644 --- a/tests/test_core/test_moto_api.py +++ b/tests/test_core/test_moto_api.py @@ -6,7 +6,7 @@ import requests import boto3 from moto import mock_sqs, settings -base_url = "http://localhost:8086" if settings.TEST_SERVER_MODE else "http://motoapi.amazonaws.com" +base_url = "http://localhost:5000" if settings.TEST_SERVER_MODE else "http://motoapi.amazonaws.com" @mock_sqs diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 30e3e017b..2889e520f 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -77,7 +77,7 @@ def test_create_queues_in_multiple_region(): list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) if settings.TEST_SERVER_MODE: - base_url = 'http://localhost:8086' + base_url = 'http://localhost:5000' else: base_url = 'https://us-west-1.queue.amazonaws.com' @@ -98,7 +98,7 @@ def test_get_queue_with_prefix(): queue.should.have.length_of(1) if settings.TEST_SERVER_MODE: - base_url = 'http://localhost:8086' + base_url = 'http://localhost:5000' else: base_url = 'https://us-west-1.queue.amazonaws.com' From bd2ff89bf127315f398f49c734cdb25eaf96bfdf Mon Sep 17 00:00:00 2001 From: Seamus Cawley Date: Mon, 13 
Mar 2017 13:52:57 +0000 Subject: [PATCH 103/213] Ensure SQS property WaitTimeSeconds is an integer --- moto/sqs/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 62b79fdc1..61093aa82 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -113,7 +113,7 @@ class Queue(BaseModel): self.region = region # wait_time_seconds will be set to immediate return messages - self.wait_time_seconds = wait_time_seconds or 0 + self.wait_time_seconds = int(wait_time_seconds) if wait_time_seconds else 0 self._messages = [] now = unix_time() From b9ea947aa0572234051d9ba22b77b862daa3b524 Mon Sep 17 00:00:00 2001 From: Andrew Garrett Date: Mon, 13 Mar 2017 14:09:51 +0000 Subject: [PATCH 104/213] Add ListHostedZonesByName --- moto/route53/responses.py | 47 +++++++++++++++++++++++++++- moto/route53/urls.py | 1 + tests/test_route53/test_route53.py | 50 ++++++++++++++++++++++++++++-- 3 files changed, 95 insertions(+), 3 deletions(-) diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 07f6e2303..984f305ab 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -7,7 +7,7 @@ from .models import route53_backend import xmltodict -class Route53 (BaseResponse): +class Route53(BaseResponse): def list_or_create_hostzone_response(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -47,6 +47,32 @@ class Route53 (BaseResponse): template = Template(LIST_HOSTED_ZONES_RESPONSE) return 200, headers, template.render(zones=all_zones) + def list_hosted_zones_by_name_response(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + parsed_url = urlparse(full_url) + query_params = parse_qs(parsed_url.query) + dnsname = query_params.get("dnsname") + + if dnsname: + dnsname = dnsname[0] # parse_qs gives us a list, but this parameter doesn't repeat + # return all zones with that name (there can be more than one) + zones = [zone for zone in 
route53_backend.get_all_hosted_zones() if zone.name == dnsname] + else: + # sort by names, but with domain components reversed + # see http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.list_hosted_zones_by_name + + def sort_key(zone): + domains = zone.name.split(".") + if domains[-1] == "": + domains = domains[-1:] + domains[:-1] + return ".".join(reversed(domains)) + + zones = route53_backend.get_all_hosted_zones() + zones = sorted(zones, key=sort_key) + + template = Template(LIST_HOSTED_ZONES_BY_NAME_RESPONSE) + return 200, headers, template.render(zones=zones) + def get_or_delete_hostzone_response(self, request, full_url, headers): self.setup_class(request, full_url, headers) parsed_url = urlparse(full_url) @@ -289,6 +315,25 @@ LIST_HOSTED_ZONES_RESPONSE = """ + + {% for zone in zones %} + + /hostedzone/{{ zone.id }} + {{ zone.name }} + + {% if zone.comment %} + {{ zone.comment }} + {% endif %} + {{ zone.private_zone }} + + {{ zone.rrsets|count }} + + {% endfor %} + + false +""" + CREATE_HEALTH_CHECK_RESPONSE = """ {{ health_check.to_xml() }} diff --git a/moto/route53/urls.py b/moto/route53/urls.py index 795f7d807..53abf23a2 100644 --- a/moto/route53/urls.py +++ b/moto/route53/urls.py @@ -18,6 +18,7 @@ url_paths = { '{0}/(?P[\d_-]+)/hostedzone$': Route53().list_or_create_hostzone_response, '{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)$': Route53().get_or_delete_hostzone_response, '{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)/rrset/?$': Route53().rrset_response, + '{0}/(?P[\d_-]+)/hostedzonesbyname': Route53().list_hosted_zones_by_name_response, '{0}/(?P[\d_-]+)/healthcheck': Route53().health_check_response, '{0}/(?P[\d_-]+)/tags/healthcheck/(?P[^/]+)$': tag_response1, '{0}/(?P[\d_-]+)/tags/hostedzone/(?P[^/]+)$': tag_response2, diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index ea8609556..b64c63a30 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -361,8 +361,9 
@@ def test_hosted_zone_private_zone_preserved_boto3(): hosted_zones = conn.list_hosted_zones() hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) - # zone = conn.list_hosted_zones_by_name(DNSName="testdns.aws.com.") - # zone.config["PrivateZone"].should.equal(True) + hosted_zones = conn.list_hosted_zones_by_name(DNSName="testdns.aws.com.") + len(hosted_zones["HostedZones"]).should.equal(1) + hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) @mock_route53 @@ -445,3 +446,48 @@ def test_list_or_change_tags_for_resource_request(): response = conn.list_tags_for_resource( ResourceType='healthcheck', ResourceId=healthcheck_id) response['ResourceTagSet']['Tags'].should.be.empty + + +@mock_route53 +def test_list_hosted_zones_by_name(): + conn = boto3.client('route53', region_name='us-east-1') + conn.create_hosted_zone( + Name="test.b.com.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="test com", + ) + ) + conn.create_hosted_zone( + Name="test.a.org.", + CallerReference=str(hash('bar')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="test org", + ) + ) + conn.create_hosted_zone( + Name="test.a.org.", + CallerReference=str(hash('bar')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="test org 2", + ) + ) + + # test lookup + zones = conn.list_hosted_zones_by_name(DNSName="test.b.com.") + len(zones["HostedZones"]).should.equal(1) + zones["HostedZones"][0]["Name"].should.equal("test.b.com.") + zones = conn.list_hosted_zones_by_name(DNSName="test.a.org.") + len(zones["HostedZones"]).should.equal(2) + zones["HostedZones"][0]["Name"].should.equal("test.a.org.") + zones["HostedZones"][1]["Name"].should.equal("test.a.org.") + + # test sort order + zones = conn.list_hosted_zones_by_name() + len(zones["HostedZones"]).should.equal(3) + zones["HostedZones"][0]["Name"].should.equal("test.b.com.") + zones["HostedZones"][1]["Name"].should.equal("test.a.org.") + 
zones["HostedZones"][2]["Name"].should.equal("test.a.org.") From c5853b48da25e16f26e0611985b1d6bb708223ac Mon Sep 17 00:00:00 2001 From: Adam Stauffer Date: Mon, 13 Mar 2017 16:48:22 -0400 Subject: [PATCH 105/213] update RDS responses to return DBInstanceArn --- AUTHORS.md | 1 + moto/rds/models.py | 6 +++++ moto/rds2/models.py | 9 +++++++- tests/test_rds2/test_rds2.py | 43 ++++++++++++++++++++---------------- 4 files changed, 39 insertions(+), 20 deletions(-) diff --git a/AUTHORS.md b/AUTHORS.md index e85996125..08757d2bb 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -44,3 +44,4 @@ Moto is written by Steve Pulec with contributions from: * [Jean-Baptiste Barth](https://github.com/jbbarth) * [Tom Viner](https://github.com/tomviner) * [Justin Wiley](https://github.com/SectorNine50) +* [Adam Stauffer](https://github.com/adamstauffer) diff --git a/moto/rds/models.py b/moto/rds/models.py index 670b0a808..a499b134d 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -71,6 +71,11 @@ class Database(BaseModel): # DBParameterGroupName # VpcSecurityGroupIds.member.N + @property + def db_instance_arn(self): + return "arn:aws:rds:{0}:1234567890:db:{1}".format( + self.region, self.db_instance_identifier) + @property def physical_resource_id(self): return self.db_instance_identifier @@ -231,6 +236,7 @@ class Database(BaseModel):
{{ database.address }}
{{ database.port }} + {{ database.db_instance_arn }} """) return template.render(database=self) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index f03cf4ad1..eecb608dd 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -95,6 +95,11 @@ class Database(BaseModel): self.character_set_name = kwargs.get('character_set_name', None) self.tags = kwargs.get('tags', []) + @property + def db_instance_arn(self): + return "arn:aws:rds:{0}:1234567890:db:{1}".format( + self.region, self.db_instance_identifier) + @property def physical_resource_id(self): return self.db_instance_identifier @@ -206,6 +211,7 @@ class Database(BaseModel):
{{ database.address }}
{{ database.port }} + {{ database.db_instance_arn }} """) return template.render(database=self) @@ -349,7 +355,8 @@ class Database(BaseModel): "Status": "active", "VpcSecurityGroupId": "sg-123456" } - ] + ], + "DBInstanceArn": "{{ database.db_instance_arn }}" }""") return template.render(database=self) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 731bc75c1..1e2e0abdf 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -26,6 +26,8 @@ def test_create_database(): database['DBInstance']['MasterUsername'].should.equal("root") database['DBInstance']['DBSecurityGroups'][0][ 'DBSecurityGroupName'].should.equal('my_sg') + database['DBInstance']['DBInstanceArn'].should.equal( + 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') @disable_on_py3() @@ -59,6 +61,8 @@ def test_get_databases(): list(instances['DBInstances']).should.have.length_of(1) instances['DBInstances'][0][ 'DBInstanceIdentifier'].should.equal("db-master-1") + instances['DBInstances'][0]['DBInstanceArn'].should.equal( + 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') @disable_on_py3() @@ -333,26 +337,27 @@ def test_list_tags_db(): result = conn.list_tags_for_resource( ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') result['TagList'].should.equal([]) - conn.create_db_instance(DBInstanceIdentifier='db-with-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) + test_instance = conn.create_db_instance( + DBInstanceIdentifier='db-with-tags', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg'], + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) 
result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') + ResourceName=test_instance['DBInstance']['DBInstanceArn']) result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}, {'Value': 'bar1', From 54c7e0bcf9e3386a7f0f2ec9f08919fad3b3a23d Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 13 Mar 2017 23:07:18 -0400 Subject: [PATCH 106/213] Update docs to newer sphinx. --- docs/_build/doctrees/ec2_tut.doctree | Bin 7904 -> 8282 bytes docs/_build/doctrees/environment.pickle | Bin 7968 -> 8914 bytes docs/_build/doctrees/getting_started.doctree | Bin 10820 -> 11613 bytes docs/_build/doctrees/index.doctree | Bin 16675 -> 18990 bytes docs/_build/html/.buildinfo | 2 +- docs/_build/html/_sources/ec2_tut.rst.txt | 74 + .../html/_sources/getting_started.rst.txt | 112 + docs/_build/html/_sources/index.rst.txt | 91 + docs/_build/html/_static/alabaster.css | 176 +- docs/_build/html/_static/basic.css | 79 +- docs/_build/html/_static/comment-bright.png | Bin 3500 -> 756 bytes docs/_build/html/_static/comment-close.png | Bin 3578 -> 829 bytes docs/_build/html/_static/comment.png | Bin 3445 -> 641 bytes docs/_build/html/_static/custom.css | 1 + docs/_build/html/_static/doctools.js | 28 +- docs/_build/html/_static/down-pressed.png | Bin 347 -> 222 bytes docs/_build/html/_static/down.png | Bin 347 -> 202 bytes docs/_build/html/_static/file.png | Bin 358 -> 286 bytes docs/_build/html/_static/jquery-3.1.0.js | 10074 ++++++++++++++++ docs/_build/html/_static/jquery.js | 8 +- docs/_build/html/_static/minus.png | Bin 173 -> 90 bytes docs/_build/html/_static/plus.png | Bin 173 -> 90 bytes docs/_build/html/_static/pygments.css | 6 + docs/_build/html/_static/searchtools.js | 166 +- docs/_build/html/_static/up-pressed.png | Bin 345 -> 214 bytes docs/_build/html/_static/up.png | Bin 345 -> 203 bytes docs/_build/html/_static/websupport.js | 4 +- docs/_build/html/ec2_tut.html | 38 +- docs/_build/html/genindex.html | 26 +- 
docs/_build/html/getting_started.html | 74 +- docs/_build/html/index.html | 40 +- docs/_build/html/objects.inv | 7 +- docs/_build/html/search.html | 19 +- docs/_build/html/searchindex.js | 2 +- 34 files changed, 10849 insertions(+), 178 deletions(-) create mode 100644 docs/_build/html/_sources/ec2_tut.rst.txt create mode 100644 docs/_build/html/_sources/getting_started.rst.txt create mode 100644 docs/_build/html/_sources/index.rst.txt create mode 100644 docs/_build/html/_static/custom.css create mode 100644 docs/_build/html/_static/jquery-3.1.0.js diff --git a/docs/_build/doctrees/ec2_tut.doctree b/docs/_build/doctrees/ec2_tut.doctree index 2d6d78f26bdfa1142833303de5b05379aae3bafa..719a1ed0b0b296abd8f5a0552f3c7da449e67020 100644 GIT binary patch delta 3991 zcmaE0d&{A|fhjp9Ke@CdGpATDFFz% zEiPoKEo9Y*5NBXuC@oG^$jvXwS4b>YNKH0UNJ>o3PR&axWb=yU|>j1Hi|DPEh*&as4oo_EG|tdF3Bt@Ey>K!i%&^SD=y@$ zE#wO2D@ZIVPL0n^EiO(>Pc1IwjxFQ~XI zF68qI6-q2E$xqAAFUiX$q`%$$^>)VxB`*g`Q4^pGq` zEJ{x;;VKl@Y%G+hEtJ#<FpkeW)pVMa3nBvZ3rHiRmC8 z7s`dQLEKR&AIgo}80dT%1~5s2Ur}3K3O{4P`A#P0LIvRL>A84Q0v6%u6lQ@Ma9W1)7fK%tIChEym+hI9r~hD?TR=;V8hQtpMiwS{^a92pYD z8FHcQP%VY}849(91{xVEu^Fnd8OpI4O0k888W}c_v4ut&8EhFGsihej-mDp# z8BC$#lkYQ%T4tsc8rK$@WQc%F6OAo2#f+xRlAKhoLNm?ALi5@}3ylmZaPppP&m7^uOAHJQlRq+xPyWud+X=~q zoN)DpE+ALBVsj-sD0D!sbkoRyy67XbxL~1sYh$5DZJ{T~0#N=2Sx}!Lo#BM(tenh} z)S|>3u0k)(#zODfLLZPGPC03}Gh)MTTC1ds!KF&!YC;RLeWFT*y1gMonoroPa> zwXrauwlENu9=uDF5;H{QLfN5~6$X_iW!Ofn19{z^b)8^gaBE{>NNr&#$ZnXZ1;o9n zIGr4yl#`#F%~cqt*;p7}TNnY-+yFAthpizFDI|HJwnEewMuI#Or4h}^JX|W-ZMbi zA+feF2__6pa~a@7pNtZUg((@riJ%mpn!%n3N_K)Uo6eu{O^0{~lvwj? 
z3kyKbaAaU$a7-&nEmFwMpFESp(xR{s$$oG;Ey8BMFw9%U8MYA)pg5Yy;UHL8(%M*9 zT3c8KG8!f-l_8yx53v~(vE{Xe6(H$kkS2YuWGf{5z%f~=krADNEz(3`c2;HBM!>Xd z6jrx37S_}j*1}?^9AX#9mvyy;^&q<}Kz3zwn_F5HHXvCC_Ff}4?@7X}YRa&UFo)_E zENpIVENrPQYz3JK6BW&n&ZvV}3G!ZBZDG4chScU;+{LV>NOpm})`81w(lGlvGikGSD8wx#bG!v0W> zqSQ1uIjLo-^*P>*kSq+VcQa%`m5f>}q;m6S3}pc|e?nP2{oMQu z7nXubutgc5_+MOGxCCzGQjn1#jo{h|oR6e4=7AJ1%dm~mWnf_Ns4Pe=%E`>jR)EyX z3QC~%#AF@`@%qB$t&N2%Y719FjFZk-3@QQTGF-q^C?Bea3RjgTC4$1fAit<2wWx5l zUnoar3betq#+xye7dgO-3)jXLt_$Ub1#mnl0t(mH7H$X?$jnR0EJ{tTFNudZskm@s zY~iL*j^fmklFYpH;=;{HRc2aIeqKpxUJ6%OVoqslF<0T1{KBoZh1)bjgye0V{ zvy!tR*6s^sP0cGQsw~_e%A1pzmtLBfo*JK=pORX5Ae1RJukc_fXG&s8YH>+oZb9K8 zkW*4Yp%|ZI4{BKz9`aJfX zKU&xq-V0?dElJb0D7+ua1=S8pq!0W;`7-m6W2x|AC?{B@ZgFOM;iFLg_>}t0;>4sJ zPT$VN%7nbIOx&?1LLwP_60-iD7 zg--q}shCsv9#pXBl_XZgXJn>lez#+gt)P;nHG z2#L?3OqnT#Uqbm(@{^Hc>}zM}Q0 zZbW=)9w@9+3V(Ejau+3*!}&i!1t8e3@KWrTUujaPC|czNO43Cog}=QSGL&iy|7c{0 zWN2pyXC!7+3T0@Qh6ha;u5ATKdDwRlSGlpfZi)U?c$DLw3|$ws;*r6p5(m@`wrG8{>X$=O99Ye352 z8rYL_5{rw$noD~)ixSI=^Gl18Q>XOs=;b>KSU)$vBws%zKe<>RYK&e{amkb(wt~bW zP*`NJWN^ez$zaT2DNV{?=wUBTO$G%8QW$0M*G|#M5R9FYAsRbHqer~7I8_&9sBU7h zF35i%AEoA{Ovw;Q%@B{Bk|7CGr2tZ;kXWpcnrx&1Qw0takZ-1>W=O|QDeVzYttd!M zE{QKnO-n6G%}Y*AiBGDG&rF$8J0(LTgSR+CwufO#hJ1!3Lq5YYLpehOW^@Ka z4@*dD1t>k>*3>!$n#M9@Gh8y1Gi*we5<$V7q2SF54rVjXxXBqOs} zp`^4VzbG>?M%z& zmuKdsD-`Et78InGC}b8ZlK$I z7L`B+lk@ZP6l~$9>4Es5lCLB`z9=<4Ge1v5DYaC$Jhiw)*HB3ll=vz#6uh~Sb4G@H z&%iqjDF=cRe>JpZD^1E!gH^03l}3*+B*SOsrR#!Ak-X&8;wg!+AV-o`K$Zqo4;iu< zrl7(fx%kz{u)=B$%sg0mF6;v?Wz zurw*dk0=*7I;NGR7Ab%d2dLNsCx+A%9fgvNRE4C}jKs3c{34J9h+mwTn+ht3^Ark- zQp+;)ON(?gmnW!}%1chQ z%G9+qOi3|HGd6J6i=?GL)>_UVRacRf`+#)GXEm0^gNi4}sR>&_>NKeg6 zElMm&RVYf#OUchw$WH>b_0)?M5RFnq(vd{-wmF#wqNEtI4arc>IE4rTq7zC)9xDR_ zD6Nzw78PsYihWJ4YW4Wcy!hPwyi8CdDK9-fH76C+=2y2;a7)Z7PSxR3P>(OjNlb=` z`sL?A1);ev9u&-}>Q)MH8JIvpYFXCdib;+{}1Lu@;}1my%kcZlzGFZlDg9 zDoW1CEJ;lUwZg!{6&7akW+q@caNQK2l3JFToEo2-SWu9emyTpva!zJyUP*jOes*e} zx|M=DSPv*iK$_+1Rtm1pM$Y+pc@Vx;YO;|YywcE1%uTGy&r2*X)=SRM1-T?8uQ(pm 
zKnA-4)L1t)(K9kM(K9qQ)w3`GsVXQfN!2w()d{vVHK{m0zn~;DH?t}=1v&Uq%Tn`7 zii1;2ker=flwVo^vk4Y(V8LQ_D~0G-kXte;3sQ^9GK=$zz^&z$2{OD?9g$rj zqPdCbsqs)bXwC;20?X_11x5L3nK{Vu1eYqQEC7j@s^=Q&73U`AL40CLN>OStNPVd~O29!3EJ@7;`y?h$ z0Sdt4*{MZ&sX1U7P)I<+)^bC#7^b9S`^$aZxKn*0lOi*~{ zB|<_RrVlM7QMEywQk0)knp_eO>OO#i8NIw7b-;6oSAFFweZSd@Fc%0{OitwXig`xFod*k``e>Qk0)x0?WkE{Dn;lni64B zkbb&LYH^8kMrtz1S*7YFMWw0WFeom_FM(Hi@lbnFZ3P!Z@o+D|jV(>eOD#cFT9TL! z3drhOkSEGAi%Lopb26*IRct)eRi)~n4k%O(?1Zv{WP~ZTuxe8pwf-0_iNOOTqa`u4 zp*30(kCw!QTN9%tF(oB2dMDyGyaf#&{ebjIVNBN2q|_e4#L|-dwEX;%y!?_>(AX+? z^jW01GB+tdCmusWsH7+{uNYMC$LFRN7bm8JhoUnWdiaVfi%U{-VWLETN*Edq^{$3trGDSka1#i=DFnR)5OQ+fnJa|+-|hP0ymyb{nbcn@1xVoqsl zF;Yfi6Vw=*@!6eT97f(GEHboB5PrGmz^;-OK+)Werjnp+SLYq){b^Q9IQu+=>>86_nJ#a8yD8QUkUS6(OP+5|Zp9gkMN@|&YL25y<{*)g6^wg4))Cy2T zGq)fyxnxQQc$x@2OAucHO8!$idt^(BGILR~2qY-uA>)RfJ)F?)cYe_nzaAOX$VB%g zTX9KIW^&1t9^Rb9y!28~rv}u>ozlaUnm46KAhn_(zc>{o^x2a0^U^Xw@yrLx!||CZ z@dcpiq>3p$OrSX>fz;fj)Rg$*lFFRa;*8YPk|~`%LZDfj;?g8kk1(g^l}+*M;Ri=> zL2+tnN`8DrE-20Lf`#D8*sn*RxH7LKu_8VrGd&|GGd-hZN)Jm;eqQ>N9@+fT66Dm1 z=D{peMh1o+{`i#4Vo=W_9`1OqjKrK0SSn-f;RA(3L40z4E~qySj#ZGG^Gb7};qJ`{ zbtt?f0x1$qt;j3^_g_kj5r%Q5B$lKWmn7yEfYLuSH-Hky6u%5hzaCzwUC=2tzaHL{ z{N&8Mw0wkBqRBb=iP`a~xrv!Mux?&z@sy4pc33vgQ0n0+&P^;Ti7zb82Tk&I_6Wg? 
zO+;b@WjK&^$tCe+nZ=+%2T+Kkh1rxIR&WlV(jyI0TL2rBD2^}BEXjz6&HefH@PSe^ zys!cV0AG1xQ68iZSCpEVJH@X@6r=`RNR$@k#FwPzf_iMJQ+hZGQVVod4NOv|^zcHX zAs(a*T=4V6r=%8_Wafc80H7iPk*YFGOOtxoL7f=T6lLucjf|-olQIl4bTbSyL`r*v zP$o!0%0Tm-J)-C}33zOCiZ?@#5Gd8cLkcR)*u#|q%G)se*+3~e1!Pi&NbHmh-o#?~ zfH8bJ6SM9p?cvA-rToOA5>O?>4Noempq$7FavZ2~0U4{2G1V`_z%RqFqqK(?*+t;s z;Dosk%;PIaEGkZgRdHY;0W|M{1vwx^C)hHMqSQ2yeIUDfcwxqY%3Y8Mv{qy65y;F- z$t(g7@`GoP!RGM5Y83DUt{-G>7!>d#85$XrGI}#~Gc-y;vl=~|u-XWuuQW*y0KZ89 Ag8%>k diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle index 2f5d88b27e3d1167003b58b79671819b79c26345..a1145484936f5a699ba82d8b41e5a96f5f92bd35 100644 GIT binary patch literal 8914 zcmZo*N-i$Q$jqzIOU*0GEXvQzP0cIeaw^TtNpVFIC}h-ZEM%%JWY!2}PsvHk%tK`a?8v~%}dNp4b96ebjeIE;VNX+Y%F9;j*m|&&CDsu%!`lbDo!ooDrAps z4JzbF3gyU6%u7s9Eh;YLtPN#OEGQ`C@(bmRFHbDWODoMwF68zLWh==n0olY;Tga;s z!OFnEkeQc~T2aWCoRXgmvb9(*FFz%%G}JP5}j!vUn!F_f)1HL)l;qtGeC2_)bg z!HyIvE*ZL^?5W8{x}YF(Z7U6B&rixqO)e=ebgK>JOwKGxEXmBzD=u`e4P}Em&!e`` zQzHY_QH5Ti46tmOlb@WJlgbqzQk0lioCDS$P?TR$$yMl$rX;r#lxV<7*C&+6H$SB` zCsn}{oYj2W8Vmh0d@_7n3;o-Y+B57j_%iGXC)oB3u(vb3Gkii>AtnX1H5LYD_+$jM z76yS#0tW+F0e3JsU=#up(^CtB+Zqc)GJG`TK z!Z2@!Q0{`F{Pg?+NN|PMmWDDW*A_--WVqF4gn2WRhB6gY7Dm<l03EiR0P z$z~N7#?%(Zf@C4;nUf0&3gc=E<26FLi}LeJ;)^SDlk#&46JUV|NlwLj$pr;m!C*PA z!bHu+!lc^5WQ|b1lGNOSoWzpUfW)H2+~UF%zYImcQ0|n}$J$#f3{^3zudz zB!;pVrRJn27N-_2%a|C-TAW%^T2Q#$uN2g(Ni9hPC+HQmg)22O_-Zp$YBO|eOGEjf zIU_kEF%J|{t7=2}L3tvtG&epqucRn5wYYF~ZQ&Y?3_fqx3>9zo3|()g(onX{TyVgy z%>cEh)XbrGpy{#f4jI3%7wJ1u|4JR5Nrl^h!f{Qc??2^HNgtl0i<|UK=U^ElT1u^T3Is zxNt{p;m!=c*uq^{TO7L+i$gh}YEugLKwPyql%*s;xwvp&ZQ*{642%|BQfW?3YDs)f zW^oBu;Q`IY!h^Mihu{r_!&n;#N6=eeAl>nqC8@bwg-10T3y;+n9)}xo0@Db^jDw(d z&q)nf!I7Af0%=bs=jRqACYQt)Bo-y67bO;Ca21}?Y%DxoTX+U;;8|=29s?P84&7Zv zscET2sd>q%T!rT~8w)Sg7G8uKa|zt$NY2PF0+s)v454gAscEG}nHhYbrkQvww528) z3kp(DR!q$4D9upJI1RGtat1L?%`2^qg;#3}uW4imWhi7QW?TR_hC(^v&MLeP5fQ}j zcj1lFqzq7W+^j9U1$XmpoL;}9kpT+%yS0V);HvLqQ+*TU{0AAJP<>ci_z14}F*e0_ 
zL5iPbfWq!+ZQ(Px;^(MgC!h>!#$@KD>q6VU#D?WVkohk%WWcEzt@&H{vbC}BRc+yG zP%{(MzDAdSlcAXL6dZmTkYLQX0p^8r!jog++tMU(xV)<^d=Gd12W(yihs#HBxO}QD z{0vw81)Jh`Aa{QShs(Fx!tZd!KTyL(UOBn62-JDVsnpHPEr1k(sVTa}sYPX($*IM} z2Gb{ybw4v?BIFnt7@T27Dj*rD5DYc4@KXX(-gBn?ne(yP%bXw0~^B+F(wq$7c3G0 zb8v)+AjrIEAF5db%)!(QvInGD z5@hjt&Bh`ruwH4fa;RPz5C)@3Xv{vRSQe$Lb&}=MH2b-$_HVPC}idi6=LfIfGvq+PXfgwVNs7AFuxaSIL ztQKj3JfjrK4z;XE8^jl`Ez$v56UqT=gm^Q+1}HN4LLtL3#rb(Tm7Snro{E&zf|3kx zrcf48$D{+jUSo&0eqxJs85tP-GIqBW=`k`eB!;qOWTvE~=5>U!Cntj2wSJ*I&}IdA z1S+;jALN?diJ`nuVTgv#(ohyqleox$k%7TGEJG?591o$~NR5mlLq-OM(xlW3B~THQ zn4y$~B9MwMfKZX40?tZC;LtV(1p{omIq}cE3js3FdwSf2F$_K46+BL*%o9mC>z;<_1c4#L-jgQiqp2U9c19*{SDK;8g_urFAzA6Pk5uRoZB zsTX7oNN)g0FDQfq!Fq$h%AtCLK^&A27EsR2D=taQ$pJM|Ks_XQ)k`46K`sMX7Xq>l z(oXS&n*<&mD++~#a~Rki;UKFsguo3NG=&+8Ss=HfhdgMg4qDk3MS#tSgqQ(|fG98r zGaO)H7!8&O1!4?Xb1aw-4Tv}}2U9c19+0o%LB0Y7VgguiB3L<8ZxV=u5{TT&ppero zElx~NB{uv(jsn?~46;ckf(tr4s{l5nC~48%!hh87tF!b46+C0i9C=eKtY`k)>{Bp4%J%-=3wdt zSp(8r1kwu%>SD0o60mZp-ck?;C8)WSQ&N-jixNxni--+rkefgjm4PghiQr^lU~qw% zQB)2I=nAmil_2Xfgiw`aC}x3Nh91tysj~{KyBeY!65KUl4yLbRAyx}=3@F6vzv zE2e;~kcnVsU|?!3Yx-K>fnhB8A-il+>ankZ@cIcGohHQ!<3W zqwzR2WGH5V{D32DF`88v0!ZyFU2x&L9BlInh|Q3gSPAB!ho)3)(JGLCK!xgRu%28QpNWks^b8vfeFUTFB@ZAU2yC19^>dgaS4yIm^H6TA61nC6@?;)_>!(io5y+^YE`s%40_h7CP0qyEh);( zjZe$ZFUiXF4HO6Ydz~=jrEO;~Es?A5;_U8s-}0s;M7splcEvqgnJ2 z6f_KNK}C;1S~)=O11)Xc8_yH4ii}l2Y^>9GCo{vC`Cv60jc< zlS_(TfQ9)o^9o8!;#2dIL9^cJMK3`nah8^(>00O(XQmgu0(F~1xrkcC3fxe`!}fxd@VHSM>IW0*K E0G!1}VE_OC literal 7968 zcmZo*nJOyJ00uom#RVCec@=u8d1aYJ`FXjic_mYN1e{7Ub5dN9geFhXterAxO6?Sl z9=775!DLu0K8Tq-X`cW?o8a#gqxfJ$z-UMa7xkeZv6ni5}J zQkj!loROMZGNrSJrzkBsJ}I#{HNLbcXG)JuMoCFQv6a4lNq&A#v0i3sNt#}MQM!Ic zNp6n*lpf~Pys{}7EIoXgc?G2<@u_*q`6-!s=~H?*OH0ypEp&@B)2H-^Cgb7EkGb_%OAiBtAJmw;(aOWJ*U5S4Lt^NqkOfS!&J{Z`K~Zl>Fq9 z%#xhc_=>drqTDH+J+dW5nYr<4`S~Sz`6a3GMX712MX7nosqw`HiOH!`I(zua6N~a7 z!B&)-m^-CM5E^f#C7C&TdHE@+#Z!8C!y$@-OTcQ7;*0~NJ*_k^d5T{Tb9_C2eT%AJQYdrnj{AcT2TT@5F*g12L(rR zeqKr@$QV!~hnNl|7}X$d%IY4mWUloo)jE1nWNrH45) 
z1x#}!B_?MVrKW*7tZ-@esI!NJ^jDQ}F z)PPRWRjostn9J0&AFc1lKk?3B`s#Ehhj zdqXF!W(jI6#T~uv zQ+hbkQ}ZAdO~$&6q#j;q*2~FHPRvQ2(jyojQk0lioRbJDP6CSZD=McxIs%ir#*^JE@TUw{IO-TZoo(rlZK-E|ecR{5toR^W5u`y$F505X{a|&qM z_rkQZ6{jW^C1*^@I0Dtc9Sjyw2uMs%odUP+C`>)H%FN(_Rh~Trl1w|OBumW8&qIV^ z#;Oe4jMZ&ZGS-0tq-{#Z?uC|XK;SV})E22x- zmoUF+WPBr9)7Ol=EM=%GvzQqf7>Lyg4b3cFm}Y)#nu!U;9s%XdyyB9?oSe+ObX`#U z6;~w!3s!Teof+vp{GM>F;93Qx8o`jv$jfp>3S3b5k{47OWQA2I)ZJO+hZVOnD1dZJ zi$SFlfz%fdwJsyQhYM8mDL`}~*Y?Qb8F^WmNP&|Wwq>OEaJs-XAqOXta7JF%Y^2a6J*iO?FzcaCr68pVC_|h4 zy19vYiRq~XgJ&nyPEf9NhV&K`U|Nv_i631uBQNXlzy;NHsIw^uDi-CU#BxHZ=ONUd zjPxGnpu}>Nf(k5tFcYHkOe{@GMM9uFXiHMD2CBMAt02^& zULmi7FofzR6bZKEH%Y9aRuI`0%c#r9%W_2uQ1Up^hah{T_yCCXjw{2WF3eGN{W8O1-dQUIJ~!vrs!hl{eZz5lYd^k1m;!mvtQ} zd_iR_X(c8_LG>EyZ1U4xkGyhnX%T2xE2mO7Gq)fo6*L5&ngXuBGKt9bU!hLONbiwz zhMA;*WRgN~YEc>3B&38%P;EwD)_E(fQxLGC%84nEvEjrV-J(>;a3#S) zR2FJ+MtYBkBV4~i5L`cUAYxaMk(Z^;jA#XrnB`$UC$Yw|hPs=);C6-TrXaY5l{52F zK*>xO)CMa7jp-8%Y=5Y=8RFRjyt#)3G}u3-qlW`BY?_)f#hamr z3o<5}pO;e!nu&sro`NPpKqKrKT!}sGu%X-7DSjDTZBr6^c%iDGmUV(k&mNB4#EO*E zf|86W-b_eC+gW9xP|VB91yL!nQ^55#q}GPFJ`+omN_*H-av-*5xPwPoK$8k=$@zI{ zpy@2wXef-&<_wX5PC$TX4{{TWOHzyCQ}UCiz(yT21bf&F^i1>&4W{&PrB;-r=7DDK zKpyMiOw3740_&KP*u$TbSdv-+(pm~C-#`j9vSu?gF!XSMh6eOXQY)tP2!WUiF8Rq| zHQ;gBDLvf5C8=ep3IU}#smW7%*m4u|N)vOYluSwNVTVpSPU&F*X`Ir-TAW%^3L1vy zOwKQ;EXqvJD4EhDXk=h$s-u8nia=@wcnCYbAh85AvIcTa4;y6eW=dj@0B9Z%G(lOC znG8~r0h%6RhYCzd>=6XXXQbvpCceN!aXsuHCxAnS3nUv~l98I5Iwfm9*o!%ddFiE~ z5fi^2_M+6B)WqV{DOqzsQk=PodGQ6{xuDo7&=6q(4Y5qgTFlJA5IZGn2{QvjqBkSR z!+Zsm>EO_VOm0r;VS|iNPU+#yFH0>d%7hHc_wYi>y7=PKw6x3$kmFhOK*O<#J$#U8 zNQKlgpz+uqKG3uhNVpj8Rjzor7|6FJsTG-dY570g7E-lH-Db~x&Pf0E20&#Ow^Gdi188sUVnQ9A}H9|S_5_3~aDhpDJ3t4Il zSv4Xg85kJSQ%g!R^U@WHOA?DpQd1PlGfOfQa`Q{_3)%c4m>C!tiW18U+5IBK85kIf zOA_-^bQ5#(^HLRxQ;W({ixhJ6Q&I~#{37@n7#NcC^GZ@HN)&Pv^AgijiwZgYBDfeB z7(j+8loltZrxtQS6lUfXmn7!ofUE^c7IOPVa56A3q@*V27bTYD7ZvjOm4*rymnIdL zWR{ecWaj6^r=+G87xLB?@`dshBo-B?#^B2<1yIEhwmY-jemtT?^5B6=Lus35UJ19^xQ;G{kY70d*GT1^T 
zp&_CRR+E~d3knfkusg*v*h85?;US*E5h{)p8oH3s&;^HvLgr5q5Oz&fS50x!4t{}b#{JHp-gEgM{-7HPD)W~UZHGkp&WW77pEqJ0*9+m zUbC@Kp|(&_Bb2i!u^bu(N}&v)Yz2v+U@272U#iveeC^IPN zbz=+lGQ<)?St086V+##3*b+-aS#mP-QVR{enL{~X>ABD-gDFEcw$NAuJupf#OL9`V z3QaT{3r%Ya%``HU!0Fo^n!baP)3+}ueVb<}XDDQ-WT;f8C=Mxf%8*XX(2p&2&frc2If)JCBo~bgkgr{93*A7z z24zK0L=FS{)jdNw!ye+-46r9XP|SvU5^kbrhBY`xqZ{MZ+F0maTj&Gw1;|KOurJU< zq#&^N&Or-R|3k^t!*1~*{`wKKeIV*B=tcnW~lT!-| zLpchHQo)6IVG$_P>%>CK07&lVf+e2f4C@GNwiTANHWrrF7M8(0nhVYu8KCrEURziJ za-}W<1A}LpLS=rbLV03d336P*61GBGQGTuhq@1?0Dy&3!A_JVVt1v^|(3=P5xN1d1Z7i&u)vJh9LmL_F@qQ0}XunVNj3)F6%78558z zikT?JLMsZmVY4!^U!;&) zk(gVMld4djs*sUbmI^NZ)ADn2^2Pd^X;Mx z>j+A$Tin`MxTLmlDabbt|+kY7}ykd$AN&jn7>AeLUS zv0ip+r2ibVKng)}dLStUTd)eSZny+kwSp}~7Z+F#WO8w0St{66B!iOk z^YRpI!LHT=u~U;v;){(nG{Krc1}2pzXQ!4Z*eZZz_0m&I;vpg$>baE=mO4Ulwt}sK zcWR{uL;^`190;}waA(7f&(VE;mXcXj-152^iojoVpRkis3HJoZIpZ)%9oLuo{^K8o>3B?Sd?B|xVje9 zzJrwQ;;^K$Cc`>nG%i8WuAo4xxGY@T+E}=*ws1YH2wMipPoR2oLv7(kP=1PKU|bo5WHYuhL<5#Ewq#gG(B4s7 zTN?|v)fR3Cl{6p+Zh(XgsLbC{TeuSzV6eg&+(X!f;_kxT88YB9e@_MvxXjmv`Cu=k z%->gAxE~b4oD2*MF7VFwfehu09mqxgK@?M=MLyiDLmAfK-a5j7!>x^lM`{a?f?N(V zY#+qspg28NTX-B~qc;NsLpW$;AyEO|zf~wNN-QV3-kY}4C@GLJK=O|W8s zgPd~=;%iXNaIUuSJjmCpvDFNq#wn~y0C_3C*ceh}@C z7At__BQp=2o79UH)XOr9N=g%R)D;}VgB22!lk-dSO2B0}$VNSgVc04Luo{Rv!5uu1 z6}AdcZ)vC(gN7c|brjS=HkPLr=jWzEYZFvGdJs=Ssxr_hLQx66`UqT2Wv5ocEYJiC zz$=-=VptWUsbFi1VmVTp0#`H_G%{9pX5g$kArbF| zms%SOFV_}cft9i6z*!mA;=Wp2cnv0lQuTNI59U>A+Z=y6zeFYmZcU|Du4=FP+urB1vESZ8u&~|0F4Znq=G6= zaG4A$i(sYjQzTb|OW|kOTH0 zz5(fl^j#sI0d@V~VmeYe<1wi3`VQK61*t83-`ZICp|vM0 zskO22b8X=lkoEYLLfi{VYM_eaYi;2-kRBsO&}ev?0`9^dmRe zSqZ=t5Gu1l>QKvI1)|DrOnoS2H%OYsOm=%Xjvpdd<+U0Jy0Uh01alqQYWa4DiQ&QF44`*Fjx{0g(QK|NFk%0j}a+F zVvsx`4o>hA;3xsl$7O-C060THOR*wJuo5YRk__astVkLxfs}QNWIz!m0WQ&GK}-&C ziIxtFM>((t6V1jVd9cX}V7?M~x;+^CbbFB^$Q64A}TzyKM1$je^F zqr0GKkdl0GVFBvWDPBg-pnFN=km7dI@+=F#tUHr&pv2@+wLYflF2`uyZkkC=(VP+8}+_gU&lTkZ92b zhq)fu6G~Z7KY}6x5-p%ITOX{%0HP$qmVtpm1w7*s51!mAQb2xj|c}3DP|VKMiDcM 
zK?x=`FFC)oC^0=X1)7l5i&7Ozi$MXBkN_IMPe{;FNCHhpXM#tiK$F&p(JRP+q+XE) zD9NA%6F8|^g003(Y6Y+mu>vcwjsQ&yhGdZ7wjygt5ZHjdXbUz*iF9K!l(RsA1nDdl z*?|qShZ+XyGC_hML!-9H0W6O08HfQHvav;upfG?;`+_tTfvTm}#v*61p)O#}kVpbq zgWQi&Vqjn(B8*)jK6V3}=MFJ1f;hc!Z>E+e!BHY;xkV^o|UvWtsSAnp_{0*zaz z#HXYdq~@ih<|SvQ78j+0{lrt0mK>jySezPPT9i|i26BW9s8lPq($_D^&(A5=%S-ZYmx$nOT$z5@SluE6M{g zI8#8=6eWqd1x5K_(|90}6rTfHja5_t@-R~FG$l$_L}q7slh1VElI&PgoJ zC`v8JNlZ>HDg_C%rlcm7rWcigObQi9%}q)LC7H^c)Z&cP)RLldMh1q?P}%&_l7i9_ zT;^AR3}OR0ExDwq62##JEg8!z&5f@B<-4LPkn0&k`BO@B3*zBPxVWeqY%6a{esX4B zT0T^94MZ_td14W$)&h;=rY7bV)q>TEf}$-gGY1^|@g=Fb1)zmQMRg!cISNt>bX5&Z zQi|$9T*1_e%o0%YFDWgChGGNAXx31+n6Y)MgOF1$EEB%^rnI+LOnaG)`#=9Lw-f(?Us zyr4L>G$lX2A~&a~4J<5>T2YW+oEo2*SCU$kmzYyr)D9LG0)<|2X%cv~8Z=Zoz-Dqo z7LCM%5@Jy&STP?cKNO{=!4h&8Sd)B|F3L5%|CqF%5bzRW!2 zNG|FFDdPld(JjtQFX{*J`QuYEixZP_Qb7~Cpg^1e*2|fh5?@f1nwD8nG!dkXAyhUw zBe5tkxg@m+68Q0%d7y$UJ~y!_yR@Ka64*bSC5cJ#<(VlZ8AX#pal;YH2T8iHlw33g zWEyK|h?}lO(Nqwd9lH3lXd0-XR1M_^tzS&708KFG79=K@6io+55oda8UMhH*Y0(U@ z3;Do_4-wQe!J<6Hxrs$3@r9-NC8@S3Jw)Qnc6*})9$_cAtii_5P b{kk6HS2j>WO(`zg0IrNSGBPlfCg}kHV{gdx literal 10820 zcmZo*nQE!U00upLDf!8zC7C(JdU^RNsl`)zI6wlqsd*(+CQs3CoumNGUDIOUz9zo)SByhdDC^OiQMxmXu`XrRx@#Bo>vVrs$SumSpJW=9lD8N$lZB zN=(i!0@)ZlrH2)+7p6x6sz(8&M**sbJvk?_xEO3nX%A;nVtH|XX;E_OlpcmDJ?tfk z=^#_4WN`Jc6(kmce44@1!v<4gsGpIao2p+`np3J@P?VpQnp{$>4>DIjB|o`XAL`V2 zsI_`U#U)cRIAW)0WH9uwmSmRXq#}7ggTHo)MuuSQlnmk6DH&q1Q!*rCr({USPASch z?U8hc_*x+t;%kTxd_lg>kk3%bP{`oTP|Q#Q8J@w=!xEBO0g4iQ+FGYT<10fpLq9_y zL%lR9F@rIKrH3UaGcR?DH*;xH1~)8DQQX$UpP5%&l9-bNiUp8orzFChi!22VU#Jf< z1( zCpED+HASH;wWv5VKTjb)O(7ux6jTWbR#sCozp}hEWd>vJF#^ zynrT?p^y;)_5^!QW=U#MVh*uhV23&zOrp{R2P`zveZm3?WMoHxz0wHt3ZoUWTo0qw6o|E{pp*{| zQeJ3~#wX?ECud^|*dES`+#IXof<#am!BJ3@TAW%`mWnmk3KV1(z|#lVWf}4r(=sf) zxsani!>9*SAyVvvy)YA+fJ>7y!iWkYUC%Uy%KTD=^2EFnS}26#2%@8V3Ep;;uRE!p-DlU1B>kGnBh1#V;;8B z*DF1Fo^~$5(+=3{&@vE5;m!q0r@E!ZiRr0G zxN5!RV&y+ba8>@KqeO_mZgGCMKUNkKQB+g7VK(05IZ%wB)-^K zLldkCWMEQha&~Hof~^8bRxdrZBpxE7p`Kd_VW}e&XDiq$c&AosKqQdF!GU0_0CzUr 
z_-wu6)ROq*{JfIXyprO0P{73(mlS2@rGs6n2lk34EMJSGCY8}}2gQ(rf&$IL9VttJ zD#|QAMg|5%mLjgMj19?1ElO2LEJ{@?K-URz{|FfuT7W{EN~Fw|mer$(&CRyly$0kB#DtRo>X3MF!456dLB%xA0$o`lJXIktH5HuoAXy6> zGm!FHp*S-y8B}#=7As^HD}bUcGY^~%)r%F>%QA~fN)vO`6&%BZ6%vz^^Gowez{NPo zMm>mO*y;wb8i+f=ojH&dw(#mty|^T`ELB}cK^>X<~z z-{9IS9yxQvTI--94o6|l4^A%?CAztZd5P(%$VE0xN&z87X~E6!3~B{cl)#OI7T<-S zk_l0KV<wC}8wJsX+ri#1V!O^AV!~Ohz*vGk}qD1GqT)gq#&%#StiT z<0y{Ai%SyoQgjnRBh|X#X^A3TQ0hS{qL7s+K$L(R%u^Df^KI}^U6Q-?;=u^pQH+MR z5?PoS7{EyfQd;0tjuZ-65+FSaSprP(L7fZ>?{uPzw{Gx2b8>!eYB6NSG!azB!GZ|p z$VAv6FlfG{Bp+M~fO`1~iFx3zq(?|dfI@0sNl9B=|eSHHx zFth@x&l)5HxVhKFQi7Q@C4GmAkfAT=*JzqBYZJv9ZI zGSrJw6-tXi9!y984ZtTP=qMzCW{oq!BTt~&WyA;-WEc{jb_&q^44R?SCCN_Y4nq%# zS~C=~oWOxXeD4L~K~R)|dM_Z|u-*$u2H7#-Fb@VB59z%q5fLm%an?hOdbkh3g(RRTU#PhT$!7cpA(NEAyiV7m{**ZUz8i4 zn_3K-MFX!|;Z4pgNd&ETftXM`rH3!MvAZT3;c%e;N z5va?ZmolY?EetfnQj9bu-NRXum=s^0nNpH5#hastFEg*8v?Lys;8HU4(x>!rmX@UH zTId#Mrcdb+OUukjjnB+0PAvjEBQ-BEDJL~$N=FZCN@`MR`V_w&E{IFwON(-*`1NoX zC6>p-RCzP@@D-&cgB+h)l$w{ET0F(ChqI(8F*y~q;$})m4^L4lXmTk&C$%g!XNosd z4_``YZb5uzUP)?EUSbYNJzr{3QGOAU`&dIm+;lCb^zeX;PD(6J1zX)C1IqQqR{HuS z`T04;dYP#uX?poZ>G~NZxjFh%dSpQ8 zE&`1=r^KhE7Nq8-q~;}Og5A$slAjz8Qk|2Tmt8!ihcz{?q^NRA4^KgAK|I1a;6e!G zobvK=J;?kU*f}YwW%>oF1;zSPdiX&LeNrnxvqZTCiOD5XI>1Y%z$<*>D?o{1N@tI3 zNl|7lN`ip|WjtglL1zzVdTL%OWZ>PeM+P-A(S6AVnt@6#nbN}xn;eV>&0tRHVM@)L z(j$;sQIKB@if%;cvnA)}rDcNRnGckr<1 zXQZZ$c`$1-69YpJe|$=2F({parr#ir=gLUTDS@Rj)*e1k zI26Pu=jVb3c|fU*7v$!=(p+e`dow~E3eT4yMS`gnnI)iQStX^#2*Ws2KodD7iMa)! z^bgGqpu{o7FGIzzhZkxWbh(&c4{u6-a%NsyKEf)|$9NGeMIvem(qXVK$|Q z6`aGT^hkr$7R0CKfXBLu<3Xb*@!(lpa7yO`rRbv6G)RI21pr@pViBm41&uMMCgx7@ z>k$R10T&XbMLF>$sksH9#Y|IrI0{k=bX5&ZQl|9qLZcxbqzqiF@x-U37MEn^fr~a! 
zk$^~58FHmbJ?x+fV9>(7+9?_tuQHxzRWdR#WR-!4b0Fe0sLLmnA)3X*#K4fn32JRP zW!Ps~gXAngjo%(2l*NP~<3KaqJ)-F4FSwgF#hal=2$Z_vAqo{{?BRmeshRnCQ~Y|^ zKq)^3WDC}$)Vl}zzw?BRwdrPLyjpFxf)sVsmjTFiLm2lk|2 z7RZBs8B%^=zxic2`GLLVSK7mi>_Tuba>CpT=J6FI78R$WR38Fp-UX}UfE2l4M{pFS zrh(i6@=^~k%s5ba4HC(a1-I%MdjvA`QZkD`Gw3CWMd_&}U~_n2H4$h{!4yBpN=s0L z*=JaR;{?)F70uwyc%Jbn3lwMIVTmkI41s69ATbIWLM;Uio%C?RYCDkmrAc}K9(48P diff --git a/docs/_build/doctrees/index.doctree b/docs/_build/doctrees/index.doctree index 3dfe8f0888ddf510511c3b7e49f7d9a787efb1d1..41878354ca955f987b4b6d4ab3e04c8afadd446d 100644 GIT binary patch literal 18990 zcmZo*N>0g7E-lH-Db~x&Pf0E20&#Ow^Gdi188sUVnQ9A}H9|S_5_3~aDhpDJ3t4Il zSv4Z~85kJSQ%g!R^U@WHOA?DpQd0`q{32Kx7#K41Qc^1l**hXc7#J85Q&KWZGV}8i za}h!BIB46#k2 zD77HJII|?bsIri!18i?*UJA$5{rsc<8xDsixbmR ziwgx~3x&`Fwz#smBsCYNhO1Cmv$0U5wop_fl(Q(Y9OB$Uu~3Fkwt_^E_Y1`_Oh`=z z*~?WZq1jj{Sz9Qjks%h!kRhJI6v_tGFCC%7z`&p%TAW%`tY2J`T9&Hsl3JFUlV1P| zMSW11>!;)=7wdzgNw285q);YQGPR;0HMu0dC^ap$C^auRH6=c&G9DD?g|fAUavALC z4k<}2N>44}DwNl3EL5m1RMf~2i{N5lV9?W3hzA>N1#(n|bcReQdr4wC*d(P;Hi))D z>^OOs>gOTFp?+*>QgMb-D0?v^Kno2rRB8(iH8M0}Gc;o})MGQ$VhfElLS;a~rJJY= zN)Niw^q`xTU!)6855^ffsTo?ag(eyqd>QPir5U>3tQjDS#IXivW=f%HZJ}9)PKH8p zhJI|Jxduk4W|rinaur%=HWpge7FvM<6_hG{^Gour6dVT66GVCL4D0EYyXKQ1jS8bs;$b*y`mLZGPM${HYg8TsHgWOk|lnV7;RBd52NJT7)U!kcU;@=pJP`0Afw9=x?!dP$}fJ3WV zY+)RzurVx*PXuLjjo8A33`l4|vIrM6ZImWufJ)iK+QKB5?_d^!ON(Ss03!l1C6pgp zAL_#D!&FcR7N%veC4#aZFVx(^bd3y94$r79%mjHKRFk?xYtmq7O`4SMl3-dua6=Y-vBz8dgv7olF5M=vY1_lPtG*ISK zFH2R(OD#(+QYZ!GwS)vv*_V)@kd&I1UzDl?Qle0tkzbmVqEM8Yn4(aUk;-M2lAmmq zfM%^id1gt50;mj8uz?0iJTyq`5){%)GgDGQ89hCtN}s zqBJQHR2xCrie` zL0D0clbM`Z(gCf-wPL{~0?2|Mq=soPC^Pbd;-n9~&dSUKwH&w#`!yR2C)5^B1m&AH zkfSF-L);=o(*Dk;Y>&lwU3ZtU|_(PR|;pfHWtpVEt~_2DME@tF;be80ZO@ZYYXRrw4kTl z`A{FC_z9eH7a*tHg{_T+i)srO!<;`C?EDN!g5?Fd89DD3E@^EnTv}VW3}hpQEW}l* zkO~1*)Gn_rTmdr}rKnwr;<&<9p>psJf-X`AK^NXZSPd?D*JN;kOI{IJ>97`3@~*2b zTn};)sHfqKwWkpb>uGGr5YJc+Nz9Ofcq58az;UnZ4Yy!ZhCR6dLcr9`t&N3SY74i5 
z(ig~vb>LV>PmM{XIXS5%@j02rC0vEuG#d-I*B0)8g%8wGq1;HS3U`9i>Mo5?Hi%&1 zZg4fM1<51uTmee|Vz40DgV6{D*%zN#lA6m^xL32Wa9?fVeo$bQF)%P_X*uWT7L=Bx zYH4wCX(>Po2iKg$;*!i{1*nvQb54F~ih@mQvQd0VX^C9|mzIK~uP2w5f~!+7mzIKO zUU5lcUUF(NmzF|sYI12&W=W+&dQpC90Z1eyF&#uXmX_oz1ScovWag!FWr$~N&ye=Z zPzvRQyRGm5$g2mjM1~*CXNNR0Ko#BL+QK6s_j9AVzwl^=c*Z_x%z`VHV^|FH76aw# z*8 zh38@Mc@mtFLfIfj7hV9ThzwAyU#u;>1XBfbFgT(wYh>V#UxY*A_m3*sW4q_!R2U z!e<&8Q!*Z+mgx6DK7F2H9|3A9U=6Xt7p;wjFKY{5!Q$sJI2JP?rFbHQbD%T{91E{& z3*UfZ0aWQZ`g#_=1qJatr~^=nU2qV;2M6(o+QN?@*S^gV&v*?DDR652gsD;2TN32r z&l&dMiWOwXm)6F@ueF8WU`BiZC#%E^r3{#>zSkE1fVs-mDY)<_$W^~E4HwS9obd?Fg5CWOMzVVAL=Ssr{E$6Mh1q~#v(>W28P-qCPoGZm|1_& zoma#RR>J~O6Tt~;tisBVB34EQh76V3A~vurQqLV+DnlGx#12yAtl3z^0XB&ftX3=w ztXDh>WIU4li@3nj=tk*!OK0fE7IA}&vPW|S55zCLVAJ`)+To^ytilM!eS%T~@F|ahcQM%qTAb*L2 zjDmC^(48Uy@tq{t4k@r9a63R2WB5)QtWpM|65QYgmFPvXAS+aAi{!u(=ZsU(gaJQI{@kzEsz61e$fUSqyttf7Qq7=X+WymigZEh#Ir!w zA$v{_tRCHLU2i#%=k!5(>@m$UfOygnY@-p_1hFir1t4c2dlK5}+N;@EWDHhs0#P4f z2(BiBOY(~n(^J7Cq{;btC8>EO3Mr{MnPsU(m5{nW*qBR8!96E2IWx5gQA1~ds^ub6 zuvr+chtbQd<-X7Du;PJPYQJFp%XSe}sdL0X2X@ z)f+pgQwB+`MUfyS;#nYjko^>e)nr|7Wl-=$gY-aJa8TD2#Xvk33$`Z?tXV7zYBtDn zWRD?bw|KDn1c>?wEpU=|Ni0bOjc`J;R*(y*o$OMXmzbOH;>4w;5R{ryoROJUf}YP3 z!G@rRg)TgwC4uEZfsqWhGzH9ultNls2uq7nLB12u0yzyiMAE?OG0gW?1%*gDNDnAa zWq{4j1gjOx0vn}LTa*Rnqw5vV0=WdFJsYIm9vr?{2Nd5s?fsudm z!O9Adlp(nvoPP?za=7!4I>;eKU`3$(Qw%n+1k4u$H=%-Df{RK)4ppfwDg%pSdI;nU zkU8Zbb)c}W02@;YRxXwW)-Ij}G8;K)tH9Fers;Zvom34n3et!Jxu6CTEVW>B>%bb} z=7KCkPH5m9R1a3!08t4EVrXJ2Y6KarQd`snmPEH-JPYQVW{^6NZ(6`+wStw4Wr4Me zXMrq0_DvgD8r?KqZw*kuw1bR-#x%?s9S|>df(`Ei>xLT+vJt~e-C&hH5S8F27$Q@s z)E4!ECD0v!@J=7d4IuCIgUy)$RxSn(8kjjn6G3Xkvp_Z>`)v|fExOUV-kKo4O$O8ZUusEteo=O2UOKd7$S*BP zH8n;r2WEmbVK@<94$K0}gS<~v64Swx^ITexVW05ClH?5Zo&~ZLIT|j3rO{2(^#;53GRP=Ms~WGz(jBmJ$od9^Nkw-- zs>HKEwjulZ9#}QH;kw>Npy0a?(gP{$kW6|2@!>ErgPefuL!=txF8BIjlys`Jfl7X%Yy>uIoRqK zV7?f5$$*v?9;=I9g8VI>1#&TRSiJ%pfMKDx2`H>ygYmj~I9;2t9sOYKwk?r7`^patz3{Um(*!aq=5%+8?lTNZpQV 
zTG3ySO7Sd^t;mt^53B>-0$p#gTmOUf*n?XNXr?hRK?d=dz=L>9;6c1Bs1+cmASYU+ zs)re@o&};FR>bEe=2VtsCKp2zsdr{xYH=oL8o<>z2tAFmf^}lJ7M@1g!1AE*Uh9E2PkTwo0t7I<5L0-GD82b3mxz=rdJ)rw_-jZ&#C z;sf*1^@?YKoC4C$57G`9L4>=cNB|O$f?yMcz$QS_Ajl4kG$;&KCW534$phdtC<>Ot zod&HyE)fGO0wqFmuz?a_KBSET4gVrZkZVR0Ks=)hHeU~{Q7j8;F33`3&mfib`e5}25cLtN;F8|405sbTF2`XD zBO%!*I1n^^5bPR^o^=et`Y{{|&pJk6d655&!S%C=0lQBaA0te zE6B|%wMA}VaZJB~oB=Y&9i$Ev10G;wJi*GvvcTHKvp{Aehq4z~8r?KqZ?KcRK}JDR z9>@hgkYMozo9hSG2samG8FI=4XCr^G$^eK;a5e%bDV5ryK(H9Pwc=SYPXvK%26-YF zY(xlHIW#O>gNs5z(&AYl^N~Fh23Ce{s;;*KC|trpdZ3mrGfeA zdd0Iq4gzUU2Wf}Q^Pu^oEC)#$k~hGaCKoJ+JJYy; z+>-}#4=B&%gUu@d>k)$_HBbbo)D{(j#W1}DaskMQB9IZFKraRxQ36&DO=^C@MWrBV z@hp(}$bno2R)%h>t~c0KNyAiBWow2_y!6xD(yklayJ$Hc%8DuP%uuM1x@UsTV;zyMm+Qq%x)AP0D1i#v3! zLs28xBn!>Pq9(AF&0xM5cm=*AVg-H>Yz2N%3&;@hERX}CAp=@wSkwyEhvXvgRI08w z+@dy+K6~8ewnKue1MG@UuxVmhP|HA00|ysqSwm45SV=cT2`Frk9Rp58JzzQ9iO3V? z`(Cg@P)h0p8`=-%i-A{SC4ko<27@;qCBvYzOj!3II>3FQDKi=495l%iQo3=E)kzgZyHfL6%B^%uXq$&fWxa5j3Je9zlvI=qm1_&0vLFAPOUFp=-G@ z4Th}c#+|mHu7)h_MlK6LtJ#aTg3XW3g8Hdw8>pZZ00Wv~7 z3*4T(7ET*1@q{=g37n}u~C6)!X3gj$s!UWeV=fFzN zLzIB)6(pyC>y-;&Io$P15X|Qn!3sh3$|bO&m%)6z6_E4A zvp}{(y$!BSu7b5-cpF|ET?1(Y6-U>>HrxO!7t4a02C^1v8n`yO36{n%%{vHG=-dJs z)vVcAbQ^5c9k5!sQ6Tf7MuAq^72O3%gO^>yS7H~Xra_k4f#%#{K0{h-2c8lwE=ei6 z2eJuVu%N8F?u6$#*s?pYyY7Qj_iHv5JpjAwA;{tksJm2ZiyndbU_TW-2D7JRfh+(| zmlT(z=oY1>6+K~MU_e=%mj&`9$k$InrbA}nU`{T221zW>!S=lXn+eGxAQwPdvxpUX z&?@I8Sn(@}V(?fTB!+YIQ=l>Y8e|h@48H+u0lOb*5g%d_{w+uwD2CsGZFmn>E|vu~ z4P-4eTEH><0W6JSns+eBuOC51fnxX**r?B7wQ!?A=0lAF$M6@BG@clS`3zeOe+Ah@ zObmnF^$nyN6vN-a?)m|;I0Nb~&WjiQ#`>`~HK?gv2n&1vp}uff+PX$;b>IsRWN`LSnc$HL)l;BLUQyK+nBQV8vjU zV~ty8W(Ee(g2*Bku-U9&<&d#P6tj!iKnlgPKu&;05jeKl!5T2k_YML1n**c=6x*C& z!@0m}#j>DAfoy~t1&(cQkTj0iW@BJrfcO$yT=RhJAttWDuHpr$2E{cW*j4->Ycrs( zQmHKx0Q12F3!XVS_BM=TQ5@7}pGmC<469bzFiD{7i zm@%D-xp5~sKer&iG%tm#NSqnmpq2oeC5dDfqJ{|NfGIDM0=Y${wn!SR5aJ~nW(I~( z{*wIUlA_d9-SVQuf`ZhdB3Wio*9^RA6jH0fi+4F@28Lwx9XC+zTt)H__b7nfqX@PJ 
z;x~{BA(0!(4pm*G1a*!wNL?sLUTJPpYEf!RkqR>dgEvDcZ)RR{PH9SNMrKM%YF?2l zGXq0shImGAD0^~Z0cb|INR63+!7r30Jtse@NS&F1p#!olAe0NdXS6s!FQ>9d1FQ|~ z*9@)LQ1;ZklA_Gi;v!9E28P%oEszP|vJq`ZPLVb<14CPp4k#Q#*`Q9)1=$hG19ewg zW=?8xksjC_eP#xRERdGOP>$Thij>rXl8hn)kUy9}dlSMkK-+UsI@8h_G8syte5f0! ziwvPbV#Ex}(;Bg%tT~{a&fZKApA{K{`jB0I2%P-1CGep-HhNnU}X ze$7lNE^+|d=LoisEh9C9JwqphBMUTQk-?b-YLRDfWvtHN_RHYO0=027c(Xt)&J4Z` z<5G~foxmCuG9*($U5!xglA^@C;s4+vf_(j2uO&X&`45 z7rB82-9cu7yNsYlwgSX+NM``V7I}bDAE?)v2yViHm4W*ZutNx9i#$O}K>eM>(olio z%G{**(P;eN8fx-Y3l%NC`4(1>- z2O}hN;K&q=&`iPN>F4HO1ZwetV<-}20VpR#fh~vzn+Zx;Dz!y1U=Bu#&H}jw6uYq? zz4j5>3=9k&l?ACqIhlFcu*{+aZd)lSWELyr<(DYH2QO0eisB$iIUa0b0@x0ah2mKt zCm@n?hEf)&7${9j1dWOm7W{Wx|yBlvL36$<(}LP!XI5 z_7hK0T5>#Ssc3v@QBF}l$PqHI(SVZt{G4LF%+!)Jz5Jqd{fv^_9Q~pKkP=Q%?>H4? zPf;PbROTy4EdY5Ha@a$0Q4!d1zT%Ri%!2sj{9Moig5siLuqbayKFG@CY=|36KnAjc zDu&9UQV^RrCowO*G%-CjJ~=-nwWtgv#*~^@R1RWrrX+$k@F(UL6jgvt;{o+|i%R0b zC3R6H$iqybJn<>1#U+_};Ji>&1y;zFk(dKhRt-|d8p;cGALJN_q8gC83>&{tnbe}9 z{G#~Oykt;gJv|=6%}C5k$w@6Lss*`20Oa}NoW$acqSS(%#N^bXI*>4HN@`MRdQm;d zq)>s>+@w@clBvu|EzU?yEh%aMRfnOn`K2WVr6st`Zv+_x+P9UNTvF5o;_!k_mB=g2 zjjsR|Vnxj$*E5Flr#kEE9Z2N>MjhR5Up!KQTK#H8(LcCq6MHr6?72@J3M& zIPu7q6lLbZ%R@vmiZ3nz1#B-k(3n&6%8L5HhCw`DP@Gztk{@4@n^V*e78XdYD9A5P zjnB+0NiE7t%qcFK02UVlgPaab|ka3=p3`J|(jl zbYe_AG!SQk^>SvW#20|hA*m>u1yaTkDw~{NUbOV zW!i$oWt+X@flIz_7ZsD6WnCV0u8KY7-vX=n@m}tvC53#44=|a zA(Rt{AZ?$`;EH7nC`~A3fZLf{!Io)&4zkHmk43NTOG7zf)lYHJHc&VeZ3hJ{=mZT= YgKh`dyqzHPbYhEkfoO%q;?g8N0Kt_RoB#j- literal 16675 zcmZo*nJVGP00upLDf!8zC7C(JdU^RNsl`)zI6wlqsd*(+CQs3lpfa1yp+_6DLu@YDPVDqq{QUxB9J8@MR4Wp z$vKI|#bB+aJ)A{}<;D4>MaijCdbsrT6ym{VSWW3+FG)-XX_=D2)x(mLnU^}no3V!t zrc_-&BR@A)zp6B+RKK7oKPxr4q*y;Uza(EjB|o`XA8d$TQE|zX9=3wSB2Xw~uw-z= zPRU@*U@1+?VCZ2lPE7^{0#fK?@Yhb!$PkR3k|7#9MWaUsWSnlIZf<^Zwr);lQc+@2 zrEXe&k#16cN&b`!k<<+F*eMy3Af>V(rB(`w3Lv!#P_+ta`9%s~wXlFm&5(|rQraV) zT2YXiToPZDnwDCWnwOlK5}#BVpP4eHc1ngw25)hOY!Abf4EYSz9!ab!!J$$+CBqP; zRyl(uLoq`MVjUag z2zOygMq-IVVopwed9gxeeyKu9zCvnZab`{>Bm(kFOB6~{i%W_Xk`jwkQxx*^xZ)EW 
z!-Ewv^U{hEi%W`1lS@jAQWNw*q3oNX;LQ&9A;_m3pd8@dV?&8uQ;-xc4yXAZOYC7QN=+**%AC?8 zmQhktV5P60SYE7`n44IYpQo3cpQ{hfpP)j<5Yr=ipdtj*nN|w8jFQjb%24p;LQVis z2dcpW9UjWy@Z?WVEh)*&OV=$fNh~T!O+mP!2PUO}kOF1+4A~4*Pf#%+Wp#~}aWz5M?&X|J8e7vPe8Gc0BJl8V~l(*E&QWf%2 z%TkLJNB_t>$rKaT6s0DnD3oNRa#^M1CtD?;xm=+< zvm`?SRNyGsK!Y$I8cucz3hAYpDXE}*oSs^ukd~O6nUk4V#0Ay@w^B_Zv!qy|v^X(6 z6`phX!Qr5s(E&=L8X4|AqF9}ck`!?$$xzN%0xp`^b23X(ixP8)Etoi9E(GP5Eg)k+ zj>&NEVF85|ven=mvK^K~WbpY1sVJ!(Nn{R#I7zI;Dp>B|mve4>zcQsVK#2*boeJ3#9bS5@2Lt02dsP z0+f(0q`=Bj0clmvl3`?EK$NN?;BtdIsWc}iwIn_#vl!Gw!bp`pY!C%gdKk69br^Df z0i{dR4EZc=i22Aa1hwlpL59X>mZaul%f;xK50p^Kw6vV_a|=pKQnj?WxU>`?xy>~v zvA85NSph1g;GC0RnxbHnnrswbQd(k{z@??&=-0xwBCNfar4|4&i zFo(II0_Fn31))a9o2+(51_n@R4l`;?hH_RD%n0`$QLHH)BcdlFMKq-8N_;diJNkN( z9|tQyZiU2wBs2~dfP_GC;NAl^8@b#8D_721jpSYuThkX;Sdq?~o1ELZcRm5t=P z{Ts-k5Vy-f-Tn?F1aUitJ;*))YgNwrNtO>-KxH}k&K6)|V90P~eYN(|ZT#{dun4YSTn3tlEoS#>cnpdKblA4oQmReK^sltPexwI7Aa}tv? zQ;R69XcY+A2MZ4cXn5Fyyvh+fB`b)?S}ZF7qyt{-dVqM4`*KRiXY}2=Wg}DSZn_2Rx;(0r4PF%mMQbMylS96j~If zYM6UTNY%_iF39bHV=xyJm#lAqltYrWIyB5KfP^4^2AhqXKETSAvu-2%nD}%Ji+rS9 zNTPE;f!qdhE@UKIIqMZj2;y80xCO{=2J2DI`bw6YIkBZu(j48x0W$-ZIyIpo&BDyU z0P0a?xFe}ScDgu7fpQi0ndP1ARZ)kxey+}$bzOwAw*#ogu9sdbRC2lk}fcJ6O|2n*r7(i zf=?G3e10%v+!0EU6G$RRUO6ig$@_`qHClYNv>a1%GxIWwONtUflWE|#OEGku0Fq)n zQ&RIvK%)j~3W>?dsl~+#xrup+>EKZ!E-lFTLwI6Iat4KIN}hmQV4gth#ox)TH^8yJ4zYMXu)W3(k5JW*=F+*1UGPF?)CgZmTAH$-KY9zIl)U}0+p4ck93 z)7&vsBBwNd76yh4KjC1!s6A^ zazvO8^Q;Bbv*I9Mkd%3=3oN6h%%)~9NrRAHSl$nzXOV`lSI@piWBrS}^WLR=`g8K13$R8vn=T9IV@Z|go z#Dl~qKbkj?!wWq7_7y3xC``(5XOoaVP{Wgj724q@Dk=9c2m8U(hYK|9_+Uo3L!^<@ zvLZ-MIZKiiM?DUkfER;z(Y!&k%{b=bMG{j|GC@7D#2nqCRLEjU%GvDg?pph0eBXg1iV_qnP2|gIz6hnAn0eC}){M!UVJ&D?`ECy)=o!w1Dgm z5{fp-1n{DzVDKVUg`m`e{Nl`#{G!T)c%&NL735@?ABjpSS>YfpkV4NBno#^fLZD2- z0A4-=SsjXXmbV=h4G z;Q*zGoU+su(8@wtsQa>#L2gpciULs}r@7-a5h)76**Fanf8g02=;BY3=0wCG0qe_ zLf}Y1@d;)GWEHY9FnBX&$Y%9`te1xceG7;O33_bipa*+D)?kN~Bc8aIUe;wz2H7MU zJEcbmv~o=s>2#6Vtnp6_~iwD=fq^APX{mpb-Ji 
zvmn1i)@{QV!lEu3t;@=Sxm*nFa!ARiTLjOnNUjH~P=KgVKv4npffd{b79r9z$% zYmp6x7TLQ&!3i}EwD1>;N~C0$bsnT#IqNti8V5@_wrKUN0N4v3NzU632W zt7stRXR)$@hZVlTjCMzI22zZJ%Xbbolr{@FMLaaBa`RKD5mgEx$3vnj7#dX)AR*dE z70d(RsDe5J6jd;1C_|l*#m0uMK0#YS28tq6HjF5Ome3MKKIiT7uS~g+L?A7v?3J<|{UcSHMw~0J2d&0~A#-M?`>x zKph>TqbeISsvzdVqAD3`WQKbW%yRUID#jI2uzEWPyu_3vIX|}`zceoeYoib?8TIg& z|syMD=EqZ?+)|Jn$O0-&^9HphZm|5YJVqa)npGxZem4BYC%cH6mKTz zjwVojnAH#RoPu{4G*z{NxE!&FO%Q&Fu2cYm>5gEsmL{e42qu=6$m>NC=e_CFT{UIHPKPVN0J-rU{cgj?;D z9zN(I@pwqPYl>eFM=@k)OYxK*LC{u3@ZQC=BJjeelqo%IVTn1Vsl`aa)x%kmm=s^0 znF5M%jvl_uyn@n__|&{)Q00+6rH8Y$Bu&>sw>UF>N{<*Qg~ey)6{i+~QfYi@9;j87 zGNq%3H6=BvG<}L+4;REG@ufvMQ~Y|kixSJ>s=OI{_=?h!L5_!X!HTE&^>Bh(x~ZTY zjZ->$ctGvOqLO%MbTRetrIh9t#AoJ}q!#5R=77}mr4|+C7a_TiH8jLc*J4T!56EcH z8W*tDJuFG$l$_L}DLn!p2NmZe z7H1Tt7UU#?c2No!r6%VWrNpPCf;NAo<|TtNlV1;SNq%xXNHr*ii>LIkg7R7Alpdaf z)Pi_~bHE#=K+W{>@^Za`%94!yJg{?8Qp@xUQVWXpp_w+dq9h(v7$qiywuyjtpMrCG zd<7^AOzG^AEh);(Mad?Rpo}jr0J*ZWhci7jFSRJKB)@2iUylrGWTN{LwB9>2xnxQY zZ%$%fdTC;MYJ75jO6rszrqsMCJp!o}1^LCPD51}moS&DL35sVvP(F^&Oo=ZjN=?hG zn9{=p+Nvs$nwylG5?@?WnUh+ak(ydErL#u}v?a8-Gzrxs%&B>0Q~Y}P!4X_goLZWa zA77CRN;AA*A$T(O>k%lf%qvN(h|kDO&&bJ4&nTJF!;%A9r`{u*Us{5kTG2e1^%+!! 
z@W-cQ7K74hJlyeI8HqXYRL0uFS6ot*SpW)m(5xURmGOeyoL8C)4R>!ws6*kU5lE3> zYDH!VXzxu)X)!Fxvi5MMB!V_cCgv7^(mymefD*?PzYI&i9$u(jIgq`&em%S?`N^4i zY554NM3Zy!6SLz}a}zUj;uBL+ic-PbdOLd9Vc9%GsfVXHH?gQBzOXdEB()e6%J8BS zkr+W44rE<&Nqku*cyt96;%H$urH2)q!>9B}gVYwpr$Y9c6vu+s9=K=&6$yw`m0?<%)WZ%MaRKdpuAQQhWys3FkfjG|M;m15 zf_Gt}@6Ythn2^z%wGyOm8HhLxA`XCb7-ooMZ3FQ(g9r)GP6JWUvfdscl>OTv>p4eR3HaJLc=e^z%NUVoq?evV}c*p-yK;XKl^1E zc7VOxkp=RqUuh37va7*C&k6Gbn8#O;SX7*fT+wE&$JVfhRjgb+u!@xln}-?`GbCfD zAlI$z;f_Imo__AA)hlR&FiD+9cr|Ou4jn{x?=f{Lh0IW;Dx^a9*}>{tP>@+EC}D2p zRl=-rJ=kHLFKBn0t28MQR05$iPr#wY0jaFO(ZW%bng+5494nAv9<+g+*eO|{5v0T( zT;n9MQ?fuKdx@nzys$_C)s!F;p*vj}djvou8bzQ@?vSPg*d08uHUPM*>Id4v3(};K zp_`$R1y1SfK*=4HXCUJly%`-DjTu#0;M}nglrb_g60<-#0lb4N3zSzNZCH^EjZ#oJ O^l-vj9v~Y_lk@>> import boto.ec2, moto + >>> mock_ec2 = moto.mock_ec2() + >>> mock_ec2.start() + >>> conn = boto.ec2.connect_to_region("eu-west-1") + +Launching instances +------------------- + +After mock is started, the behavior is the same than previously:: + + >>> reservation = conn.run_instances('ami-f00ba4') + >>> reservation.instances[0] + Instance:i-91dd2f32 + +Moto set static or generate random object's attributes:: + + >>> vars(reservation.instances[0]) + {'_in_monitoring_element': False, + '_placement': None, + '_previous_state': None, + '_state': pending(0), + 'ami_launch_index': u'0', + 'architecture': u'x86_64', + 'block_device_mapping': None, + 'client_token': '', + 'connection': EC2Connection:ec2.eu-west-1.amazonaws.com, + 'dns_name': u'ec2-54.214.135.84.compute-1.amazonaws.com', + 'ebs_optimized': False, + 'eventsSet': None, + 'group_name': None, + 'groups': [], + 'hypervisor': u'xen', + 'id': u'i-91dd2f32', + 'image_id': u'f00ba4', + 'instance_profile': None, + 'instance_type': u'm1.small', + 'interfaces': [NetworkInterface:eni-ed65f870], + 'ip_address': u'54.214.135.84', + 'item': u'\n ', + 'kernel': u'None', + 'key_name': u'None', + 'launch_time': u'2015-07-27T05:59:57Z', + 'monitored': True, + 'monitoring': u'\n ', + 
'monitoring_state': u'enabled', + 'persistent': False, + 'platform': None, + 'private_dns_name': u'ip-10.136.187.180.ec2.internal', + 'private_ip_address': u'10.136.187.180', + 'product_codes': [], + 'public_dns_name': u'ec2-54.214.135.84.compute-1.amazonaws.com', + 'ramdisk': None, + 'reason': '', + 'region': RegionInfo:eu-west-1, + 'requester_id': None, + 'root_device_name': None, + 'root_device_type': None, + 'sourceDestCheck': u'true', + 'spot_instance_request_id': None, + 'state_reason': None, + 'subnet_id': None, + 'tags': {}, + 'virtualization_type': u'paravirtual', + 'vpc_id': None} diff --git a/docs/_build/html/_sources/getting_started.rst.txt b/docs/_build/html/_sources/getting_started.rst.txt new file mode 100644 index 000000000..e0a4fb10e --- /dev/null +++ b/docs/_build/html/_sources/getting_started.rst.txt @@ -0,0 +1,112 @@ +========================= +Getting Started with Moto +========================= + +Installing Moto +--------------- + +You can use ``pip`` to install the latest released version of ``moto``:: + + pip install moto + +If you want to install ``moto`` from source:: + + git clone git://github.com/spulec/moto.git + cd moto + python setup.py install + +Moto usage +---------- + +For example we have the following code we want to test: + +.. sourcecode:: python + + import boto + from boto.s3.key import Key + + class MyModel(object): + def __init__(self, name, value): + self.name = name + self.value = value + + def save(self): + conn = boto.connect_s3() + bucket = conn.get_bucket('mybucket') + k = Key(bucket) + k.key = self.name + k.set_contents_from_string(self.value) + +There are several method to do this, just keep in mind Moto creates a full blank environment. + +Decorator +~~~~~~~~~ + +With a decorator wrapping all the calls to S3 are automatically mocked out. + +.. 
sourcecode:: python + + import boto + from moto import mock_s3 + from mymodule import MyModel + + @mock_s3 + def test_my_model_save(): + conn = boto.connect_s3() + # We need to create the bucket since this is all in Moto's 'virtual' AWS account + conn.create_bucket('mybucket') + + model_instance = MyModel('steve', 'is awesome') + model_instance.save() + + assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + +Context manager +~~~~~~~~~~~~~~~ + +Same as decorator, every call inside ``with`` statement are mocked out. + +.. sourcecode:: python + + def test_my_model_save(): + with mock_s3(): + conn = boto.connect_s3() + conn.create_bucket('mybucket') + + model_instance = MyModel('steve', 'is awesome') + model_instance.save() + + assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + +Raw +~~~ + +You can also start and stop manually the mocking. + +.. sourcecode:: python + + def test_my_model_save(): + mock = mock_s3() + mock.start() + + conn = boto.connect_s3() + conn.create_bucket('mybucket') + + model_instance = MyModel('steve', 'is awesome') + model_instance.save() + + assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + + mock.stop() + +Stand-alone server mode +~~~~~~~~~~~~~~~~~~~~~~~ + +Moto comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. It is very useful to test even if you don't use Python. + +.. sourcecode:: bash + + $ moto_server ec2 -p3000 + * Running on http://127.0.0.1:3000/ + +This method isn't encouraged if you're using ``boto``, best is to use decorator method. diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt new file mode 100644 index 000000000..189ce524a --- /dev/null +++ b/docs/_build/html/_sources/index.rst.txt @@ -0,0 +1,91 @@ +.. 
_index: + +============================= +Moto: A Mock library for boto +============================= + +A library that allows you to easily mock out tests based on +_`AWS infrastructure`. + +.. _AWS infrastructure: http://aws.amazon.com/ + +Getting Started +--------------- + +If you've never used ``moto`` before, you should read the +:doc:`Getting Started with Moto ` guide to get familiar +with ``moto`` & its usage. + +Currently implemented Services +------------------------------ + +* **Compute** + + * :doc:`Elastic Compute Cloud ` + * AMI + * EBS + * Instances + * Security groups + * Tags + * Auto Scaling + +* **Storage and content delivery** + + * S3 + * Glacier + +* **Database** + + * RDS + * DynamoDB + * Redshift + +* **Networking** + + * Route53 + +* **Administration and security** + + * Identity & access management + * CloudWatch + +* **Deployment and management** + + * CloudFormation + +* **Analytics** + + * Kinesis + * EMR + +* **Application service** + + * SQS + * SES + +* **Mobile services** + + * SNS + +Additional Resources +-------------------- + +* `Moto Source Repository`_ +* `Moto Issue Tracker`_ + +.. _Moto Issue Tracker: https://github.com/spulec/moto/issues +.. _Moto Source Repository: https://github.com/spulec/moto + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + +.. 
toctree:: + :maxdepth: 2 + :hidden: + :glob: + + getting_started diff --git a/docs/_build/html/_static/alabaster.css b/docs/_build/html/_static/alabaster.css index 07a9e2a42..be65b1374 100644 --- a/docs/_build/html/_static/alabaster.css +++ b/docs/_build/html/_static/alabaster.css @@ -15,6 +15,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ @@ -22,12 +57,13 @@ body { font-family: 'goudy old style', 'minion pro', 'bell mt', Georgia, 'Hiragino Mincho Pro', serif; font-size: 17px; - background-color: white; + background-color: #fff; color: #000; margin: 0; padding: 0; } + div.document { width: 940px; margin: 30px auto 0 auto; @@ -44,6 +80,8 @@ div.bodywrapper { div.sphinxsidebar { width: 220px; + font-size: 14px; + line-height: 1.5; } hr { @@ -51,11 +89,15 @@ hr { } div.body { - background-color: #ffffff; + background-color: #fff; color: #3E4349; padding: 0 30px 0 30px; } +div.body > .section { + text-align: left; +} + div.footer { width: 940px; margin: 20px auto 30px auto; @@ -68,6 +110,11 @@ div.footer a { color: #888; } +p.caption { + font-family: inherit; + font-size: inherit; +} + div.relations { display: none; @@ -84,11 +131,6 @@ div.sphinxsidebar a:hover { border-bottom: 1px solid #999; } -div.sphinxsidebar { - font-size: 14px; - line-height: 1.5; -} - div.sphinxsidebarwrapper { padding: 18px 10px; } @@ -168,8 +210,8 @@ div.sphinxsidebar input { div.sphinxsidebar hr { border: none; height: 1px; - color: #999; - background: #999; + color: #AAA; + background: #AAA; text-align: left; margin-left: 0; @@ -225,19 +267,15 @@ div.body p, div.body dd, div.body li { div.admonition { margin: 20px 0px; padding: 10px 30px; - background-color: #FCC; - border: 1px solid #FAA; + background-color: #EEE; + border: 1px solid #CCC; } -div.admonition tt.xref, div.admonition a tt { +div.admonition tt.xref, div.admonition code.xref, div.admonition a 
tt { + background-color: #FBFBFB; border-bottom: 1px solid #fafafa; } -dd div.admonition { - margin-left: -60px; - padding-left: 60px; -} - div.admonition p.admonition-title { font-family: 'Garamond', 'Georgia', serif; font-weight: normal; @@ -252,25 +290,71 @@ div.admonition p.last { } div.highlight { - background-color: white; + background-color: #fff; } dt:target, .highlight { background: #FAF3E8; } +div.warning { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.danger { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.error { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.caution { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.attention { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.important { + background-color: #EEE; + border: 1px solid #CCC; +} + div.note { background-color: #EEE; border: 1px solid #CCC; } +div.tip { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.hint { + background-color: #EEE; + border: 1px solid #CCC; +} + div.seealso { background-color: #EEE; border: 1px solid #CCC; } div.topic { - background-color: #eee; + background-color: #EEE; } p.admonition-title { @@ -305,16 +389,16 @@ tt.descname, code.descname { } img.screenshot { - -moz-box-shadow: 2px 2px 4px #eee; - -webkit-box-shadow: 2px 2px 4px #eee; - box-shadow: 2px 2px 4px #eee; + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; } table.docutils { border: 1px solid #888; - -moz-box-shadow: 2px 2px 4px #eee; - -webkit-box-shadow: 2px 2px 4px #eee; - box-shadow: 2px 2px 4px #eee; + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; } table.docutils td, 
table.docutils th { @@ -350,8 +434,22 @@ table.field-list td { padding: 0; } +table.field-list p { + margin-bottom: 0.8em; +} + +/* Cloned from + * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 + */ +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + table.footnote td.label { - width: 0px; + width: .1px; padding: 0.3em 0 0.3em 0.5em; } @@ -374,6 +472,7 @@ blockquote { } ul, ol { + /* Matches the 30px from the narrow-screen "li > ul" selector below */ margin: 10px 0 10px 30px; padding: 0; } @@ -385,16 +484,15 @@ pre { line-height: 1.3em; } +div.viewcode-block:target { + background: #ffd; +} + dl pre, blockquote pre, li pre { margin-left: 0; padding-left: 30px; } -dl dl pre { - margin-left: -90px; - padding-left: 90px; -} - tt, code { background-color: #ecf0f3; color: #222; @@ -403,7 +501,7 @@ tt, code { tt.xref, code.xref, a tt { background-color: #FBFBFB; - border-bottom: 1px solid white; + border-bottom: 1px solid #fff; } a.reference { @@ -411,6 +509,11 @@ a.reference { border-bottom: 1px dotted #004B6B; } +/* Don't put an underline on images */ +a.image-reference, a.image-reference:hover { + border-bottom: none; +} + a.reference:hover { border-bottom: 1px solid #6D4100; } @@ -460,6 +563,11 @@ a:hover tt, a:hover code { margin-left: 0; } + li > ul { + /* Matches the 30px from the "ul, ol" selector above */ + margin-left: 30px; + } + .document { width: auto; } @@ -495,7 +603,7 @@ a:hover tt, a:hover code { div.documentwrapper { float: none; - background: white; + background: #fff; } div.sphinxsidebar { @@ -510,7 +618,7 @@ a:hover tt, a:hover code { div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, div.sphinxsidebar h3 a { - color: white; + color: #fff; } div.sphinxsidebar a { diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css index 9fa77d886..7ed0e58ed 100644 --- a/docs/_build/html/_static/basic.css +++ 
b/docs/_build/html/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @@ -52,6 +52,8 @@ div.sphinxsidebar { width: 230px; margin-left: -100%; font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; } div.sphinxsidebar ul { @@ -83,10 +85,6 @@ div.sphinxsidebar #searchbox input[type="text"] { width: 170px; } -div.sphinxsidebar #searchbox input[type="submit"] { - width: 30px; -} - img { border: 0; max-width: 100%; @@ -124,6 +122,8 @@ ul.keywordmatches li.goodmatch a { table.contentstable { width: 90%; + margin-left: auto; + margin-right: auto; } table.contentstable p.biglink { @@ -151,9 +151,14 @@ table.indextable td { vertical-align: top; } -table.indextable dl, table.indextable dd { +table.indextable ul { margin-top: 0; margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; } table.indextable tr.pcap { @@ -185,8 +190,22 @@ div.genindex-jumpbox { padding: 0.4em; } +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + /* -- general body styles --------------------------------------------------- */ +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + a.headerlink { visibility: hidden; } @@ -212,10 +231,6 @@ div.body td { text-align: left; } -.field-list ul { - padding-left: 1em; -} - .first { margin-top: 0 !important; } @@ -332,10 +347,6 @@ table.docutils td, table.docutils th { border-bottom: 1px solid #aaa; } -table.field-list td, table.field-list th { - border: 0 !important; -} - table.footnote td, table.footnote th { border: 0 !important; } @@ -372,6 +383,20 @@ div.figure p.caption 
span.caption-number { div.figure p.caption span.caption-text { } +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} /* -- other body styles ----------------------------------------------------- */ @@ -422,15 +447,6 @@ dl.glossary dt { font-size: 1.1em; } -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - .optional { font-size: 1.3em; } @@ -489,6 +505,13 @@ pre { overflow-y: hidden; /* fixes display issues on Chrome browsers */ } +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + td.linenos pre { padding: 5px 0px; border: 0; @@ -580,6 +603,16 @@ span.eqno { float: right; } +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + /* -- printout stylesheet --------------------------------------------------- */ @media print { diff --git a/docs/_build/html/_static/comment-bright.png b/docs/_build/html/_static/comment-bright.png index 551517b8c83b76f734ff791f847829a760ad1903..15e27edb12ac25701ac0ac21b97b52bb4e45415e 100644 GIT binary patch delta 733 zcmZ1@{e^Xcay`>-PZ!6K3dYvS%m<#>2(;awx#aHO+}qo7Z*MCE5g_Jb`CGovX4(Gk zoGZxp*=6BQixZE(l-%syULRBaS^E9en_q5Ao0|RopS84izFlD6U19M$<6lB^x^@?z zahX+p``)ko-D`uj`EKsseWSs4()QzdKQ~{_`?>HeU-%*W`djZJN^|>uvI=~5oO;yb zv*%XT0-La+!;e|3-e^aH0m%l_;7N>^T1?nB=yd2bI%{B&u{-jP?s~&}DqJCK$rPdcf1ljhF^6B}{Pqv$`t`q@Sj%nP{i$Qx+VgWvHeczz zT@inN`zEtoPA=V;UE*p&1`dlvAN3TUuRrdnJwbRTBP;vb2+3UmuzOcuD@Ow>RReJGX8XkJ2=;;3)ojYxDd2-)>L8|KWGbDGOPC(a7Gzdm@WJ zKgcrf4LfqsR(s4|FM0Vr?TpILCn)QYg=}P36re1_LfToFfcGMc)I$ztaD0e0syd^chdj> literal 3500 zcmeAS@N?(olHy`uVBq!ia0y~yU=RRd4mJh`2Kmqb6B!t|{4<@M0|GMgOY(~|@(UC^ zot+g5it^Jkb5a=?DsIgUP7euwDQ5fq-WMnDj)p1T`9Y!_3PDcY4I&fOv=li7ygC#Y 
z-V_ZyDCp|SB7I^(2XkLvLyu^n4o?@?4fZFBi~5=ucXXZl{jhp*_O(6VpPl`;^}5~j zHJ|5hKF@xDgJ+tDaiD1f&q^mb!3WL9j~sjWM9-m-OQnE8fMautV)0*PMus1CHa6Ny z2iW&9FjidV=3qGBJNtlvO#6TSS57ly7#ms`50p%_@aAX`VQ{GGb6UpW;KtZ6cZOOp z!vYqDgwx4Mj~N1V84}d%pIv5XnEPX9H6ugx6h%?S1rr$*gi||Q7&5FG)|?OPa%G4x zU=T_3K4QsmLzlt8Gd#_MVZ(NY1Lp*|H!*PVFc<`Ov}!W2R5K);P*$#BXqm|%;`S-r z=A-sHf#+Hb3>7ms+3ZxYbZw00&@E&S57(2^IOi#@En=3)lsL;l?@;ATNux#+{s*7W zFfc5bC@T1%`Sah3bA0R0oipp(#uu;qYCqe5rKF_C|DK*-UEsjL@UW!r;y)ez%~B24 zYz?{p7OB2t$%x}#Q2M<)Z4p`y@O=6I_x69?7pO8cq<0Cl+ddChsZM4wg zoSb}mqtY6l)yJeZgsoxT-T5{7{Kj$?p+?sOoMtTLP6~-~jvf&{5q>v3Rw&4*mWBM z;71xkr?R{xp9(xRdCHbL!^kmo@vNY`D;9)b+(gCDQtyfSTd=&HE^uY;w7(^p!rXkV4SB0t1_NnPaMPPWLyK8O9C zmjo`@82@1^y}9KJc| z^OQ7u_Qx{{f6OWI<9x2MeER3Qu6rWyH6Lk8FEjDkX_y+5y=vO@X%VYERtK(jf4$di z-P`4Jm+f`k9r?SF&+)jIdW^bzdjC1rxzB@d2H#!IwOq{C*6Qmzy;a#ur5EmYDc_&B z{N0Rs`{Ekr`Q$IZSGo83FPU2Xzaf8fe=Yy&&J)45nMZ}~HqSW;o1>VEfaTMv{`)Bt=EmeC-Yd$W0l9b$8wJ?R%26BRP*+I<@;_{knhfA zn#-(xtbLB3J#%)^*=sXRXWJTHH+nw%`Rwbn?{80ts)*9rwqo0kRL@OMqLxLz+qNgK zHOew_X+&sL>}I#MbJwa~+r4)8a>3<0{U-aZo_8(%@P?1uPHtPdU2?6LI&%W%QE}p)#xcPHOcc*vLX}#%odb6dE-954QiC4+j!q|sZ zm%h%Oy?OSbw<~V1e`oeC^IiL$`DaY#YtD-_F1~R1g!6;Irx)$H=T9}rzQ3l<-`T%0 zed}{&{bc=#;cKsN*uJ!U)%V2r;&$KcisnDIdu(HV&*)xc{2B8D+fQsi?SD9bUVQ6) z=X&M;DgUefUu0O`pvsue+}Y^Z7|rziM^%mD{yuxF+o|6+-F)lVvtXyAsAGTA+NP%m z*D7Y5cr#(G;`iq>&P_a*$=4cC_u|?G_QYq2-G}RV)LYwI!`tlp3OXXBE*efZEx&lj zx%j@zFPCX9_hzRX`dhBo^)l76Tcwv}bIW{}$u9e2dTEhou13s_@XYa+vDDLAXO@?< z@BW{+pW7eJeZ>8^T6~87iP(b5h35`^XOK++$j)a@2UK&eNSO>MJMg@!|EkJKJiW z*sL;zTq9eny@t(`CQc}HTBYbad0YCc^k->hr`M#mZu)mLuY0?`+*J0H*_$V8asq`_FcsvYfVhp7z*&GlH6Rl6;WZH>*2u5DVmSN&JN4WGWQZm(LEZ+2tc!Bb5Kw=!nG z<__h0+FJYi&h@vk{t^0-_qQhgYMU!^o42;`-VfXI_Ukv-+xpYbYTVx4yv=#%;{7%| z_M|R9dG4&WeaMoKFCo7|%-&tPyEXCpkGzZTq^tLQGkJ6McJ!X_f9+20yB*3H>U%Zq zYU*##-++iMhW5@m_DH$FrSi$k@0|C)%kf&>D(k6U^N{_re3@*ztbUf$tWUGtW(Uvu z9i0@t%m0ks#cQwbRsC_lEzdv4#3r)R>vPD-%v&#;E>BMXbiQZq)VZ~FdOz4pLUdd+I0J=aQKe!S?jIhb6=IbviUdpvj6ej zb9T>Kw=*Vt=ew%CTdUK)zm%S5e(vsg((0F3z~PaQdg| 
zllSM=EdBfHz-!6%w~rk94mwmtZKEMBy8ijg|{~13weht2` z{P$$Z$&>%HFwgk>C)rr{HUk3#TavfC3&Vd9T(EcfWCjKX&H|6fVg?3oVGw3ym^DX& zfq{X&#M9T6{T2tOxEYtz$`~642Bv$SE{-7;gzxdtWPQzTs+edCC z2xm2&&xqFTaMf^g?Gibd=r~72aY2&j>IDx@OwgL-#(bO;=yC-+#B6T$;`JcyMEx_KSQ?PyC&2!r`lQHIQi{*TG^YIc|XcdpFLWUc+#Wj z{Hax5lO&h9onYFTAU)^A2bWKWc7{jwuZuk}FZk_W_W3_lgEh~DvkE$%5uTH@Q;?^| zz}xmwaK5>SOIuw`UE!m%y6-s-oSq-^Gx4+UQFbxm$x&iD!RlJ|&%Vr@$bO<$;j`Pj z=A8-C+s^sr^;fYyGcz~Y@$Gcz37d>1K|vE<>}qab;3mf6D>^kHP3hj{OFP$YRsEzj zdsB zE^h7Bw9;RXUR?Qc%fv-Zid^gd)jSryddj>#uTsh({_iOb`Iizh z((!exj3T%0Y?YdrRLNDz%OT*(!I-@*=G>h9MrGf2bAM;N@V9dBjo#n;pM5%ccf;rA zcj@20?|JQYbhrGSy$c2VL)0d(nf7FLr0HyVo8W&HulBa{ee}Om{bT#qozLIyT-Yd> z?=QD@&bt?XUp~y;`_%g8*MGlE9ba?)n|eEL)|;wthmXE=zrSkEvdJPr;B4q1!L|x<~d(2MA|-nS9vRu{dsfl?QL*$*YUmYxwL}o|NqUi zYVd!yPFz4;I92+CO5o1I^IX$q{%=Wqc+o#vRN~hXr)Q1)#_gf^zgzzE-*hcqK>KJD zi>}x{3&WJ(?d7)S`=9fzzF#H8hu{#-ZQ*(<)Me&28B*WPmzr^wauZame$r%yui zx6QdLsb^=u=wz3@n|@mTZspg`&9S_eG&Nzz( zcg90Jd;$!SEfHK7njT(cdC0|a>5R`3k$v8?e*Au~qx#}@zh0|n^yDq7I*d2|3F&Z# zZRu|CUw<&_Ve*IP3ii?ktUnDOe1BB`y4Ze3qb+as z_D8p+pF2b?KJbRI;-)(1J@%TmE$rsS^K3sSRL6ST>?(Zv@NN6W{-yI-I6Z^BzIEU9 z6ew8jmAl7t4%4YkAFu9Rtzoybo_kZy?DJEXZ(Axa7b`sJDs$7sEtlV3Iw{<`&VR1S zl&0pK8;U}X2OS(Z9+W?p+w>)`e}4a4w#vN|;`o<*I#iLU)i}>u(r{AgzTjOK|AgkkT%|9BSemb2~OwP%I*qtyE={0}p>%#^vT za9Q(m%w>Hw{(o<$r!C+2d+*JUDIesY9zQVYN9cQnnX7(pJL7QdNBP8hhAWkY4a$El S_cAarFnGH9xvX7ms+3ZxYbZw00&@E&S57(2^IOi#@En=3)lsL;l?@;ATNux#+{s*7W zFfc5bC@T1%`Sah3bA0R0oipp(#uu;qYCqe5rKF_C|DK*-UEsjL@UW!r;y)ez%~B24 zYz?{p7OB2t$%x}#Q2M<)Z4p`y@O=6I_x69?7pO8cq<0Cl+ddChsZM4wg zoSb}mqtY6l)yJeZgsoxT-T5{7{Kj$?p+?sOoMtTLP6~-~jvf&{5q>v3Rw&4*mWBM z;71xkr?R{xp9(xRdCHbL!^kmo@vNY`D;9)b+(gCDQtyfSTd=&HE^uY;w7(^p!rXkV4SB0t1_NnPaMPPWLyK8O9C zmjo`@82@1^y}9KJc| z^OQ7u_Qx{{f6OWI<9x2MeER3Qu6rWyH6Lk8FEjDkX_y+5y=vO@X%VYERtK(jf4$di z-P`4Jm+f`k9r?SF&+)jIdW^bzdjC1rxzB@d2H#!IwOq{C*6Qmzy;a#ur5EmYDc_&B z{N0Rs`{Ekr`Q$IZSGo83FPU2Xzaf8fe=Yy&&J)45nMZ}~HqSW;o1>VEfaTMv{`)Bt=EmeC-Yd$W0l9b$8wJ?R%26BRP*+I<@;_{knhfA 
zn#-(xtbLB3J#%)^*=sXRXWJTHH+nw%`Rwbn?{80ts)*9rwqo0kRL@OMqLxLz+qNgK zHOew_X+&sL>}I#MbJwa~+r4)8a>3<0{U-aZo_8(%@P?1uPHtPdU2?6LI&%W%QE}p)#xcPHOcc*vLX}#%odb6dE-954QiC4+j!q|sZ zm%h%Oy?OSbw<~V1e`oeC^IiL$`DaY#YtD-_F1~R1g!6;Irx)$H=T9}rzQ3l<-`T%0 zed}{&{bc=#;cKsN*uJ!U)%V2r;&$KcisnDIdu(HV&*)xc{2B8D+fQsi?SD9bUVQ6) z=X&M;DgUefUu0O`pvsue+}Y^Z7|rziM^%mD{yuxF+o|6+-F)lVvtXyAsAGTA+NP%m z*D7Y5cr#(G;`iq>&P_a*$=4cC_u|?G_QYq2-G}RV)LYwI!`tlp3OXXBE*efZEx&lj zx%j@zFPCX9_hzRX`dhBo^)l76Tcwv}bIW{}$u9e2dTEhou13s_@XYa+vDDLAXO@?< z@BW{+pW7eJeZ>8^T6~87iP(b5h35`^XOK++$j)a@2UK&eNSO>MJMg@!|EkJKJiW z*sL;zTq9eny@t(`CQc}HTBYbad0YCc^k->hr`M#mZu)mLuY0?`+*J0H*_$V8asq`_FcsvYfVhp7z*&GlH6Rl6;WZH>*2u5DVmSN&JN4WGWQZm(LEZ+2tc!Bb5Kw=!nG z<__h0+FJYi&h@vk{t^0-_qQhgYMU!^o42;`-VfXI_Ukv-+xpYbYTVx4yv=#%;{7%| z_M|R9dG4&WeaMoKFCo7|%-&tPyEXCpkGzZTq^tLQGkJ6McJ!X_f9+20yB*3H>U%Zq zYU*##-++iMhW5@m_DH$FrSi$k@0|C)%kf&>D(k6U^N{_re3@*ztbUf$tWUGtW(Uvu z9i0@t%m0ks#cQwbRsC_lEzdv4#3r)R>vPD-%v&#;E>BMXbiQZq)VZ~FdOz4pLUdd+I0J=aQKe!S?jIhb6=IbviUdpvj6ej zb9T>Kw=*Vt=ew%CTdUK)zm%S5e(vsg((0F3z~PaQdg| zllSM=EdBfHz-!6%w~rk94mwmtZKEMBy8ijg|{~13weht2` z{P$$Z$&>%HFwgk>C)rr{HUk3#TavfC3&Vd9T(EcfWCjKX&H|6fVg?3oVGw3ym^DX& zfq{X&#M9T6{T7Fiu(;x**(dHYFfi+Qx;TbdoKBq-?=iPj;<)|!_qF$n`{o&+t^CA) zdg``i-JwRlWr2bxuXVXyab2U=b@8m2h@|x4qfKmUk8~wnbX9S2S=%jfI?By7soo** zjM6Ntxkk%p&yrgv*T3)m@9Oz^e0r~nzdmhz{@W)02Uq1g`!DQ!bH6k7ZuS^t~ zuxU@0invDB6paoJR^39Es^dlLzsaooQ}C8~mj9J4vNdm#5>(9Qin_1_dOwjc2~lE| zUinLG<&?m8T=4Cc73C9d&rqwNG zLF$*hW1p`x*4}d@g*NQh8^p^4+zY~Mg}jS1QX+k(DH!tCC>yrkIFoX4V_*2Zvf_QdPnb%( zAG2L=j=1w+#{Yd)#1DtLBOIc811TcZbW@BX>V>Yn`JRx3VyuFdRJS4Y*n zRmm*pOFsz7&91InP*c%xYGIw+OAD{6vZg1mlzgA>yVicLrcM0Uy3JABvMmFr)V=$* z>d=1$V|iBCB#KD~MW?_<-v_5Wr*P5WQFz>j^N&)TU^_g(vFy7%+n z*RgN^hRv_6{_y;qZ@*leXt`6OtKw>dxz{CIlmAS&j@uoy{d@PBtFrH7e;3~Jd>cCB zr2ZMrEgCi}3>H1mPWhj{p)fw)_3OTQR*&8v6WMnrUN*cUOp$fLg$eVY{$u3Gsx#Ml TpAyW#z`)??>gTe~DWM4fxevPs diff --git a/docs/_build/html/_static/comment.png b/docs/_build/html/_static/comment.png index 
92feb52b8824c6b0f59b658b1196c61de9162a95..dfbc0cbd512bdeefcb1984c99d8e577efb77f006 100644 GIT binary patch delta 617 zcmew=)yO(Qxt__x)5S5Qg0Xc1bB}Di$g$`Bx57ehhuzuG7kGTD=xVnR^HsUKW8c5A z=|Ah+eCzFA+vj#FuP4{+{8PssW2ctqyXotPhu;5fw8Pm%IDf4%Z`$@qbp6_@ldHMs zPdh1m-oB^&(W`F7)vVj*S;SxT7g)=6^TWO6CezhK+PN0_o(>kPf2BTu<63F`OS3<| zpJQ8eZBN4dLt!Tj6j$!BFHu*KysNxHz3EAS97DpAshMnvRE<1%citxH3&FWOx*hAA?teH2MMPh3vK!> z+pDq0$)tIfQO&Gt*$x|~%0HNUz;=DWGROM;GjE=lm1@0!%j4|YO`*(9ZyzM=clx?` zXZz7h4jM0+r+7XPK6rkXaL`@el@Hx^D6L>fe5;{%PN619tNih~d%{sp`bkmWIqTEf z&$IY0lMmGHSRt9B*md@nqJgm4YVoPBO!efycOS8IEo9bqU@%^Avvbu-*|6Zl-5sw| z7%dh*njt54#(Mq*CKfgC0*?#>hK4k2h6eeKr$jOyNAA>#tr2;D&?h-BRq?L){#+zZ}zCxZXpCY3+~o6>l$oEKbecDYdaF+bGY*|0@fxW!0V~mhBf8 eKg^M+XXL+9qP0`n_$>ni1B0ilpUXO@geCxIwjL?~ literal 3445 zcmeAS@N?(olHy`uVBq!ia0y~yU=RRd4mJh`2Kmqb6B!t|{4<@M0|GMgOY(~|@(UC^ zot+g5it^Jkb5a=?DsIgUP7euwDQ5fq-WMnDj)p1T`9Y!_3PDcY4I&fOv=li7ygC#Y z-V_ZyDCp|SB7I^(2XkLvLyu^n4o?@?4fZFBi~5=ucXXZl{jhp*_O(6VpPl`;^}5~j zHJ|5hKF@xDgJ+tDaiD1f&q^mb!3WL9j~sjWM9-m-OQnE8fMautV)0*PMus1CHa6Ny z2iW&9FjidV=3qGBJNtlvO#6TSS57ly7#ms`50p%_@aAX`VQ{GGb6UpW;KtZ6cZOOp z!vYqDgwx4Mj~N1V84}d%pIv5XnEPX9H6ugx6h%?S1rr$*gi||Q7&5FG)|?OPa%G4x zU=T_3K4QsmLzlt8Gd#_MVZ(NY1Lp*|H!*PVFc<`Ov}!W2R5K);P*$#BXqm|%;`S-r z=A-sHf#+Hb3>7ms+3ZxYbZw00&@E&S57(2^IOi#@En=3)lsL;l?@;ATNux#+{s*7W zFfc5bC@T1%`Sah3bA0R0oipp(#uu;qYCqe5rKF_C|DK*-UEsjL@UW!r;y)ez%~B24 zYz?{p7OB2t$%x}#Q2M<)Z4p`y@O=6I_x69?7pO8cq<0Cl+ddChsZM4wg zoSb}mqtY6l)yJeZgsoxT-T5{7{Kj$?p+?sOoMtTLP6~-~jvf&{5q>v3Rw&4*mWBM z;71xkr?R{xp9(xRdCHbL!^kmo@vNY`D;9)b+(gCDQtyfSTd=&HE^uY;w7(^p!rXkV4SB0t1_NnPaMPPWLyK8O9C zmjo`@82@1^y}9KJc| z^OQ7u_Qx{{f6OWI<9x2MeER3Qu6rWyH6Lk8FEjDkX_y+5y=vO@X%VYERtK(jf4$di z-P`4Jm+f`k9r?SF&+)jIdW^bzdjC1rxzB@d2H#!IwOq{C*6Qmzy;a#ur5EmYDc_&B z{N0Rs`{Ekr`Q$IZSGo83FPU2Xzaf8fe=Yy&&J)45nMZ}~HqSW;o1>VEfaTMv{`)Bt=EmeC-Yd$W0l9b$8wJ?R%26BRP*+I<@;_{knhfA zn#-(xtbLB3J#%)^*=sXRXWJTHH+nw%`Rwbn?{80ts)*9rwqo0kRL@OMqLxLz+qNgK 
zHOew_X+&sL>}I#MbJwa~+r4)8a>3<0{U-aZo_8(%@P?1uPHtPdU2?6LI&%W%QE}p)#xcPHOcc*vLX}#%odb6dE-954QiC4+j!q|sZ zm%h%Oy?OSbw<~V1e`oeC^IiL$`DaY#YtD-_F1~R1g!6;Irx)$H=T9}rzQ3l<-`T%0 zed}{&{bc=#;cKsN*uJ!U)%V2r;&$KcisnDIdu(HV&*)xc{2B8D+fQsi?SD9bUVQ6) z=X&M;DgUefUu0O`pvsue+}Y^Z7|rziM^%mD{yuxF+o|6+-F)lVvtXyAsAGTA+NP%m z*D7Y5cr#(G;`iq>&P_a*$=4cC_u|?G_QYq2-G}RV)LYwI!`tlp3OXXBE*efZEx&lj zx%j@zFPCX9_hzRX`dhBo^)l76Tcwv}bIW{}$u9e2dTEhou13s_@XYa+vDDLAXO@?< z@BW{+pW7eJeZ>8^T6~87iP(b5h35`^XOK++$j)a@2UK&eNSO>MJMg@!|EkJKJiW z*sL;zTq9eny@t(`CQc}HTBYbad0YCc^k->hr`M#mZu)mLuY0?`+*J0H*_$V8asq`_FcsvYfVhp7z*&GlH6Rl6;WZH>*2u5DVmSN&JN4WGWQZm(LEZ+2tc!Bb5Kw=!nG z<__h0+FJYi&h@vk{t^0-_qQhgYMU!^o42;`-VfXI_Ukv-+xpYbYTVx4yv=#%;{7%| z_M|R9dG4&WeaMoKFCo7|%-&tPyEXCpkGzZTq^tLQGkJ6McJ!X_f9+20yB*3H>U%Zq zYU*##-++iMhW5@m_DH$FrSi$k@0|C)%kf&>D(k6U^N{_re3@*ztbUf$tWUGtW(Uvu z9i0@t%m0ks#cQwbRsC_lEzdv4#3r)R>vPD-%v&#;E>BMXbiQZq)VZ~FdOz4pLUdd+I0J=aQKe!S?jIhb6=IbviUdpvj6ej zb9T>Kw=*Vt=ew%CTdUK)zm%S5e(vsg((0F3z~PaQdg| zllSM=EdBfHz-!6%w~rk94mwmtZKEMBy8ijg|{~13weht2` z{P$$Z$&>%HFwgk>C)rr{HUk3#TavfC3&Vd9T(EcfWCjKX&H|6fVg?3oVGw3ym^DX& zfq{X&#M9T6{T2r|gQ4~NuzRb{WJY}uRaOqM z&Rs0LRdKG#%sO0JXP#( zXy`c5tTEl^`LwY4_evo&k#=aV(hH*- zA*bHOuI&htoH%I$&*pDDS4ze=Vl$4Zh zNl$)yVe0hokSO&fUyYfH`cAydN>#eEwPK%T{r_*X*E4Sjx8szP-kF=U@?uR=<LIk)#YzhIRssVetqqKx-)9;M%jjk&Ts$U`}0NRYq?y?p95b$Tx(o?&tq9``1iaQ zY$b_XHM(ScZR)Ho_kVdfFV9(N00RTiFK`}@GdlkfB8 z|0h%hFMoJz`_Fh8J1gsoH@hbaCjEqnh9BH~;eVP>Z4}_TYs9l&;f7o45x0xlI(ikZUNYQF zALUn^{l6f`F-Z6nm&)z%3A01&n+%v)Gw+JDXc{)K%H%dnF)*BC+J8PV^!)|~1_lOC LS3j3^P6QU4nJ@LmUDMkkHg6$}gv>?NMQuI#tixfpcC%7omM85kJ+0(?ST85kHg$H!9(v0iy- z&%nSSSrX(I%)rdTBdBC*>k<>6Shiy2fh#Xxef;w4_n&`%R%oM6BW;_|Gqyk`H9BQ#a%rvZjr^o(+z%^sh^6r7dlh+ z=JM*<-wvnQNzGIWc79U&v1aF4ksHr;d@(wC<@08Zh#%U1n>Rm5o4_XY+H2J+56k1h a`M?W#-Oy@Uh-ExrK+T_+x{>*lf s$0}3vW*ASm}54I+^k1$CmXeEw{uN7#J8lUHx3vIVCg!08gDwr2qf` literal 347 zcmeAS@N?(olHy`uVBq!ia0y~yU=RRd4rT@h1`S>QU4nJ@LmUDMkkHg6$}gv>?NMQuI#ti8O4=2j=l}8W?*3O3-AeXWnf@9NYh7ML)4+aK? 
zL{AsT5DCfNgOOrQ1|lv8y=P2RJhT4${=noX8b23z^|-i276(r^_+_SkD%xJ?Oxc^u zt7m^ZoMtCAQz_W_N$JO$oo7XEJlpZb=;W2pn>8YSX!~v6{2*-to6u{oRjWKKj|b=f Y&SDeWqqRhkfq{X+)78&qol`;+06>VDc>n+a diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png index 254c60bfbe2715ae2edca48ebccfd074deb8031d..a858a410e4faa62ce324d814e4b816fff83a6fb3 100644 GIT binary patch delta 270 zcmaFHG>>V5WIY=L1B3kM|A`C?3{O2>978G?*G_H@Vq+9({jOQvnsZ&@_BOVgGHJJM z9o9YNo@2Ft=RV{6zv`Z^Yx(!@-_B2;iXv1TKZx^3=zeqItgEm8`1ddGr_Y~-8;*Bs zbOng0?rZ2}XJ@ytv0<^avf_IDH>a+?DdPK^$GyLwshyDA zd(AccfJmIl)^f(c+C%3KGqP=q`!hrQ%J;L2_ti77RQ)kYcId$sPjN+EelD6E+{@kibdDWBhswWrCKi}Bc z*w!hTy{ zfq{WB$=lsUWeeNSnG6gJIi4<#Arg|T2Oa$m1u(E4sNBz=bNC$V$A8O=X z8ZSeA@OFKMn*UnogF7y=`aiW%@>sZo<1mA&yMvjA)Zsa-N0|%SgvRir9{?BaVZN11_lOCS3j3^P6= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + 
continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = jQuery.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isArray: Array.isArray, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // 
Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? 
+ [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. 
+ if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! 
 * Sizzle CSS Selector Engine v2.3.0
 * https://sizzlejs.com/
 *
 * Copyright jQuery Foundation and other contributors
 * Released under the MIT license
 * http://jquery.org/license
 *
 * Date: 2016-01-04
 */
(function( window ) {

var i,
	support,
	Expr,
	getText,
	isXML,
	tokenize,
	compile,
	select,
	outermostContext,
	sortInput,
	hasDuplicate,

	// Local document vars
	setDocument,
	document,
	docElem,
	documentIsHTML,
	rbuggyQSA,
	rbuggyMatches,
	matches,
	contains,

	// Instance-specific data
	expando = "sizzle" + 1 * new Date(),
	preferredDoc = window.document,
	dirruns = 0,
	done = 0,
	classCache = createCache(),
	tokenCache = createCache(),
	compilerCache = createCache(),
	// Default comparator: only flags duplicates, real ordering is
	// installed by setDocument once a document is known
	sortOrder = function( a, b ) {
		if ( a === b ) {
			hasDuplicate = true;
		}
		return 0;
	},

	// Instance methods
	hasOwn = ({}).hasOwnProperty,
	arr = [],
	pop = arr.pop,
	push_native = arr.push,
	push = arr.push,
	slice = arr.slice,
	// Use a stripped-down indexOf as it's faster than native
	// https://jsperf.com/thor-indexof-vs-for/5
	indexOf = function( list, elem ) {
		var i = 0,
			len = list.length;
		for ( ; i < len; i++ ) {
			if ( list[i] === elem ) {
				return i;
			}
		}
		return -1;
	},

	booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",

	// Regular expressions (built as strings so fragments can be composed)

	// http://www.w3.org/TR/css3-selectors/#whitespace
	whitespace = "[\\x20\\t\\r\\n\\f]",

	// http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
	identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+",

	// Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors
	attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace +
		// Operator (capture 2)
		"*([*^$|!~]?=)" + whitespace +
		// "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]"
		"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace +
		"*\\]",

	pseudos = ":(" + identifier + ")(?:\\((" +
		// To reduce the number of selectors needing tokenize in the preFilter, prefer arguments:
		// 1. quoted (capture 3; capture 4 or capture 5)
		"('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" +
		// 2. simple (capture 6)
		"((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" +
		// 3. anything else (capture 2)
		".*" +
		")\\)|)",

	// Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter
	rwhitespace = new RegExp( whitespace + "+", "g" ),
	rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ),

	rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ),
	rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ),

	rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ),

	rpseudo = new RegExp( pseudos ),
	ridentifier = new RegExp( "^" + identifier + "$" ),

	// One compiled matcher per token type, used by the tokenizer
	matchExpr = {
		"ID": new RegExp( "^#(" + identifier + ")" ),
		"CLASS": new RegExp( "^\\.(" + identifier + ")" ),
		"TAG": new RegExp( "^(" + identifier + "|[*])" ),
		"ATTR": new RegExp( "^" + attributes ),
		"PSEUDO": new RegExp( "^" + pseudos ),
		"CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace +
			"*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace +
			"*(\\d+)|))" + whitespace + "*\\)|)", "i" ),
		"bool": new RegExp( "^(?:" + booleans + ")$", "i" ),
		// For use in libraries implementing .is()
		// We use this for POS matching in `select`
		"needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" +
			whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" )
	},

	rinputs = /^(?:input|select|textarea|button)$/i,
	rheader = /^h\d$/i,

	// Detects native-code functions by their source text
	rnative = /^[^{]+\{\s*\[native \w/,

	//
// Easily-parseable/retrievable ID or TAG or CLASS selectors
	rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,

	rsibling = /[+~]/,

	// CSS escapes
	// http://www.w3.org/TR/CSS21/syndata.html#escaped-characters
	runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ),
	funescape = function( _, escaped, escapedWhitespace ) {
		var high = "0x" + escaped - 0x10000;
		// NaN means non-codepoint
		// Support: Firefox<24
		// Workaround erroneous numeric interpretation of +"0x"
		// (high !== high is a NaN self-inequality test)
		return high !== high || escapedWhitespace ?
			escaped :
			high < 0 ?
				// BMP codepoint
				String.fromCharCode( high + 0x10000 ) :
				// Supplemental Plane codepoint (surrogate pair)
				String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 );
	},

	// CSS string/identifier serialization
	// https://drafts.csswg.org/cssom/#common-serializing-idioms
	rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\x80-\uFFFF\w-]/g,
	fcssescape = function( ch, asCodePoint ) {
		if ( asCodePoint ) {

			// U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER
			if ( ch === "\0" ) {
				return "\uFFFD";
			}

			// Control characters and (dependent upon position) numbers get escaped as code points
			return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " ";
		}

		// Other potentially-special ASCII characters get backslash-escaped
		return "\\" + ch;
	},

	// Used for iframes
	// See setDocument()
	// Removing the function wrapper causes a "Permission Denied"
	// error in IE
	unloadHandler = function() {
		setDocument();
	},

	// Matcher for the nearest disabled ancestor (addCombinator is
	// defined elsewhere in this file)
	disabledAncestor = addCombinator(
		function( elem ) {
			return elem.disabled === true;
		},
		{ dir: "parentNode", next: "legend" }
	);

// Optimize for push.apply( _, NodeList )
try {
	push.apply(
		(arr = slice.call( preferredDoc.childNodes )),
		preferredDoc.childNodes
	);
	// Support: Android<4.0
	// Detect silently failing push.apply
	arr[ preferredDoc.childNodes.length ].nodeType;
} catch ( e )
{
	// Fallback push when push.apply can't handle NodeLists
	push = { apply: arr.length ?

		// Leverage slice if possible
		function( target, els ) {
			push_native.apply( target, slice.call(els) );
		} :

		// Support: IE<9
		// Otherwise append directly
		function( target, els ) {
			var j = target.length,
				i = 0;
			// Can't trust NodeList.length
			while ( (target[j++] = els[i++]) ) {}
			target.length = j - 1;
		}
	};
}

/**
 * Main selection entry point: find elements matching `selector` within
 * `context` (defaults to the document), appending them to `results`.
 * Fast paths cover simple ID/TAG/CLASS selectors and querySelectorAll;
 * everything else falls through to the compiled `select`.
 */
function Sizzle( selector, context, results, seed ) {
	var m, i, elem, nid, match, groups, newSelector,
		newContext = context && context.ownerDocument,

		// nodeType defaults to 9, since context defaults to document
		nodeType = context ? context.nodeType : 9;

	results = results || [];

	// Return early from calls with invalid selector or context
	if ( typeof selector !== "string" || !selector ||
		nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) {

		return results;
	}

	// Try to shortcut find operations (as opposed to filters) in HTML documents
	if ( !seed ) {

		if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) {
			setDocument( context );
		}
		context = context || document;

		if ( documentIsHTML ) {

			// If the selector is sufficiently simple, try using a "get*By*" DOM method
			// (excepting DocumentFragment context, where the methods don't exist)
			if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) {

				// ID selector
				if ( (m = match[1]) ) {

					// Document context
					if ( nodeType === 9 ) {
						if ( (elem = context.getElementById( m )) ) {

							// Support: IE, Opera, Webkit
							// TODO: identify versions
							// getElementById can match elements by name instead of ID
							if ( elem.id === m ) {
								results.push( elem );
								return results;
							}
						} else {
							return results;
						}

					// Element context
					} else {

						// Support: IE, Opera, Webkit
						// TODO: identify versions
						// getElementById can match elements by name instead of ID
						if ( newContext && (elem = newContext.getElementById( m )) &&
							contains( context, elem ) &&
							elem.id === m ) {

							results.push( elem );
							return results;
						}
					}

				// Type selector
				} else if ( match[2] ) {
					push.apply( results, context.getElementsByTagName( selector ) );
					return results;

				// Class selector
				} else if ( (m = match[3]) && support.getElementsByClassName &&
					context.getElementsByClassName ) {

					push.apply( results, context.getElementsByClassName( m ) );
					return results;
				}
			}

			// Take advantage of querySelectorAll
			if ( support.qsa &&
				!compilerCache[ selector + " " ] &&
				(!rbuggyQSA || !rbuggyQSA.test( selector )) ) {

				if ( nodeType !== 1 ) {
					newContext = context;
					newSelector = selector;

				// qSA looks outside Element context, which is not what we want
				// Thanks to Andrew Dupont for this workaround technique
				// Support: IE <=8
				// Exclude object elements
				} else if ( context.nodeName.toLowerCase() !== "object" ) {

					// Capture the context ID, setting it first if necessary
					if ( (nid = context.getAttribute( "id" )) ) {
						nid = nid.replace( rcssescape, fcssescape );
					} else {
						context.setAttribute( "id", (nid = expando) );
					}

					// Prefix every selector in the list
					groups = tokenize( selector );
					i = groups.length;
					while ( i-- ) {
						groups[i] = "#" + nid + " " + toSelector( groups[i] );
					}
					newSelector = groups.join( "," );

					// Expand context for sibling selectors
					newContext = rsibling.test( selector ) && testContext( context.parentNode ) ||
						context;
				}

				if ( newSelector ) {
					try {
						push.apply( results,
							newContext.querySelectorAll( newSelector )
						);
						return results;
					} catch ( qsaError ) {
						// Swallow qSA errors and fall through to `select`
					} finally {
						// Only remove the id we set ourselves
						if ( nid === expando ) {
							context.removeAttribute( "id" );
						}
					}
				}
			}
		}
	}

	// All others
	return select( selector.replace( rtrim, "$1" ), context, results, seed );
}

/**
 * Create key-value caches of limited size
 * @returns {function(string, object)} Returns the Object data after storing it on itself with
 *	property name the (space-suffixed) string and (if the
 *	cache is larger than Expr.cacheLength) deleting the oldest entry
 */
function createCache() {
	var keys = [];

	function cache( key, value ) {
		// Use (key + " ") to avoid collision with native prototype properties (see Issue #157)
		if ( keys.push( key + " " ) > Expr.cacheLength ) {
			// Only keep the most recent entries
			delete cache[ keys.shift() ];
		}
		return (cache[ key + " " ] = value);
	}
	return cache;
}

/**
 * Mark a function for special use by Sizzle
 * @param {Function} fn The function to mark
 */
function markFunction( fn ) {
	fn[ expando ] = true;
	return fn;
}

/**
 * Support testing using an element
 * @param {Function} fn Passed the created element and returns a boolean result
 */
function assert( fn ) {
	var el = document.createElement("fieldset");

	try {
		return !!fn( el );
	} catch (e) {
		// Any exception during the feature test means "unsupported"
		return false;
	} finally {
		// Remove from its parent by default
		if ( el.parentNode ) {
			el.parentNode.removeChild( el );
		}
		// release memory in IE
		el = null;
	}
}

/**
 * Adds the same handler for all of the specified attrs
 * @param {String} attrs Pipe-separated list of attributes
 * @param {Function} handler The method that will be applied
 */
function addHandle( attrs, handler ) {
	var arr = attrs.split("|"),
		i = arr.length;

	while ( i-- ) {
		Expr.attrHandle[ arr[i] ] = handler;
	}
}

/**
 * Checks document order of two siblings
 * @param {Element} a
 * @param {Element} b
 * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b
 */
function siblingCheck( a, b ) {
	var cur = b && a,
		diff = cur && a.nodeType === 1 && b.nodeType === 1 &&
			a.sourceIndex - b.sourceIndex;

	// Use IE sourceIndex if available on both nodes
	if ( diff ) {
		return diff;
	}

	// Check if b follows a
	if ( cur ) {
		while ( (cur = cur.nextSibling) ) {
			if ( cur === b ) {
				return -1;
			}
		}
	}

	return a ? 1 : -1;
}

/**
 * Returns a function to use in pseudos for input types
 * @param {String} type
 */
function createInputPseudo( type ) {
	return function( elem ) {
		var name = elem.nodeName.toLowerCase();
		return name === "input" && elem.type === type;
	};
}

/**
 * Returns a function to use in pseudos for buttons
 * @param {String} type
 */
function createButtonPseudo( type ) {
	return function( elem ) {
		var name = elem.nodeName.toLowerCase();
		return (name === "input" || name === "button") && elem.type === type;
	};
}

/**
 * Returns a function to use in pseudos for :enabled/:disabled
 * @param {Boolean} disabled true for :disabled; false for :enabled
 */
function createDisabledPseudo( disabled ) {
	// Known :disabled false positives:
	// IE: *[disabled]:not(button, input, select, textarea, optgroup, option, menuitem, fieldset)
	// not IE: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable
	return function( elem ) {

		// Check form elements and option elements for explicit disabling
		return "label" in elem && elem.disabled === disabled ||
			"form" in elem && elem.disabled === disabled ||

			// Check non-disabled form elements for fieldset[disabled] ancestors
			"form" in elem && elem.disabled === false && (
				// Support: IE6-11+
				// Ancestry is covered for us
				elem.isDisabled === disabled ||

				// Otherwise, assume any non-