Merge remote-tracking branch 'upstream/master'

This commit is contained in:
Stephan 2019-05-28 08:55:50 +02:00
commit d0de38601d
110 changed files with 22567 additions and 20241 deletions

1
.gitignore vendored
View File

@ -18,3 +18,4 @@ venv/
.python-version
.vscode/
tests/file.tmp
.eggs/

View File

@ -54,3 +54,5 @@ Moto is written by Steve Pulec with contributions from:
* [William Richard](https://github.com/william-richard)
* [Alex Casalboni](https://github.com/alexcasalboni)
* [Jon Beilke](https://github.com/jrbeilke)
* [Craig Anderson](https://github.com/craiga)
* [Robert Lewis](https://github.com/ralewis85)

File diff suppressed because it is too large Load Diff

View File

@ -47,7 +47,7 @@ def test_my_model_save():
body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8")
assert body == b'is awesome'
assert body == 'is awesome'
```
With the decorator wrapping the test, all the calls to s3 are automatically mocked out. The mock keeps the state of the buckets and keys.

View File

@ -36,6 +36,7 @@ from .polly import mock_polly # flake8: noqa
from .rds import mock_rds, mock_rds_deprecated # flake8: noqa
from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa
from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa
from .resourcegroups import mock_resourcegroups # flake8: noqa
from .s3 import mock_s3, mock_s3_deprecated # flake8: noqa
from .ses import mock_ses, mock_ses_deprecated # flake8: noqa
from .secretsmanager import mock_secretsmanager # flake8: noqa

View File

@ -1,4 +1,7 @@
from __future__ import unicode_literals
import random
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
@ -159,13 +162,7 @@ class FakeAutoScalingGroup(BaseModel):
self.autoscaling_backend = autoscaling_backend
self.name = name
if not availability_zones and not vpc_zone_identifier:
raise AutoscalingClientError(
"ValidationError",
"At least one Availability Zone or VPC Subnet is required."
)
self.availability_zones = availability_zones
self.vpc_zone_identifier = vpc_zone_identifier
self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier)
self.max_size = max_size
self.min_size = min_size
@ -188,6 +185,35 @@ class FakeAutoScalingGroup(BaseModel):
self.tags = tags if tags else []
self.set_desired_capacity(desired_capacity)
def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False):
# for updates, if only AZs are provided, they must not clash with
# the AZs of existing VPCs
if update and availability_zones and not vpc_zone_identifier:
vpc_zone_identifier = self.vpc_zone_identifier
if vpc_zone_identifier:
# extract azs for vpcs
subnet_ids = vpc_zone_identifier.split(',')
subnets = self.autoscaling_backend.ec2_backend.get_all_subnets(subnet_ids=subnet_ids)
vpc_zones = [subnet.availability_zone for subnet in subnets]
if availability_zones and set(availability_zones) != set(vpc_zones):
raise AutoscalingClientError(
"ValidationError",
"The availability zones of the specified subnets and the Auto Scaling group do not match",
)
availability_zones = vpc_zones
elif not availability_zones:
if not update:
raise AutoscalingClientError(
"ValidationError",
"At least one Availability Zone or VPC Subnet is required."
)
return
self.availability_zones = availability_zones
self.vpc_zone_identifier = vpc_zone_identifier
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
@ -246,8 +272,8 @@ class FakeAutoScalingGroup(BaseModel):
health_check_period, health_check_type,
placement_group, termination_policies,
new_instances_protected_from_scale_in=None):
if availability_zones:
self.availability_zones = availability_zones
self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier, update=True)
if max_size is not None:
self.max_size = max_size
if min_size is not None:
@ -257,8 +283,6 @@ class FakeAutoScalingGroup(BaseModel):
self.launch_config = self.autoscaling_backend.launch_configurations[
launch_config_name]
self.launch_config_name = launch_config_name
if vpc_zone_identifier is not None:
self.vpc_zone_identifier = vpc_zone_identifier
if health_check_period is not None:
self.health_check_period = health_check_period
if health_check_type is not None:
@ -319,7 +343,8 @@ class FakeAutoScalingGroup(BaseModel):
self.launch_config.user_data,
self.launch_config.security_groups,
instance_type=self.launch_config.instance_type,
tags={'instance': propagated_tags}
tags={'instance': propagated_tags},
placement=random.choice(self.availability_zones),
)
for instance in reservation.instances:
instance.autoscaling_group = self

View File

@ -404,7 +404,7 @@ ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<AttachLoadBalancerTargetGroups
<AttachLoadBalancerTargetGroupsResult>
</AttachLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</AttachLoadBalancerTargetGroupsResponse>"""
@ -412,7 +412,7 @@ ATTACH_INSTANCES_TEMPLATE = """<AttachInstancesResponse xmlns="http://autoscalin
<AttachInstancesResult>
</AttachInstancesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</AttachInstancesResponse>"""
@ -428,7 +428,7 @@ DESCRIBE_LOAD_BALANCER_TARGET_GROUPS = """<DescribeLoadBalancerTargetGroupsRespo
</LoadBalancerTargetGroups>
</DescribeLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DescribeLoadBalancerTargetGroupsResponse>"""
@ -454,7 +454,7 @@ DETACH_INSTANCES_TEMPLATE = """<DetachInstancesResponse xmlns="http://autoscalin
</Activities>
</DetachInstancesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DetachInstancesResponse>"""
@ -462,7 +462,7 @@ DETACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<DetachLoadBalancerTargetGroups
<DetachLoadBalancerTargetGroupsResult>
</DetachLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DetachLoadBalancerTargetGroupsResponse>"""
@ -499,7 +499,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
{% for instance_state in group.instance_states %}
<member>
<HealthStatus>{{ instance_state.health_status }}</HealthStatus>
<AvailabilityZone>us-east-1e</AvailabilityZone>
<AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
<InstanceId>{{ instance_state.instance.id }}</InstanceId>
<LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
<LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
@ -585,7 +585,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """<DescribeAutoScalingInstancesRespon
<member>
<HealthStatus>{{ instance_state.health_status }}</HealthStatus>
<AutoScalingGroupName>{{ instance_state.instance.autoscaling_group.name }}</AutoScalingGroupName>
<AvailabilityZone>us-east-1e</AvailabilityZone>
<AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
<InstanceId>{{ instance_state.instance.id }}</InstanceId>
<LaunchConfigurationName>{{ instance_state.instance.autoscaling_group.launch_config_name }}</LaunchConfigurationName>
<LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
@ -654,7 +654,7 @@ DELETE_POLICY_TEMPLATE = """<DeleteScalingPolicyResponse xmlns="http://autoscali
ATTACH_LOAD_BALANCERS_TEMPLATE = """<AttachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<AttachLoadBalancersResult></AttachLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</AttachLoadBalancersResponse>"""
@ -670,14 +670,14 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http
</LoadBalancers>
</DescribeLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DescribeLoadBalancersResponse>"""
DETACH_LOAD_BALANCERS_TEMPLATE = """<DetachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DetachLoadBalancersResult></DetachLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DetachLoadBalancersResponse>"""
@ -690,13 +690,13 @@ SUSPEND_PROCESSES_TEMPLATE = """<SuspendProcessesResponse xmlns="http://autoscal
SET_INSTANCE_HEALTH_TEMPLATE = """<SetInstanceHealthResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<SetInstanceHealthResponse></SetInstanceHealthResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SetInstanceHealthResponse>"""
SET_INSTANCE_PROTECTION_TEMPLATE = """<SetInstanceProtectionResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<SetInstanceProtectionResult></SetInstanceProtectionResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SetInstanceProtectionResponse>"""

View File

@ -30,7 +30,7 @@ from moto.s3.models import s3_backend
from moto.logs.models import logs_backends
from moto.s3.exceptions import MissingBucket, MissingKey
from moto import settings
from .utils import make_function_arn
from .utils import make_function_arn, make_function_ver_arn
logger = logging.getLogger(__name__)
@ -45,7 +45,7 @@ except ImportError:
_stderr_regex = re.compile(r'START|END|REPORT RequestId: .*')
_orig_adapter_send = requests.adapters.HTTPAdapter.send
docker_3 = docker.__version__.startswith("3")
docker_3 = docker.__version__[0] >= '3'
def zip2tar(zip_bytes):
@ -215,12 +215,12 @@ class LambdaFunction(BaseModel):
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version)
self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name)
self.tags = dict()
def set_version(self, version):
self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version)
self.function_arn = make_function_ver_arn(self.region, ACCOUNT_ID, self.function_name, version)
self.version = version
self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
@ -503,7 +503,10 @@ class LambdaStorage(object):
def list_versions_by_function(self, name):
if name not in self._functions:
return None
return [self._functions[name]['latest']]
latest = copy.copy(self._functions[name]['latest'])
latest.function_arn += ':$LATEST'
return [latest] + self._functions[name]['versions']
def get_arn(self, arn):
return self._arns.get(arn, None)
@ -535,6 +538,7 @@ class LambdaStorage(object):
fn.set_version(new_version)
self._functions[name]['versions'].append(fn)
self._arns[fn.function_arn] = fn
return fn
def del_function(self, name, qualifier=None):
@ -604,6 +608,9 @@ class LambdaBackend(BaseBackend):
self._lambdas.put_function(fn)
if spec.get('Publish'):
ver = self.publish_function(function_name)
fn.version = ver.version
return fn
def publish_function(self, function_name):

View File

@ -150,7 +150,7 @@ class LambdaResponse(BaseResponse):
for fn in self.lambda_backend.list_functions():
json_data = fn.get_configuration()
json_data['Version'] = '$LATEST'
result['Functions'].append(json_data)
return 200, {}, json.dumps(result)
@ -204,7 +204,10 @@ class LambdaResponse(BaseResponse):
if fn:
code = fn.get_code()
if qualifier is None or qualifier == '$LATEST':
code['Configuration']['Version'] = '$LATEST'
if qualifier == '$LATEST':
code['Configuration']['FunctionArn'] += ':$LATEST'
return 200, {}, json.dumps(code)
else:
return 404, {}, "{}"

View File

@ -3,8 +3,13 @@ from collections import namedtuple
ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version'])
def make_function_arn(region, account, name, version='1'):
return 'arn:aws:lambda:{0}:{1}:function:{2}:{3}'.format(region, account, name, version)
def make_function_arn(region, account, name):
return 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account, name)
def make_function_ver_arn(region, account, name, version='1'):
arn = make_function_arn(region, account, name)
return '{0}:{1}'.format(arn, version)
def split_function_arn(arn):

View File

@ -32,6 +32,7 @@ from moto.organizations import organizations_backends
from moto.polly import polly_backends
from moto.rds2 import rds2_backends
from moto.redshift import redshift_backends
from moto.resourcegroups import resourcegroups_backends
from moto.route53 import route53_backends
from moto.s3 import s3_backends
from moto.ses import ses_backends
@ -81,6 +82,7 @@ BACKENDS = {
'organizations': organizations_backends,
'polly': polly_backends,
'redshift': redshift_backends,
'resource-groups': resourcegroups_backends,
'rds': rds2_backends,
's3': s3_backends,
's3bucket_path': s3_backends,

View File

@ -12,7 +12,7 @@ from moto.batch import models as batch_models
from moto.cloudwatch import models as cloudwatch_models
from moto.cognitoidentity import models as cognitoidentity_models
from moto.datapipeline import models as datapipeline_models
from moto.dynamodb import models as dynamodb_models
from moto.dynamodb2 import models as dynamodb2_models
from moto.ec2 import models as ec2_models
from moto.ecs import models as ecs_models
from moto.elb import models as elb_models
@ -37,7 +37,7 @@ MODEL_MAP = {
"AWS::Batch::JobDefinition": batch_models.JobDefinition,
"AWS::Batch::JobQueue": batch_models.JobQueue,
"AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment,
"AWS::DynamoDB::Table": dynamodb_models.Table,
"AWS::DynamoDB::Table": dynamodb2_models.Table,
"AWS::Kinesis::Stream": kinesis_models.Stream,
"AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping,
"AWS::Lambda::Function": lambda_models.LambdaFunction,
@ -425,11 +425,18 @@ class ResourceMap(collections.Mapping):
self.resolved_parameters[parameter_name] = parameter.get('Default')
# Set any input parameters that were passed
self.no_echo_parameter_keys = []
for key, value in self.input_parameters.items():
if key in self.resolved_parameters:
value_type = parameter_slots[key].get('Type', 'String')
parameter_slot = parameter_slots[key]
value_type = parameter_slot.get('Type', 'String')
if value_type == 'CommaDelimitedList' or value_type.startswith("List"):
value = value.split(',')
if parameter_slot.get('NoEcho'):
self.no_echo_parameter_keys.append(key)
self.resolved_parameters[key] = value
# Check if there are any non-default params that were not passed input

View File

@ -654,7 +654,11 @@ DESCRIBE_STACKS_TEMPLATE = """<DescribeStacksResponse>
{% for param_name, param_value in stack.stack_parameters.items() %}
<member>
<ParameterKey>{{ param_name }}</ParameterKey>
<ParameterValue>{{ param_value }}</ParameterValue>
{% if param_name in stack.resource_map.no_echo_parameter_keys %}
<ParameterValue>****</ParameterValue>
{% else %}
<ParameterValue>{{ param_value }}</ParameterValue>
{% endif %}
</member>
{% endfor %}
</Parameters>

View File

@ -287,6 +287,18 @@ class CognitoIdpUser(BaseModel):
return user_json
def update_attributes(self, new_attributes):
def flatten_attrs(attrs):
return {attr['Name']: attr['Value'] for attr in attrs}
def expand_attrs(attrs):
return [{'Name': k, 'Value': v} for k, v in attrs.items()]
flat_attributes = flatten_attrs(self.attributes)
flat_attributes.update(flatten_attrs(new_attributes))
self.attributes = expand_attrs(flat_attributes)
class CognitoIdpBackend(BaseBackend):
@ -673,6 +685,17 @@ class CognitoIdpBackend(BaseBackend):
else:
raise NotAuthorizedError(access_token)
def admin_update_user_attributes(self, user_pool_id, username, attributes):
user_pool = self.user_pools.get(user_pool_id)
if not user_pool:
raise ResourceNotFoundError(user_pool_id)
if username not in user_pool.users:
raise UserNotFoundError(username)
user = user_pool.users[username]
user.update_attributes(attributes)
cognitoidp_backends = {}
for region in boto.cognito.identity.regions():

View File

@ -352,6 +352,13 @@ class CognitoIdpResponse(BaseResponse):
cognitoidp_backends[region].change_password(access_token, previous_password, proposed_password)
return ""
def admin_update_user_attributes(self):
user_pool_id = self._get_param("UserPoolId")
username = self._get_param("Username")
attributes = self._get_param("UserAttributes")
cognitoidp_backends[self.region].admin_update_user_attributes(user_pool_id, username, attributes)
return ""
class CognitoIdpJsonWebKeyResponse(BaseResponse):

View File

@ -152,11 +152,18 @@ class BaseResponse(_TemplateEnvironmentMixin):
for key, value in flat.items():
querystring[key] = [value]
elif self.body:
querystring.update(parse_qs(raw_body, keep_blank_values=True))
try:
querystring.update(parse_qs(raw_body, keep_blank_values=True))
except UnicodeEncodeError:
pass # ignore encoding errors, as the body may not contain a legitimate querystring
if not querystring:
querystring.update(headers)
querystring = _decode_dict(querystring)
try:
querystring = _decode_dict(querystring)
except UnicodeDecodeError:
pass # ignore decoding errors, as the body may not contain a legitimate querystring
self.uri = full_url
self.path = urlparse(full_url).path
self.querystring = querystring

View File

@ -280,7 +280,7 @@ def amzn_request_id(f):
# Update request ID in XML
try:
body = body.replace('{{ requestid }}', request_id)
body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body)
except Exception: # Will just ignore if it cant work on bytes (which are str's on python2)
pass

View File

@ -0,0 +1,2 @@
class InvalidIndexNameError(ValueError):
pass

View File

@ -13,6 +13,7 @@ from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time
from moto.core.exceptions import JsonRESTError
from .comparisons import get_comparison_func, get_filter_expression, Op
from .exceptions import InvalidIndexNameError
class DynamoJsonEncoder(json.JSONEncoder):
@ -293,6 +294,19 @@ class Item(BaseModel):
# TODO: implement other data types
raise NotImplementedError(
'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
elif action == 'DELETE':
if set(update_action['Value'].keys()) == set(['SS']):
existing = self.attrs.get(attribute_name, DynamoType({"SS": {}}))
new_set = set(existing.value).difference(set(new_value))
self.attrs[attribute_name] = DynamoType({
"SS": list(new_set)
})
else:
raise NotImplementedError(
'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
else:
raise NotImplementedError(
'%s action not support for update_with_attribute_updates' % action)
class StreamRecord(BaseModel):
@ -403,6 +417,25 @@ class Table(BaseModel):
}
self.set_stream_specification(streams)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
params = {}
if 'KeySchema' in properties:
params['schema'] = properties['KeySchema']
if 'AttributeDefinitions' in properties:
params['attr'] = properties['AttributeDefinitions']
if 'GlobalSecondaryIndexes' in properties:
params['global_indexes'] = properties['GlobalSecondaryIndexes']
if 'ProvisionedThroughput' in properties:
params['throughput'] = properties['ProvisionedThroughput']
if 'LocalSecondaryIndexes' in properties:
params['indexes'] = properties['LocalSecondaryIndexes']
table = dynamodb_backends[region_name].create_table(name=properties['TableName'], **params)
return table
def _generate_arn(self, name):
return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name
@ -572,7 +605,7 @@ class Table(BaseModel):
results = []
if index_name:
all_indexes = (self.global_indexes or []) + (self.indexes or [])
all_indexes = self.all_indexes()
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
if index_name not in indexes_by_name:
raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
@ -672,11 +705,39 @@ class Table(BaseModel):
else:
yield hash_set
def scan(self, filters, limit, exclusive_start_key, filter_expression=None):
def all_indexes(self):
return (self.global_indexes or []) + (self.indexes or [])
def has_idx_items(self, index_name):
all_indexes = self.all_indexes()
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
idx = indexes_by_name[index_name]
idx_col_set = set([i['AttributeName'] for i in idx['KeySchema']])
for hash_set in self.items.values():
if self.range_key_attr:
for item in hash_set.values():
if idx_col_set.issubset(set(item.attrs)):
yield item
else:
if idx_col_set.issubset(set(hash_set.attrs)):
yield hash_set
def scan(self, filters, limit, exclusive_start_key, filter_expression=None, index_name=None):
results = []
scanned_count = 0
all_indexes = self.all_indexes()
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
for item in self.all_items():
if index_name:
if index_name not in indexes_by_name:
raise InvalidIndexNameError('The table does not have the specified index: %s' % index_name)
items = self.has_idx_items(index_name)
else:
items = self.all_items()
for item in items:
scanned_count += 1
passes_all_conditions = True
for attribute_name, (comparison_operator, comparison_objs) in filters.items():
@ -703,10 +764,10 @@ class Table(BaseModel):
results.append(item)
results, last_evaluated_key = self._trim_results(results, limit,
exclusive_start_key)
exclusive_start_key, index_name)
return results, scanned_count, last_evaluated_key
def _trim_results(self, results, limit, exclusive_start_key):
def _trim_results(self, results, limit, exclusive_start_key, scaned_index=None):
if exclusive_start_key is not None:
hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr))
range_key = exclusive_start_key.get(self.range_key_attr)
@ -726,6 +787,14 @@ class Table(BaseModel):
if results[-1].range_key is not None:
last_evaluated_key[self.range_key_attr] = results[-1].range_key
if scaned_index:
all_indexes = self.all_indexes()
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
idx = indexes_by_name[scaned_index]
idx_col_list = [i['AttributeName'] for i in idx['KeySchema']]
for col in idx_col_list:
last_evaluated_key[col] = results[-1].attrs[col]
return results, last_evaluated_key
def lookup(self, *args, **kwargs):
@ -893,7 +962,7 @@ class DynamoDBBackend(BaseBackend):
return table.query(hash_key, range_comparison, range_values, limit,
exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs)
def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values):
def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values, index_name):
table = self.tables.get(table_name)
if not table:
return None, None, None
@ -908,7 +977,7 @@ class DynamoDBBackend(BaseBackend):
else:
filter_expression = Op(None, None) # Will always eval to true
return table.scan(scan_filters, limit, exclusive_start_key, filter_expression)
return table.scan(scan_filters, limit, exclusive_start_key, filter_expression, index_name)
def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names,
expression_attribute_values, expected=None):

View File

@ -5,6 +5,7 @@ import re
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores, amzn_request_id
from .exceptions import InvalidIndexNameError
from .models import dynamodb_backends, dynamo_json_dump
@ -156,8 +157,16 @@ class DynamoHandler(BaseResponse):
body = self.body
# get the table name
table_name = body['TableName']
# get the throughput
throughput = body["ProvisionedThroughput"]
# check billing mode and get the throughput
if "BillingMode" in body.keys() and body["BillingMode"] == "PAY_PER_REQUEST":
if "ProvisionedThroughput" in body.keys():
er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
return self.error(er,
'ProvisionedThroughput cannot be specified \
when BillingMode is PAY_PER_REQUEST')
throughput = None
else: # Provisioned (default billing mode)
throughput = body["ProvisionedThroughput"]
# getting the schema
key_schema = body['KeySchema']
# getting attribute definition
@ -552,6 +561,7 @@ class DynamoHandler(BaseResponse):
exclusive_start_key = self.body.get('ExclusiveStartKey')
limit = self.body.get("Limit")
index_name = self.body.get('IndexName')
try:
items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters,
@ -559,7 +569,11 @@ class DynamoHandler(BaseResponse):
exclusive_start_key,
filter_expression,
expression_attribute_names,
expression_attribute_values)
expression_attribute_values,
index_name)
except InvalidIndexNameError as err:
er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
return self.error(er, str(err))
except ValueError as err:
er = 'com.amazonaws.dynamodb.v20111205#ValidationError'
return self.error(er, 'Bad Filter Expression: {0}'.format(err))

View File

@ -58,6 +58,14 @@ class InvalidKeyPairDuplicateError(EC2ClientError):
.format(key))
class InvalidKeyPairFormatError(EC2ClientError):
def __init__(self):
super(InvalidKeyPairFormatError, self).__init__(
"InvalidKeyPair.Format",
"Key is not in valid OpenSSH public key format")
class InvalidVPCIdError(EC2ClientError):
def __init__(self, vpc_id):
@ -420,3 +428,79 @@ class OperationNotPermitted(EC2ClientError):
"The vpc CIDR block with association ID {} may not be disassociated. "
"It is the primary IPv4 CIDR block of the VPC".format(association_id)
)
class NetworkAclEntryAlreadyExistsError(EC2ClientError):
def __init__(self, rule_number):
super(NetworkAclEntryAlreadyExistsError, self).__init__(
"NetworkAclEntryAlreadyExists",
"The network acl entry identified by {} already exists.".format(rule_number)
)
class InvalidSubnetRangeError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidSubnetRangeError, self).__init__(
"InvalidSubnet.Range",
"The CIDR '{}' is invalid.".format(cidr_block)
)
class InvalidCIDRBlockParameterError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidCIDRBlockParameterError, self).__init__(
"InvalidParameterValue",
"Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block)
)
class InvalidDestinationCIDRBlockParameterError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidDestinationCIDRBlockParameterError, self).__init__(
"InvalidParameterValue",
"Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block)
)
class InvalidSubnetConflictError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidSubnetConflictError, self).__init__(
"InvalidSubnet.Conflict",
"The CIDR '{}' conflicts with another subnet".format(cidr_block)
)
class InvalidVPCRangeError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidVPCRangeError, self).__init__(
"InvalidVpc.Range",
"The CIDR '{}' is invalid.".format(cidr_block)
)
# accept exception
class OperationNotPermitted2(EC2ClientError):
def __init__(self, client_region, pcx_id, acceptor_region):
super(OperationNotPermitted2, self).__init__(
"OperationNotPermitted",
"Incorrect region ({0}) specified for this request."
"VPC peering connection {1} must be accepted in region {2}".format(client_region, pcx_id, acceptor_region)
)
# reject exception
class OperationNotPermitted3(EC2ClientError):
def __init__(self, client_region, pcx_id, acceptor_region):
super(OperationNotPermitted3, self).__init__(
"OperationNotPermitted",
"Incorrect region ({0}) specified for this request."
"VPC peering connection {1} must be accepted or rejected in region {2}".format(client_region,
pcx_id,
acceptor_region)
)

View File

@ -20,6 +20,7 @@ from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest
from boto.ec2.launchspecification import LaunchSpecification
from moto.compat import OrderedDict
from moto.core import BaseBackend
from moto.core.models import Model, BaseModel
@ -35,14 +36,17 @@ from .exceptions import (
InvalidAMIIdError,
InvalidAMIAttributeItemValueError,
InvalidAssociationIdError,
InvalidCIDRBlockParameterError,
InvalidCIDRSubnetError,
InvalidCustomerGatewayIdError,
InvalidDestinationCIDRBlockParameterError,
InvalidDHCPOptionsIdError,
InvalidDomainError,
InvalidID,
InvalidInstanceIdError,
InvalidInternetGatewayIdError,
InvalidKeyPairDuplicateError,
InvalidKeyPairFormatError,
InvalidKeyPairNameError,
InvalidNetworkAclIdError,
InvalidNetworkAttachmentIdError,
@ -56,20 +60,26 @@ from .exceptions import (
InvalidSecurityGroupDuplicateError,
InvalidSecurityGroupNotFoundError,
InvalidSnapshotIdError,
InvalidSubnetConflictError,
InvalidSubnetIdError,
InvalidSubnetRangeError,
InvalidVolumeIdError,
InvalidVolumeAttachmentError,
InvalidVpcCidrBlockAssociationIdError,
InvalidVPCPeeringConnectionIdError,
InvalidVPCPeeringConnectionStateTransitionError,
InvalidVPCIdError,
InvalidVPCRangeError,
InvalidVpnGatewayIdError,
InvalidVpnConnectionIdError,
MalformedAMIIdError,
MalformedDHCPOptionsIdError,
MissingParameterError,
MotoNotImplementedError,
NetworkAclEntryAlreadyExistsError,
OperationNotPermitted,
OperationNotPermitted2,
OperationNotPermitted3,
ResourceAlreadyAssociatedError,
RulesPerSecurityGroupLimitExceededError,
TagLimitExceeded)
@ -118,6 +128,8 @@ from .utils import (
random_customer_gateway_id,
is_tag_filter,
tag_filter_matches,
rsa_public_key_parse,
rsa_public_key_fingerprint
)
INSTANCE_TYPES = json.load(
@ -404,7 +416,7 @@ class Instance(TaggedEC2Resource, BotoInstance):
warnings.warn('Could not find AMI with image-id:{0}, '
'in the near future this will '
'cause an error.\n'
'Use ec2_backend.describe_images() to'
'Use ec2_backend.describe_images() to '
'find suitable image for your test'.format(image_id),
PendingDeprecationWarning)
@ -908,7 +920,14 @@ class KeyPairBackend(object):
def import_key_pair(self, key_name, public_key_material):
if key_name in self.keypairs:
raise InvalidKeyPairDuplicateError(key_name)
keypair = KeyPair(key_name, **random_key_pair())
try:
rsa_public_key = rsa_public_key_parse(public_key_material)
except ValueError:
raise InvalidKeyPairFormatError()
fingerprint = rsa_public_key_fingerprint(rsa_public_key)
keypair = KeyPair(key_name, material=public_key_material, fingerprint=fingerprint)
self.keypairs[key_name] = keypair
return keypair
@ -1879,6 +1898,8 @@ class Snapshot(TaggedEC2Resource):
return str(self.encrypted).lower()
elif filter_name == 'status':
return self.status
elif filter_name == 'owner-id':
return self.owner_id
else:
return super(Snapshot, self).get_filter_value(
filter_name, 'DescribeSnapshots')
@ -2120,22 +2141,28 @@ class VPC(TaggedEC2Resource):
class VPCBackend(object):
__refs__ = defaultdict(list)
vpc_refs = defaultdict(set)
def __init__(self):
self.vpcs = {}
self.__refs__[self.__class__].append(weakref.ref(self))
self.vpc_refs[self.__class__].add(weakref.ref(self))
super(VPCBackend, self).__init__()
@classmethod
def get_instances(cls):
for inst_ref in cls.__refs__[cls]:
def get_vpc_refs(cls):
for inst_ref in cls.vpc_refs[cls]:
inst = inst_ref()
if inst is not None:
yield inst
def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False):
vpc_id = random_vpc_id()
try:
vpc_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False)
except ValueError:
raise InvalidCIDRBlockParameterError(cidr_block)
if vpc_cidr_block.prefixlen < 16 or vpc_cidr_block.prefixlen > 28:
raise InvalidVPCRangeError(cidr_block)
vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block)
self.vpcs[vpc_id] = vpc
@ -2159,7 +2186,7 @@ class VPCBackend(object):
# get vpc by vpc id and aws region
def get_cross_vpc(self, vpc_id, peer_region):
for vpcs in self.get_instances():
for vpcs in self.get_vpc_refs():
if vpcs.region_name == peer_region:
match_vpc = vpcs.get_vpc(vpc_id)
return match_vpc
@ -2280,15 +2307,31 @@ class VPCPeeringConnection(TaggedEC2Resource):
class VPCPeeringConnectionBackend(object):
# for cross region vpc reference
vpc_pcx_refs = defaultdict(set)
def __init__(self):
self.vpc_pcxs = {}
self.vpc_pcx_refs[self.__class__].add(weakref.ref(self))
super(VPCPeeringConnectionBackend, self).__init__()
@classmethod
def get_vpc_pcx_refs(cls):
    """Yield every still-live peering backend registered for this class.

    Entries are weak references, so backends that have been garbage
    collected dereference to None and are silently skipped.
    """
    for weak_backend in cls.vpc_pcx_refs[cls]:
        backend = weak_backend()
        if backend is not None:
            yield backend
def create_vpc_peering_connection(self, vpc, peer_vpc):
    """Create a peering connection between *vpc* and *peer_vpc*.

    The new connection starts in the 'pending-acceptance' state.  For a
    cross-region peering it is also registered with the accepter region's
    backend(s) so the connection can be described/accepted from that side.
    """
    vpc_pcx_id = random_vpc_peering_connection_id()
    vpc_pcx = VPCPeeringConnection(vpc_pcx_id, vpc, peer_vpc)
    vpc_pcx._status.pending()
    self.vpc_pcxs[vpc_pcx_id] = vpc_pcx
    # insert cross region peering info
    if vpc.ec2_backend.region_name != peer_vpc.ec2_backend.region_name:
        # get_vpc_pcx_refs() yields backends from every region; keep only
        # the one(s) matching the accepter VPC's region.
        for vpc_pcx_cx in peer_vpc.ec2_backend.get_vpc_pcx_refs():
            if vpc_pcx_cx.region_name == peer_vpc.ec2_backend.region_name:
                vpc_pcx_cx.vpc_pcxs[vpc_pcx_id] = vpc_pcx
    return vpc_pcx
def get_all_vpc_peering_connections(self):
@ -2306,6 +2349,11 @@ class VPCPeeringConnectionBackend(object):
def accept_vpc_peering_connection(self, vpc_pcx_id):
vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
# if cross region need accepter from another region
pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name
pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name
if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region:
raise OperationNotPermitted2(self.region_name, vpc_pcx.id, pcx_acp_region)
if vpc_pcx._status.code != 'pending-acceptance':
raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
vpc_pcx._status.accept()
@ -2313,6 +2361,11 @@ class VPCPeeringConnectionBackend(object):
def reject_vpc_peering_connection(self, vpc_pcx_id):
vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
# if cross region need accepter from another region
pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name
pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name
if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region:
raise OperationNotPermitted3(self.region_name, vpc_pcx.id, pcx_acp_region)
if vpc_pcx._status.code != 'pending-acceptance':
raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
vpc_pcx._status.reject()
@ -2326,7 +2379,7 @@ class Subnet(TaggedEC2Resource):
self.id = subnet_id
self.vpc_id = vpc_id
self.cidr_block = cidr_block
self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block))
self.cidr = ipaddress.IPv4Network(six.text_type(self.cidr_block), strict=False)
self._availability_zone = availability_zone
self.default_for_az = default_for_az
self.map_public_ip_on_launch = map_public_ip_on_launch
@ -2458,7 +2511,19 @@ class SubnetBackend(object):
def create_subnet(self, vpc_id, cidr_block, availability_zone):
subnet_id = random_subnet_id()
self.get_vpc(vpc_id) # Validate VPC exists
vpc = self.get_vpc(vpc_id) # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's
vpc_cidr_block = ipaddress.IPv4Network(six.text_type(vpc.cidr_block), strict=False)
try:
subnet_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False)
except ValueError:
raise InvalidCIDRBlockParameterError(cidr_block)
if not (vpc_cidr_block.network_address <= subnet_cidr_block.network_address and
vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address):
raise InvalidSubnetRangeError(cidr_block)
for subnet in self.get_all_subnets(filters={'vpc-id': vpc_id}):
if subnet.cidr.overlaps(subnet_cidr_block):
raise InvalidSubnetConflictError(cidr_block)
# if this is the first subnet for an availability zone,
# consider it the default
@ -2718,6 +2783,11 @@ class RouteBackend(object):
elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id:
gateway = self.get_internet_gateway(gateway_id)
try:
ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False)
except ValueError:
raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block)
route = Route(route_table, destination_cidr_block, local=local,
gateway=gateway,
instance=self.get_instance(
@ -3595,10 +3665,10 @@ class NetworkAclBackend(object):
def add_default_entries(self, network_acl_id):
default_acl_entries = [
{'rule_number': 100, 'rule_action': 'allow', 'egress': 'true'},
{'rule_number': 32767, 'rule_action': 'deny', 'egress': 'true'},
{'rule_number': 100, 'rule_action': 'allow', 'egress': 'false'},
{'rule_number': 32767, 'rule_action': 'deny', 'egress': 'false'}
{'rule_number': "100", 'rule_action': 'allow', 'egress': 'true'},
{'rule_number': "32767", 'rule_action': 'deny', 'egress': 'true'},
{'rule_number': "100", 'rule_action': 'allow', 'egress': 'false'},
{'rule_number': "32767", 'rule_action': 'deny', 'egress': 'false'}
]
for entry in default_acl_entries:
self.create_network_acl_entry(network_acl_id=network_acl_id, rule_number=entry['rule_number'], protocol='-1',
@ -3629,12 +3699,14 @@ class NetworkAclBackend(object):
icmp_code, icmp_type, port_range_from,
port_range_to):
network_acl = self.get_network_acl(network_acl_id)
if any(entry.egress == egress and entry.rule_number == rule_number for entry in network_acl.network_acl_entries):
raise NetworkAclEntryAlreadyExistsError(rule_number)
network_acl_entry = NetworkAclEntry(self, network_acl_id, rule_number,
protocol, rule_action, egress,
cidr_block, icmp_code, icmp_type,
port_range_from, port_range_to)
network_acl = self.get_network_acl(network_acl_id)
network_acl.network_acl_entries.append(network_acl_entry)
return network_acl_entry

View File

@ -74,30 +74,35 @@ CREATE_VPC_PEERING_CONNECTION_RESPONSE = """
"""
DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """
<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnectionSet>
{% for vpc_pcx in vpc_pcxs %}
<item>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>777788889999</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>123456789012</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
</accepterVpcInfo>
<status>
<code>{{ vpc_pcx._status.code }}</code>
<message>{{ vpc_pcx._status.message }}</message>
</status>
<expirationTime>2014-02-17T16:00:50.000Z</expirationTime>
<tagSet/>
</item>
{% endfor %}
</vpcPeeringConnectionSet>
<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnectionSet>
{% for vpc_pcx in vpc_pcxs %}
<item>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>777788889999</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>123456789012</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
<peeringOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>true</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>false</allowDnsResolutionFromRemoteVpc>
</peeringOptions>
</accepterVpcInfo>
<status>
<code>{{ vpc_pcx._status.code }}</code>
<message>{{ vpc_pcx._status.message }}</message>
</status>
<tagSet/>
</item>
{% endfor %}
</vpcPeeringConnectionSet>
</DescribeVpcPeeringConnectionsResponse>
"""
@ -109,19 +114,24 @@ DELETE_VPC_PEERING_CONNECTION_RESPONSE = """
"""
ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = """
<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnection>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>123456789012</ownerId>
<ownerId>777788889999</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>777788889999</ownerId>
<ownerId>123456789012</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
<peeringOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>false</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>false</allowDnsResolutionFromRemoteVpc>
</peeringOptions>
</accepterVpcInfo>
<status>
<code>{{ vpc_pcx._status.code }}</code>

View File

@ -1,10 +1,19 @@
from __future__ import unicode_literals
import base64
import hashlib
import fnmatch
import random
import re
import six
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import sshpubkeys.exceptions
from sshpubkeys.keys import SSHKey
EC2_RESOURCE_TO_PREFIX = {
'customer-gateway': 'cgw',
'dhcp-options': 'dopt',
@ -453,23 +462,19 @@ def simple_aws_filter_to_re(filter_string):
def random_key_pair():
def random_hex():
return chr(random.choice(list(range(48, 58)) + list(range(97, 102))))
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend())
private_key_material = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
public_key_fingerprint = rsa_public_key_fingerprint(private_key.public_key())
def random_fingerprint():
return ':'.join([random_hex() + random_hex() for i in range(20)])
def random_material():
return ''.join([
chr(random.choice(list(range(65, 91)) + list(range(48, 58)) +
list(range(97, 102))))
for i in range(1000)
])
material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \
"-----END RSA PRIVATE KEY-----"
return {
'fingerprint': random_fingerprint(),
'material': material
'fingerprint': public_key_fingerprint,
'material': private_key_material.decode('ascii')
}
@ -535,3 +540,28 @@ def generate_instance_identity_document(instance):
}
return document
def rsa_public_key_parse(key_material):
    """Decode base64-encoded OpenSSH key material and return its RSA key.

    Raises ValueError when the material does not decode to a valid RSA
    SSH public key.
    """
    material = key_material
    if not isinstance(material, six.binary_type):
        material = material.encode("ascii")
    try:
        public_key = SSHKey(base64.b64decode(material).decode("ascii"))
    except (sshpubkeys.exceptions.InvalidKeyException, UnicodeDecodeError):
        raise ValueError('bad key')
    if not public_key.rsa:
        # parsed, but not an RSA key (e.g. ed25519) — EC2 only takes RSA
        raise ValueError('bad key')
    return public_key.rsa
def rsa_public_key_fingerprint(rsa_public_key):
    """Return the AWS-style fingerprint: MD5 of the DER-encoded
    SubjectPublicKeyInfo, rendered as colon-separated hex byte pairs."""
    der_bytes = rsa_public_key.public_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PublicFormat.SubjectPublicKeyInfo)
    digest = hashlib.md5(der_bytes).hexdigest()
    # hexdigest() is 32 chars, so every 2-char pair is complete
    return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))

View File

@ -699,12 +699,15 @@ class EC2ContainerServiceBackend(BaseBackend):
return service
def list_services(self, cluster_str):
def list_services(self, cluster_str, scheduling_strategy=None):
cluster_name = cluster_str.split('/')[-1]
service_arns = []
for key, value in self.services.items():
if cluster_name + ':' in key:
service_arns.append(self.services[key].arn)
service = self.services[key]
if scheduling_strategy is None or service.scheduling_strategy == scheduling_strategy:
service_arns.append(service.arn)
return sorted(service_arns)
def describe_services(self, cluster_str, service_names_or_arns):

View File

@ -163,7 +163,8 @@ class EC2ContainerServiceResponse(BaseResponse):
def list_services(self):
cluster_str = self._get_param('cluster')
service_arns = self.ecs_backend.list_services(cluster_str)
scheduling_strategy = self._get_param('schedulingStrategy')
service_arns = self.ecs_backend.list_services(cluster_str, scheduling_strategy)
return json.dumps({
'serviceArns': service_arns
# ,

View File

@ -131,7 +131,7 @@ class InvalidActionTypeError(ELBClientError):
def __init__(self, invalid_name, index):
super(InvalidActionTypeError, self).__init__(
"ValidationError",
"1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward]" % (invalid_name, index)
"1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward, redirect]" % (invalid_name, index)
)

View File

@ -204,8 +204,20 @@ class FakeListener(BaseModel):
# transform default actions to confirm with the rest of the code and XML templates
if "DefaultActions" in properties:
default_actions = []
for action in properties['DefaultActions']:
default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']})
for i, action in enumerate(properties['DefaultActions']):
action_type = action['Type']
if action_type == 'forward':
default_actions.append({'type': action_type, 'target_group_arn': action['TargetGroupArn']})
elif action_type == 'redirect':
redirect_action = {'type': action_type, }
for redirect_config_key, redirect_config_value in action['RedirectConfig'].items():
# need to match the output of _get_list_prefix
if redirect_config_key == 'StatusCode':
redirect_config_key = 'status_code'
redirect_action['redirect_config._' + redirect_config_key.lower()] = redirect_config_value
default_actions.append(redirect_action)
else:
raise InvalidActionTypeError(action_type, i + 1)
else:
default_actions = None
@ -417,11 +429,15 @@ class ELBv2Backend(BaseBackend):
for i, action in enumerate(actions):
index = i + 1
action_type = action['type']
if action_type not in ['forward']:
if action_type == 'forward':
action_target_group_arn = action['target_group_arn']
if action_target_group_arn not in target_group_arns:
raise ActionTargetGroupNotFoundError(action_target_group_arn)
elif action_type == 'redirect':
# nothing to do
pass
else:
raise InvalidActionTypeError(action_type, index)
action_target_group_arn = action['target_group_arn']
if action_target_group_arn not in target_group_arns:
raise ActionTargetGroupNotFoundError(action_target_group_arn)
# TODO: check for error 'TooManyRegistrationsForTargetId'
# TODO: check for error 'TooManyRules'
@ -483,10 +499,18 @@ class ELBv2Backend(BaseBackend):
arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self))
listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions)
balancer.listeners[listener.arn] = listener
for action in default_actions:
if action['target_group_arn'] in self.target_groups.keys():
target_group = self.target_groups[action['target_group_arn']]
target_group.load_balancer_arns.append(load_balancer_arn)
for i, action in enumerate(default_actions):
action_type = action['type']
if action_type == 'forward':
if action['target_group_arn'] in self.target_groups.keys():
target_group = self.target_groups[action['target_group_arn']]
target_group.load_balancer_arns.append(load_balancer_arn)
elif action_type == 'redirect':
# nothing to do
pass
else:
raise InvalidActionTypeError(action_type, i + 1)
return listener
def describe_load_balancers(self, arns, names):
@ -649,11 +673,15 @@ class ELBv2Backend(BaseBackend):
for i, action in enumerate(actions):
index = i + 1
action_type = action['type']
if action_type not in ['forward']:
if action_type == 'forward':
action_target_group_arn = action['target_group_arn']
if action_target_group_arn not in target_group_arns:
raise ActionTargetGroupNotFoundError(action_target_group_arn)
elif action_type == 'redirect':
# nothing to do
pass
else:
raise InvalidActionTypeError(action_type, index)
action_target_group_arn = action['target_group_arn']
if action_target_group_arn not in target_group_arns:
raise ActionTargetGroupNotFoundError(action_target_group_arn)
# TODO: check for error 'TooManyRegistrationsForTargetId'
# TODO: check for error 'TooManyRules'
@ -873,7 +901,7 @@ class ELBv2Backend(BaseBackend):
# Its already validated in responses.py
listener.ssl_policy = ssl_policy
if default_actions is not None:
if default_actions is not None and default_actions != []:
# Is currently not validated
listener.default_actions = default_actions

View File

@ -704,7 +704,11 @@ CREATE_RULE_TEMPLATE = """<CreateRuleResponse xmlns="http://elasticloadbalancing
{% for action in rule.actions %}
<member>
<Type>{{ action["type"] }}</Type>
{% if action["type"] == "forward" %}
<TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
{% elif action["type"] == "redirect" %}
<RedirectConfig>{{ action["redirect_config"] }}</RedirectConfig>
{% endif %}
</member>
{% endfor %}
</Actions>
@ -772,7 +776,15 @@ CREATE_LISTENER_TEMPLATE = """<CreateListenerResponse xmlns="http://elasticloadb
{% for action in listener.default_actions %}
<member>
<Type>{{ action.type }}</Type>
<TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
{% if action["type"] == "forward" %}
<TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
{% elif action["type"] == "redirect" %}
<RedirectConfig>
<Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
<Port>{{ action["redirect_config._port"] }}</Port>
<StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
</RedirectConfig>
{% endif %}
</member>
{% endfor %}
</DefaultActions>
@ -877,7 +889,15 @@ DESCRIBE_RULES_TEMPLATE = """<DescribeRulesResponse xmlns="http://elasticloadbal
{% for action in rule.actions %}
<member>
<Type>{{ action["type"] }}</Type>
{% if action["type"] == "forward" %}
<TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
{% elif action["type"] == "redirect" %}
<RedirectConfig>
<Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
<Port>{{ action["redirect_config._port"] }}</Port>
<StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
</RedirectConfig>
{% endif %}
</member>
{% endfor %}
</Actions>
@ -970,7 +990,15 @@ DESCRIBE_LISTENERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://el
{% for action in listener.default_actions %}
<member>
<Type>{{ action.type }}</Type>
<TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
{% if action["type"] == "forward" %}
<TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>m
{% elif action["type"] == "redirect" %}
<RedirectConfig>
<Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
<Port>{{ action["redirect_config._port"] }}</Port>
<StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
</RedirectConfig>
{% endif %}
</member>
{% endfor %}
</DefaultActions>
@ -1399,7 +1427,15 @@ MODIFY_LISTENER_TEMPLATE = """<ModifyListenerResponse xmlns="http://elasticloadb
{% for action in listener.default_actions %}
<member>
<Type>{{ action.type }}</Type>
<TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
{% if action["type"] == "forward" %}
<TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
{% elif action["type"] == "redirect" %}
<RedirectConfig>
<Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
<Port>{{ action["redirect_config._port"] }}</Port>
<StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
</RedirectConfig>
{% endif %}
</member>
{% endfor %}
</DefaultActions>

View File

@ -97,7 +97,8 @@ class FakeCluster(BaseModel):
visible_to_all_users='false',
release_label=None,
requested_ami_version=None,
running_ami_version=None):
running_ami_version=None,
custom_ami_id=None):
self.id = cluster_id or random_cluster_id()
emr_backend.clusters[self.id] = self
self.emr_backend = emr_backend
@ -162,6 +163,7 @@ class FakeCluster(BaseModel):
self.release_label = release_label
self.requested_ami_version = requested_ami_version
self.running_ami_version = running_ami_version
self.custom_ami_id = custom_ami_id
self.role = job_flow_role or 'EMRJobflowDefault'
self.service_role = service_role

View File

@ -267,6 +267,18 @@ class ElasticMapReduceResponse(BaseResponse):
else:
kwargs['running_ami_version'] = '1.0.0'
custom_ami_id = self._get_param('CustomAmiId')
if custom_ami_id:
kwargs['custom_ami_id'] = custom_ami_id
if release_label and release_label < 'emr-5.7.0':
message = 'Custom AMI is not allowed'
raise EmrError(error_type='ValidationException',
message=message, template='error_json')
elif ami_version:
message = 'Custom AMI is not supported in this version of EMR'
raise EmrError(error_type='ValidationException',
message=message, template='error_json')
cluster = self.backend.run_job_flow(**kwargs)
applications = self._get_list_prefix('Applications.member')
@ -375,6 +387,9 @@ DESCRIBE_CLUSTER_TEMPLATE = """<DescribeClusterResponse xmlns="http://elasticmap
</member>
{% endfor %}
</Configurations>
{% if cluster.custom_ami_id is not none %}
<CustomAmiId>{{ cluster.custom_ami_id }}</CustomAmiId>
{% endif %}
<Ec2InstanceAttributes>
<AdditionalMasterSecurityGroups>
{% for each in cluster.additional_master_security_groups %}

View File

@ -56,6 +56,14 @@ class GlueBackend(BaseBackend):
database = self.get_database(database_name)
return [table for table_name, table in database.tables.items()]
def delete_table(self, database_name, table_name):
    """Remove *table_name* from the named Glue database.

    Raises TableNotFoundException when the table does not exist; returns
    an empty dict (the Glue API's empty response body) on success.
    """
    tables = self.get_database(database_name).tables
    if table_name not in tables:
        raise TableNotFoundException(table_name)
    del tables[table_name]
    return {}
class FakeDatabase(BaseModel):

View File

@ -84,6 +84,12 @@ class GlueResponse(BaseResponse):
]
})
def delete_table(self):
    """Handle the DeleteTable action: forward to the backend, return JSON."""
    db_name = self.parameters.get('DatabaseName')
    tbl_name = self.parameters.get('Name')
    return json.dumps(self.glue_backend.delete_table(db_name, tbl_name))
def get_partitions(self):
database_name = self.parameters.get('DatabaseName')
table_name = self.parameters.get('TableName')

View File

@ -9,6 +9,7 @@ from cryptography import x509
from cryptography.hazmat.backends import default_backend
import pytz
from moto.core.exceptions import RESTError
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_without_milliseconds
@ -131,7 +132,7 @@ class InlinePolicy(Policy):
class Role(BaseModel):
def __init__(self, role_id, name, assume_role_policy_document, path):
def __init__(self, role_id, name, assume_role_policy_document, path, permissions_boundary):
self.id = role_id
self.name = name
self.assume_role_policy_document = assume_role_policy_document
@ -141,6 +142,7 @@ class Role(BaseModel):
self.create_date = datetime.now(pytz.utc)
self.tags = {}
self.description = ""
self.permissions_boundary = permissions_boundary
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@ -150,6 +152,7 @@ class Role(BaseModel):
role_name=resource_name,
assume_role_policy_document=properties['AssumeRolePolicyDocument'],
path=properties.get('Path', '/'),
permissions_boundary=properties.get('PermissionsBoundary', '')
)
policies = properties.get('Policies', [])
@ -470,6 +473,8 @@ class IAMBackend(BaseBackend):
self.managed_policies = self._init_managed_policies()
self.account_aliases = []
self.saml_providers = {}
self.policy_arn_regex = re.compile(
r'^arn:aws:iam::[0-9]*:policy/.*$')
super(IAMBackend, self).__init__()
def _init_managed_policies(self):
@ -587,9 +592,12 @@ class IAMBackend(BaseBackend):
return policies, marker
def create_role(self, role_name, assume_role_policy_document, path):
def create_role(self, role_name, assume_role_policy_document, path, permissions_boundary):
role_id = random_resource_id()
role = Role(role_id, role_name, assume_role_policy_document, path)
if permissions_boundary and not self.policy_arn_regex.match(permissions_boundary):
raise RESTError('InvalidParameterValue', 'Value ({}) for parameter PermissionsBoundary is invalid.'.format(permissions_boundary))
role = Role(role_id, role_name, assume_role_policy_document, path, permissions_boundary)
self.roles[role_id] = role
return role

View File

@ -175,9 +175,11 @@ class IamResponse(BaseResponse):
path = self._get_param('Path')
assume_role_policy_document = self._get_param(
'AssumeRolePolicyDocument')
permissions_boundary = self._get_param(
'PermissionsBoundary')
role = iam_backend.create_role(
role_name, assume_role_policy_document, path)
role_name, assume_role_policy_document, path, permissions_boundary)
template = self.response_template(CREATE_ROLE_TEMPLATE)
return template.render(role=role)
@ -1000,6 +1002,12 @@ CREATE_ROLE_TEMPLATE = """<CreateRoleResponse xmlns="https://iam.amazonaws.com/d
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.permissions_boundary %}
<PermissionsBoundary>
<PermissionsBoundaryType>PermissionsBoundaryPolicy</PermissionsBoundaryType>
<PermissionsBoundaryArn>{{ role.permissions_boundary }}</PermissionsBoundaryArn>
</PermissionsBoundary>
{% endif %}
</Role>
</CreateRoleResult>
<ResponseMetadata>
@ -1102,6 +1110,12 @@ LIST_ROLES_TEMPLATE = """<ListRolesResponse xmlns="https://iam.amazonaws.com/doc
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.permissions_boundary %}
<PermissionsBoundary>
<PermissionsBoundaryType>PermissionsBoundaryPolicy</PermissionsBoundaryType>
<PermissionsBoundaryArn>{{ role.permissions_boundary }}</PermissionsBoundaryArn>
</PermissionsBoundary>
{% endif %}
</member>
{% endfor %}
</Roles>

View File

@ -97,7 +97,7 @@ class FakeThingGroup(BaseModel):
class FakeCertificate(BaseModel):
def __init__(self, certificate_pem, status, region_name):
def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None):
m = hashlib.sha256()
m.update(str(uuid.uuid4()).encode('utf-8'))
self.certificate_id = m.hexdigest()
@ -110,12 +110,18 @@ class FakeCertificate(BaseModel):
self.transfer_data = {}
self.creation_date = time.time()
self.last_modified_date = self.creation_date
self.ca_certificate_id = None
self.ca_certificate_pem = ca_certificate_pem
if ca_certificate_pem:
m.update(str(uuid.uuid4()).encode('utf-8'))
self.ca_certificate_id = m.hexdigest()
def to_dict(self):
    """Serialize the certificate for IoT describe/list responses.

    caCertificateId is None unless the cert was registered with a CA.
    """
    return dict(
        certificateArn=self.arn,
        certificateId=self.certificate_id,
        caCertificateId=self.ca_certificate_id,
        status=self.status,
        creationDate=self.creation_date,
    )
@ -509,6 +515,12 @@ class IoTBackend(BaseBackend):
def list_certificates(self):
return self.certificates.values()
def register_certificate(self, certificate_pem, ca_certificate_pem, set_as_active, status):
    """Register a device certificate, optionally signed by a CA.

    When *set_as_active* is truthy the certificate is stored ACTIVE,
    otherwise with the caller-supplied *status*.  Returns the new
    FakeCertificate, keyed in self.certificates by its id.
    """
    effective_status = 'ACTIVE' if set_as_active else status
    certificate = FakeCertificate(certificate_pem, effective_status,
                                  self.region_name, ca_certificate_pem)
    self.certificates[certificate.certificate_id] = certificate
    return certificate
def update_certificate(self, certificate_id, new_status):
cert = self.describe_certificate(certificate_id)
# TODO: validate new_status

View File

@ -296,6 +296,20 @@ class IoTResponse(BaseResponse):
# TODO: implement pagination in the future
return json.dumps(dict(certificates=[_.to_dict() for _ in certificates]))
def register_certificate(self):
    """Handle RegisterCertificate: store a cert (optionally CA-signed)
    and return its id and ARN as JSON."""
    pem = self._get_param("certificatePem")
    ca_pem = self._get_param("caCertificatePem")
    as_active = self._get_bool_param("setAsActive")
    requested_status = self._get_param("status")

    cert = self.iot_backend.register_certificate(
        certificate_pem=pem,
        ca_certificate_pem=ca_pem,
        set_as_active=as_active,
        status=requested_status,
    )
    return json.dumps(
        dict(certificateId=cert.certificate_id, certificateArn=cert.arn))
def update_certificate(self):
certificate_id = self._get_param("certificateId")
new_status = self._get_param("newStatus")

View File

@ -116,10 +116,12 @@ class Stream(BaseModel):
def __init__(self, stream_name, shard_count, region):
self.stream_name = stream_name
self.shard_count = shard_count
self.creation_datetime = datetime.datetime.now()
self.region = region
self.account_number = "123456789012"
self.shards = {}
self.tags = {}
self.status = "ACTIVE"
if six.PY3:
izip_longest = itertools.zip_longest
@ -183,12 +185,23 @@ class Stream(BaseModel):
"StreamDescription": {
"StreamARN": self.arn,
"StreamName": self.stream_name,
"StreamStatus": "ACTIVE",
"StreamStatus": self.status,
"HasMoreShards": False,
"Shards": [shard.to_json() for shard in self.shards.values()],
}
}
def to_json_summary(self):
    """Shape matching the Kinesis DescribeStreamSummary response —
    stream-level fields only, no per-shard detail."""
    summary = {
        "StreamARN": self.arn,
        "StreamName": self.stream_name,
        "StreamStatus": self.status,
        # timestamp is serialized via str() (six.text_type) of the datetime
        "StreamCreationTimestamp": six.text_type(self.creation_datetime),
        "OpenShardCount": self.shard_count,
    }
    return {"StreamDescriptionSummary": summary}
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
@ -309,6 +322,9 @@ class KinesisBackend(BaseBackend):
else:
raise StreamNotFoundError(stream_name)
def describe_stream_summary(self, stream_name):
    """DescribeStreamSummary reuses describe_stream's lookup, so an
    unknown name raises StreamNotFoundError the same way."""
    target_stream = self.describe_stream(stream_name)
    return target_stream
def list_streams(self):
return self.streams.values()

View File

@ -33,6 +33,11 @@ class KinesisResponse(BaseResponse):
stream = self.kinesis_backend.describe_stream(stream_name)
return json.dumps(stream.to_json())
def describe_stream_summary(self):
    """Handle DescribeStreamSummary: serialize the stream's summary dict."""
    name = self.parameters.get('StreamName')
    summary_stream = self.kinesis_backend.describe_stream_summary(name)
    return json.dumps(summary_stream.to_json_summary())
def list_streams(self):
streams = self.kinesis_backend.list_streams()
stream_names = [stream.stream_name for stream in streams]

View File

@ -1,8 +1,19 @@
import sys
import base64
from .exceptions import InvalidArgumentError
if sys.version_info[0] == 2:
encode_method = base64.encodestring
decode_method = base64.decodestring
elif sys.version_info[0] == 3:
encode_method = base64.encodebytes
decode_method = base64.decodebytes
else:
raise Exception("Python version is not supported")
def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting_sequence_number,
at_timestamp):
if shard_iterator_type == "AT_SEQUENCE_NUMBER":
@ -22,7 +33,7 @@ def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting
def compose_shard_iterator(stream_name, shard, last_sequence_id):
return base64.encodestring(
return encode_method(
"{0}:{1}:{2}".format(
stream_name,
shard.shard_id,
@ -32,4 +43,4 @@ def compose_shard_iterator(stream_name, shard, last_sequence_id):
def decompose_shard_iterator(shard_iterator):
return base64.decodestring(shard_iterator.encode("utf-8")).decode("utf-8").split(":")
return decode_method(shard_iterator.encode("utf-8")).decode("utf-8").split(":")

View File

@ -3,7 +3,7 @@ from __future__ import unicode_literals
import os
import boto.kms
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_without_milliseconds
from moto.core.utils import iso_8601_datetime_without_milliseconds, unix_time
from .utils import generate_key_id
from collections import defaultdict
from datetime import datetime, timedelta
@ -37,7 +37,7 @@ class Key(BaseModel):
"KeyMetadata": {
"AWSAccountId": self.account_id,
"Arn": self.arn,
"CreationDate": datetime.strftime(datetime.utcnow(), "%s"),
"CreationDate": "%d" % unix_time(),
"Description": self.description,
"Enabled": self.enabled,
"KeyId": self.id,

View File

@ -137,6 +137,7 @@ class LogGroup:
self.creationTime = unix_time_millis()
self.tags = tags
self.streams = dict() # {name: LogStream}
self.retentionInDays = None # AWS defaults to Never Expire for log group retention
def create_log_stream(self, log_stream_name):
if log_stream_name in self.streams:
@ -201,14 +202,20 @@ class LogGroup:
return events_page, next_token, searched_streams
def to_describe_dict(self):
return {
log_group = {
"arn": self.arn,
"creationTime": self.creationTime,
"logGroupName": self.name,
"metricFilterCount": 0,
"retentionInDays": 30,
"storedBytes": sum(s.storedBytes for s in self.streams.values()),
}
# AWS only returns retentionInDays if a value is set for the log group (ie. not Never Expire)
if self.retentionInDays:
log_group["retentionInDays"] = self.retentionInDays
return log_group
def set_retention_policy(self, retention_in_days):
self.retentionInDays = retention_in_days
class LogsBackend(BaseBackend):
@ -289,5 +296,17 @@ class LogsBackend(BaseBackend):
log_group = self.groups[log_group_name]
return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)
def put_retention_policy(self, log_group_name, retention_in_days):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.set_retention_policy(retention_in_days)
def delete_retention_policy(self, log_group_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.set_retention_policy(None)
logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()}

View File

@ -123,3 +123,14 @@ class LogsResponse(BaseResponse):
"nextToken": next_token,
"searchedLogStreams": searched_streams
})
def put_retention_policy(self):
    """Apply a retention policy (in days) to the requested log group."""
    group_name = self._get_param('logGroupName')
    days = self._get_param('retentionInDays')
    self.logs_backend.put_retention_policy(group_name, days)
    return ''
def delete_retention_policy(self):
    """Remove the retention policy from the requested log group (back to Never Expire)."""
    group_name = self._get_param('logGroupName')
    self.logs_backend.delete_retention_policy(group_name)
    return ''

View File

@ -47,6 +47,7 @@ class FakeOrganization(BaseModel):
class FakeAccount(BaseModel):
def __init__(self, organization, **kwargs):
self.type = 'ACCOUNT'
self.organization_id = organization.id
self.master_account_id = organization.master_account_id
self.create_account_status_id = utils.make_random_create_account_status_id()
@ -57,6 +58,7 @@ class FakeAccount(BaseModel):
self.status = 'ACTIVE'
self.joined_method = 'CREATED'
self.parent_id = organization.root_id
self.attached_policies = []
@property
def arn(self):
@ -103,6 +105,7 @@ class FakeOrganizationalUnit(BaseModel):
self.name = kwargs.get('Name')
self.parent_id = kwargs.get('ParentId')
self._arn_format = utils.OU_ARN_FORMAT
self.attached_policies = []
@property
def arn(self):
@ -134,6 +137,7 @@ class FakeRoot(FakeOrganizationalUnit):
'Status': 'ENABLED'
}]
self._arn_format = utils.ROOT_ARN_FORMAT
self.attached_policies = []
def describe(self):
return {
@ -144,12 +148,52 @@ class FakeRoot(FakeOrganizationalUnit):
}
class FakeServiceControlPolicy(BaseModel):
    """In-memory model of an AWS Organizations service control policy (SCP)."""

    def __init__(self, organization, **kwargs):
        # The object type comes from the request's 'Type' parameter; the
        # previous hard-coded `self.type = 'POLICY'` was a dead store that was
        # immediately overwritten, so it has been removed.
        self.content = kwargs.get('Content')
        self.description = kwargs.get('Description')
        self.name = kwargs.get('Name')
        self.type = kwargs.get('Type')
        self.id = utils.make_random_service_control_policy_id()
        self.aws_managed = False
        self.organization_id = organization.id
        self.master_account_id = organization.master_account_id
        self._arn_format = utils.SCP_ARN_FORMAT
        # Roots/OUs/accounts this policy is currently attached to.
        self.attachments = []

    @property
    def arn(self):
        """ARN derived from the owning master account, organization and policy id."""
        return self._arn_format.format(
            self.master_account_id,
            self.organization_id,
            self.id
        )

    def describe(self):
        """Return the policy in the DescribePolicy response shape."""
        return {
            'Policy': {
                'PolicySummary': {
                    'Id': self.id,
                    'Arn': self.arn,
                    'Name': self.name,
                    'Description': self.description,
                    'Type': self.type,
                    'AwsManaged': self.aws_managed,
                },
                'Content': self.content
            }
        }
class OrganizationsBackend(BaseBackend):
def __init__(self):
    """Fresh backend state: no organization yet and empty object stores."""
    self.org = None
    self.accounts, self.ou, self.policies = [], [], []
def create_organization(self, **kwargs):
self.org = FakeOrganization(kwargs['FeatureSet'])
@ -292,5 +336,108 @@ class OrganizationsBackend(BaseBackend):
]
)
def create_policy(self, **kwargs):
    """Create a service control policy in this org and return its description."""
    policy = FakeServiceControlPolicy(self.org, **kwargs)
    self.policies.append(policy)
    return policy.describe()
def describe_policy(self, **kwargs):
    """Return the description of the policy named by kwargs['PolicyId'].

    Raises InvalidInputException for a malformed id and
    PolicyNotFoundException for an unknown one.
    """
    policy_id = kwargs['PolicyId']
    if not re.compile(utils.SCP_ID_REGEX).match(policy_id):
        raise RESTError(
            'InvalidInputException',
            'You specified an invalid value.'
        )
    for policy in self.policies:
        if policy.id == policy_id:
            return policy.describe()
    raise RESTError(
        'PolicyNotFoundException',
        "You specified a policy that doesn't exist."
    )
def attach_policy(self, **kwargs):
    """Attach a policy to a root, organizational unit or account.

    Raises RESTError (PolicyNotFoundException / OrganizationalUnitNotFoundException /
    AccountNotFoundException / InvalidInputException) when either side of the
    attachment cannot be resolved.
    """
    policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None)
    # Fail fast on an unknown policy; previously a None policy slipped through
    # and crashed later on `policy.attachments.append(...)`.
    if policy is None:
        raise RESTError(
            'PolicyNotFoundException',
            "You specified a policy that doesn't exist."
        )
    target_id = kwargs['TargetId']
    if (re.compile(utils.ROOT_ID_REGEX).match(target_id) or
            re.compile(utils.OU_ID_REGEX).match(target_id)):
        ou = next((ou for ou in self.ou if ou.id == target_id), None)
        if ou is not None:
            # Guard against double attachment by checking for the *policy*
            # (the original checked `ou not in ou.attached_policies`, which is
            # always true, so every call re-appended the policy).
            if policy not in ou.attached_policies:
                ou.attached_policies.append(policy)
                policy.attachments.append(ou)
        else:
            raise RESTError(
                'OrganizationalUnitNotFoundException',
                "You specified an organizational unit that doesn't exist."
            )
    elif re.compile(utils.ACCOUNT_ID_REGEX).match(target_id):
        account = next((a for a in self.accounts if a.id == target_id), None)
        if account is not None:
            # Same duplicate-attachment fix as above, for accounts.
            if policy not in account.attached_policies:
                account.attached_policies.append(policy)
                policy.attachments.append(account)
        else:
            raise RESTError(
                'AccountNotFoundException',
                "You specified an account that doesn't exist."
            )
    else:
        raise RESTError(
            'InvalidInputException',
            'You specified an invalid value.'
        )
def list_policies(self, **kwargs):
    """Return the summaries of every policy in the organization."""
    summaries = []
    for policy in self.policies:
        summaries.append(policy.describe()['Policy']['PolicySummary'])
    return dict(Policies=summaries)
def list_policies_for_target(self, **kwargs):
    """List summaries of the policies attached to the given root/OU/account.

    Accepts root ids as well as OU ids, mirroring attach_policy (roots are
    stored alongside OUs in self.ou since FakeRoot subclasses
    FakeOrganizationalUnit); the original rejected root targets that
    attach_policy had happily attached to.
    """
    target_id = kwargs['TargetId']
    if (re.compile(utils.ROOT_ID_REGEX).match(target_id) or
            re.compile(utils.OU_ID_REGEX).match(target_id)):
        obj = next((ou for ou in self.ou if ou.id == target_id), None)
        if obj is None:
            raise RESTError(
                'OrganizationalUnitNotFoundException',
                "You specified an organizational unit that doesn't exist."
            )
    elif re.compile(utils.ACCOUNT_ID_REGEX).match(target_id):
        obj = next((a for a in self.accounts if a.id == target_id), None)
        if obj is None:
            raise RESTError(
                'AccountNotFoundException',
                "You specified an account that doesn't exist."
            )
    else:
        raise RESTError(
            'InvalidInputException',
            'You specified an invalid value.'
        )
    return dict(Policies=[
        p.describe()['Policy']['PolicySummary'] for p in obj.attached_policies
    ])
def list_targets_for_policy(self, **kwargs):
    """Describe every root/OU/account the given policy is attached to.

    Raises InvalidInputException for a malformed policy id and
    PolicyNotFoundException for an unknown one.
    """
    policy_id = kwargs['PolicyId']
    if not re.compile(utils.SCP_ID_REGEX).match(policy_id):
        raise RESTError(
            'InvalidInputException',
            'You specified an invalid value.'
        )
    policy = next((p for p in self.policies if p.id == policy_id), None)
    if policy is None:
        raise RESTError(
            'PolicyNotFoundException',
            "You specified a policy that doesn't exist."
        )
    targets = []
    for obj in policy.attachments:
        targets.append({
            'TargetId': obj.id,
            'Arn': obj.arn,
            'Name': obj.name,
            'Type': obj.type,
        })
    return dict(Targets=targets)
organizations_backend = OrganizationsBackend()

View File

@ -85,3 +85,33 @@ class OrganizationsResponse(BaseResponse):
return json.dumps(
self.organizations_backend.list_children(**self.request_params)
)
def create_policy(self):
    """Proxy CreatePolicy to the backend and JSON-encode the result."""
    result = self.organizations_backend.create_policy(**self.request_params)
    return json.dumps(result)
def describe_policy(self):
    """Proxy DescribePolicy to the backend and JSON-encode the result."""
    result = self.organizations_backend.describe_policy(**self.request_params)
    return json.dumps(result)
def attach_policy(self):
    """Proxy AttachPolicy to the backend and JSON-encode the result."""
    result = self.organizations_backend.attach_policy(**self.request_params)
    return json.dumps(result)
def list_policies(self):
    """Proxy ListPolicies to the backend and JSON-encode the result."""
    result = self.organizations_backend.list_policies(**self.request_params)
    return json.dumps(result)
def list_policies_for_target(self):
    """Proxy ListPoliciesForTarget to the backend and JSON-encode the result."""
    result = self.organizations_backend.list_policies_for_target(**self.request_params)
    return json.dumps(result)
def list_targets_for_policy(self):
    """Proxy ListTargetsForPolicy to the backend and JSON-encode the result."""
    result = self.organizations_backend.list_targets_for_policy(**self.request_params)
    return json.dumps(result)

View File

@ -10,6 +10,7 @@ MASTER_ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{0}'
ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{2}'
ROOT_ARN_FORMAT = 'arn:aws:organizations::{0}:root/{1}/{2}'
OU_ARN_FORMAT = 'arn:aws:organizations::{0}:ou/{1}/{2}'
SCP_ARN_FORMAT = 'arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}'
CHARSET = string.ascii_lowercase + string.digits
ORG_ID_SIZE = 10
@ -17,6 +18,15 @@ ROOT_ID_SIZE = 4
ACCOUNT_ID_SIZE = 12
OU_ID_SUFFIX_SIZE = 8
CREATE_ACCOUNT_STATUS_ID_SIZE = 8
SCP_ID_SIZE = 8
EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$"
ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % ORG_ID_SIZE
ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % ROOT_ID_SIZE
OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE)
ACCOUNT_ID_REGEX = r'[0-9]{%s}' % ACCOUNT_ID_SIZE
CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % CREATE_ACCOUNT_STATUS_ID_SIZE
SCP_ID_REGEX = r'p-[a-z0-9]{%s}' % SCP_ID_SIZE
def make_random_org_id():
@ -57,3 +67,10 @@ def make_random_create_account_status_id():
# "car-" followed by from 8 to 32 lower-case letters or digits.
# e.g. 'car-35gxzwrp'
return 'car-' + ''.join(random.choice(CHARSET) for x in range(CREATE_ACCOUNT_STATUS_ID_SIZE))
def make_random_service_control_policy_id():
# The regex pattern for a policy ID string requires "p-" followed by
# from 8 to 128 lower-case letters or digits.
# e.g. 'p-k2av4a8a'
return 'p-' + ''.join(random.choice(CHARSET) for x in range(SCP_ID_SIZE))

View File

@ -531,14 +531,37 @@ class RedshiftBackend(BaseBackend):
setattr(cluster, key, value)
if new_cluster_identifier:
self.delete_cluster(cluster_identifier)
dic = {
"cluster_identifier": cluster_identifier,
"skip_final_snapshot": True,
"final_cluster_snapshot_identifier": None
}
self.delete_cluster(**dic)
cluster.cluster_identifier = new_cluster_identifier
self.clusters[new_cluster_identifier] = cluster
return cluster
def delete_cluster(self, cluster_identifier):
def delete_cluster(self, **cluster_kwargs):
cluster_identifier = cluster_kwargs.pop("cluster_identifier")
cluster_skip_final_snapshot = cluster_kwargs.pop("skip_final_snapshot")
cluster_snapshot_identifer = cluster_kwargs.pop("final_cluster_snapshot_identifier")
if cluster_identifier in self.clusters:
if cluster_skip_final_snapshot is False and cluster_snapshot_identifer is None:
raise ClientError(
"InvalidParameterValue",
'FinalSnapshotIdentifier is required for Snapshot copy '
'when SkipFinalSnapshot is False'
)
elif cluster_skip_final_snapshot is False and cluster_snapshot_identifer is not None: # create snapshot
cluster = self.describe_clusters(cluster_identifier)[0]
self.create_cluster_snapshot(
cluster_identifier,
cluster_snapshot_identifer,
cluster.region,
cluster.tags)
return self.clusters.pop(cluster_identifier)
raise ClusterNotFoundError(cluster_identifier)
@ -617,9 +640,12 @@ class RedshiftBackend(BaseBackend):
def describe_cluster_snapshots(self, cluster_identifier=None, snapshot_identifier=None):
if cluster_identifier:
cluster_snapshots = []
for snapshot in self.snapshots.values():
if snapshot.cluster.cluster_identifier == cluster_identifier:
return [snapshot]
cluster_snapshots.append(snapshot)
if cluster_snapshots:
return cluster_snapshots
raise ClusterNotFoundError(cluster_identifier)
if snapshot_identifier:

View File

@ -240,8 +240,13 @@ class RedshiftResponse(BaseResponse):
})
def delete_cluster(self):
cluster_identifier = self._get_param("ClusterIdentifier")
cluster = self.redshift_backend.delete_cluster(cluster_identifier)
request_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"final_cluster_snapshot_identifier": self._get_param("FinalClusterSnapshotIdentifier"),
"skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot")
}
cluster = self.redshift_backend.delete_cluster(**request_kwargs)
return self.get_response({
"DeleteClusterResponse": {

View File

@ -0,0 +1,6 @@
from __future__ import unicode_literals
from .models import resourcegroups_backends
from ..core.models import base_decorator
resourcegroups_backend = resourcegroups_backends['us-east-1']
mock_resourcegroups = base_decorator(resourcegroups_backends)

View File

@ -0,0 +1,13 @@
from __future__ import unicode_literals
import json
from werkzeug.exceptions import HTTPException
class BadRequestException(HTTPException):
    """HTTP 400 error whose body mimics the AWS JSON error envelope."""

    code = 400

    def __init__(self, message, **kwargs):
        body = json.dumps({"Message": message, "Code": "BadRequestException"})
        super(BadRequestException, self).__init__(description=body, **kwargs)

View File

@ -0,0 +1,338 @@
from __future__ import unicode_literals
from builtins import str
import boto3
import json
import re
from moto.core import BaseBackend, BaseModel
from .exceptions import BadRequestException
class FakeResourceGroup(BaseModel):
    """In-memory model of a resource group.

    Constructor arguments are validated together: every failed constraint is
    collected in ``self.errors`` and raised as a single BadRequestException,
    mirroring how AWS reports multiple validation errors in one message.
    """

    def __init__(self, name, resource_query, description=None, tags=None):
        self.errors = []
        description = description or ""
        tags = tags or {}
        if self._validate_description(value=description):
            self._description = description
        if self._validate_name(value=name):
            self._name = name
        if self._validate_resource_query(value=resource_query):
            self._resource_query = resource_query
        if self._validate_tags(value=tags):
            self._tags = tags
        self._raise_errors()
        # NOTE(review): region and account id are hard-coded here; confirm
        # whether the owning backend's region should be used instead.
        self.arn = "arn:aws:resource-groups:us-west-1:123456789012:{name}".format(name=name)

    @staticmethod
    def _format_error(key, value, constraint):
        """Render one validation failure in the AWS error-message format."""
        return "Value '{value}' at '{key}' failed to satisfy constraint: {constraint}".format(
            constraint=constraint,
            key=key,
            value=value,
        )

    def _raise_errors(self):
        """Raise all collected validation errors as one BadRequestException."""
        if self.errors:
            errors_len = len(self.errors)
            plural = "s" if len(self.errors) > 1 else ""
            errors = "; ".join(self.errors)
            raise BadRequestException("{errors_len} validation error{plural} detected: {errors}".format(
                errors_len=errors_len, plural=plural, errors=errors,
            ))

    def _validate_description(self, value):
        """Validate length and charset of the description; returns True if valid."""
        errors = []
        if len(value) > 511:
            errors.append(self._format_error(
                key="description",
                value=value,
                constraint="Member must have length less than or equal to 512",
            ))
        if not re.match(r"^[\sa-zA-Z0-9_.-]*$", value):
            errors.append(self._format_error(
                # Was key="name" — a copy-paste bug; this error is about the
                # description field.
                key="description",
                value=value,
                constraint=r"Member must satisfy regular expression pattern: [\sa-zA-Z0-9_\.-]*",
            ))
        if errors:
            self.errors += errors
            return False
        return True

    def _validate_name(self, value):
        """Validate length and charset of the group name; returns True if valid."""
        errors = []
        if len(value) > 128:
            errors.append(self._format_error(
                key="name",
                value=value,
                constraint="Member must have length less than or equal to 128",
            ))
        # Note \ is a character to match not an escape.
        if not re.match(r"^[a-zA-Z0-9_\\.-]+$", value):
            errors.append(self._format_error(
                key="name",
                value=value,
                constraint=r"Member must satisfy regular expression pattern: [a-zA-Z0-9_\.-]+",
            ))
        if errors:
            self.errors += errors
            return False
        return True

    def _validate_resource_query(self, value):
        """Validate the resource query's Type and Query length; returns True if valid."""
        errors = []
        if value["Type"] not in {"CLOUDFORMATION_STACK_1_0", "TAG_FILTERS_1_0"}:
            errors.append(self._format_error(
                key="resourceQuery.type",
                value=value,
                constraint="Member must satisfy enum value set: [CLOUDFORMATION_STACK_1_0, TAG_FILTERS_1_0]",
            ))
        if len(value["Query"]) > 2048:
            errors.append(self._format_error(
                key="resourceQuery.query",
                value=value,
                constraint="Member must have length less than or equal to 2048",
            ))
        if errors:
            self.errors += errors
            return False
        return True

    def _validate_tags(self, value):
        """Validate tag keys and values; returns True if all are valid.

        Fixes three defects in the original: the pattern used doubled
        backslashes (so ``\\p{...}`` matched literal characters, and stdlib
        ``re`` has no ``\\p{}`` classes anyway), both conditions were inverted
        (errors fired for *valid* strings), and the value check mistakenly
        re-tested the key.
        """
        errors = []
        # AWS only outputs one error for all keys and one for all values.
        error_keys = None
        error_values = None
        # Approximates the AWS pattern ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$ with
        # constructs the stdlib `re` supports (\w and \s are Unicode-aware in
        # Python 3).
        regex = re.compile(r"^[\w\s.:/=+\-@]*$")
        for tag_key, tag_value in value.items():
            # Validation for len(tag_key) >= 1 is done by botocore.
            if len(tag_key) > 128 or not regex.match(tag_key):
                error_keys = self._format_error(
                    key="tags",
                    value=value,
                    constraint=(
                        "Map value must satisfy constraint: ["
                        "Member must have length less than or equal to 128, "
                        "Member must have length greater than or equal to 1, "
                        r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$"
                        "]"
                    ),
                )
            # Validation for len(tag_value) >= 0 is nonsensical.
            if len(tag_value) > 256 or not regex.match(tag_value):
                error_values = self._format_error(
                    key="tags",
                    value=value,
                    constraint=(
                        "Map value must satisfy constraint: ["
                        "Member must have length less than or equal to 256, "
                        "Member must have length greater than or equal to 0, "
                        r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$"
                        "]"
                    ),
                )
        if error_keys:
            errors.append(error_keys)
        if error_values:
            errors.append(error_values)
        if errors:
            self.errors += errors
            return False
        return True

    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, value):
        # Re-validate on mutation; raises immediately instead of accumulating.
        if not self._validate_description(value=value):
            self._raise_errors()
        self._description = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        if not self._validate_name(value=value):
            self._raise_errors()
        self._name = value

    @property
    def resource_query(self):
        return self._resource_query

    @resource_query.setter
    def resource_query(self, value):
        if not self._validate_resource_query(value=value):
            self._raise_errors()
        self._resource_query = value

    @property
    def tags(self):
        return self._tags

    @tags.setter
    def tags(self, value):
        if not self._validate_tags(value=value):
            self._raise_errors()
        self._tags = value
class ResourceGroups():
    """Collection of resource groups indexed both by name and by ARN."""

    def __init__(self):
        self.by_name = {}
        self.by_arn = {}

    def __contains__(self, item):
        # Membership is tested against group names only.
        return item in self.by_name

    def append(self, resource_group):
        """Register a group under both of its lookup keys."""
        self.by_name[resource_group.name] = resource_group
        self.by_arn[resource_group.arn] = resource_group

    def delete(self, name):
        """Drop the named group from both indexes and return it."""
        group = self.by_name.pop(name)
        self.by_arn.pop(group.arn)
        return group
class ResourceGroupsBackend(BaseBackend):
    """Region-scoped store of resource groups plus request-level validation."""

    def __init__(self, region_name=None):
        super(ResourceGroupsBackend, self).__init__()
        self.region_name = region_name
        self.groups = ResourceGroups()

    @staticmethod
    def _validate_resource_query(resource_query):
        """Validate a ResourceQuery dict ({'Type': ..., 'Query': <json string>}).

        Raises BadRequestException on any structural problem; returns None on
        success.
        """
        # Renamed from `type`, which shadowed the builtin.
        query_type = resource_query["Type"]
        query = json.loads(resource_query["Query"])
        query_keys = set(query.keys())
        invalid_json_exception = BadRequestException("Invalid query: Invalid query format: check JSON syntax")
        # Use .get() so a missing ResourceTypeFilters key is reported as a bad
        # request instead of escaping as a KeyError.
        if not isinstance(query.get("ResourceTypeFilters"), list):
            raise invalid_json_exception
        if query_type == "CLOUDFORMATION_STACK_1_0":
            if query_keys != {"ResourceTypeFilters", "StackIdentifier"}:
                raise invalid_json_exception
            stack_identifier = query["StackIdentifier"]
            if not isinstance(stack_identifier, str):
                raise invalid_json_exception
            if not re.match(
                r"^arn:aws:cloudformation:[a-z]{2}-[a-z]+-[0-9]+:[0-9]+:stack/[-0-9A-z]+/[-0-9a-f]+$",
                stack_identifier,
            ):
                raise BadRequestException(
                    "Invalid query: Verify that the specified ARN is formatted correctly."
                )
            # Once checking other resources is implemented.
            # if stack_identifier not in self.cloudformation_backend.stacks:
            #     raise BadRequestException("Invalid query: The specified CloudFormation stack doesn't exist.")
        if query_type == "TAG_FILTERS_1_0":
            if query_keys != {"ResourceTypeFilters", "TagFilters"}:
                raise invalid_json_exception
            tag_filters = query["TagFilters"]
            if not isinstance(tag_filters, list):
                raise invalid_json_exception
            if not tag_filters or len(tag_filters) > 50:
                raise BadRequestException(
                    "Invalid query: The TagFilters list must contain between 1 and 50 elements"
                )
            for tag_filter in tag_filters:
                if not isinstance(tag_filter, dict):
                    raise invalid_json_exception
                if set(tag_filter.keys()) != {"Key", "Values"}:
                    raise invalid_json_exception
                key = tag_filter["Key"]
                if not isinstance(key, str):
                    raise invalid_json_exception
                if not key:
                    raise BadRequestException(
                        "Invalid query: The TagFilter element cannot have empty or null Key field"
                    )
                if len(key) > 128:
                    raise BadRequestException("Invalid query: The maximum length for a tag Key is 128")
                values = tag_filter["Values"]
                if not isinstance(values, list):
                    raise invalid_json_exception
                if len(values) > 20:
                    raise BadRequestException(
                        "Invalid query: The TagFilter Values list must contain between 0 and 20 elements"
                    )
                for value in values:
                    if not isinstance(value, str):
                        raise invalid_json_exception
                    if len(value) > 256:
                        raise BadRequestException(
                            "Invalid query: The maximum length for a tag Value is 256"
                        )

    @staticmethod
    def _validate_tags(tags):
        """Reject tag keys in the reserved 'aws:' namespace."""
        for tag in tags:
            if tag.lower().startswith('aws:'):
                raise BadRequestException("Tag keys must not start with 'aws:'")

    def create_group(self, name, resource_query, description=None, tags=None):
        """Validate and store a new group; returns the FakeResourceGroup."""
        tags = tags or {}
        # FakeResourceGroup performs its own field-level validation and may
        # raise before the request-level checks below run.
        group = FakeResourceGroup(
            name=name,
            resource_query=resource_query,
            description=description,
            tags=tags,
        )
        if name in self.groups:
            raise BadRequestException("Cannot create group: group already exists")
        if name.upper().startswith("AWS"):
            raise BadRequestException("Group name must not start with 'AWS'")
        self._validate_tags(tags)
        self._validate_resource_query(resource_query)
        self.groups.append(group)
        return group

    def delete_group(self, group_name):
        """Remove and return the named group (KeyError if unknown)."""
        return self.groups.delete(name=group_name)

    def get_group(self, group_name):
        """Look a group up by name (KeyError if unknown)."""
        return self.groups.by_name[group_name]

    def get_tags(self, arn):
        """Return the tag map of the group identified by ARN."""
        return self.groups.by_arn[arn].tags

    # def list_group_resources(self):
    #     ...

    def list_groups(self, filters=None, max_results=None, next_token=None):
        # Filtering and pagination parameters are accepted but not yet applied.
        return self.groups.by_name

    # def search_resources(self):
    #     ...

    def tag(self, arn, tags):
        """Merge new tags into the group's tags, validating the merged set."""
        all_tags = self.groups.by_arn[arn].tags
        all_tags.update(tags)
        self._validate_tags(all_tags)
        self.groups.by_arn[arn].tags = all_tags

    def untag(self, arn, keys):
        """Remove the listed tag keys from the group identified by ARN."""
        group = self.groups.by_arn[arn]
        for key in keys:
            del group.tags[key]

    def update_group(self, group_name, description=None):
        """Update the group's description (if given) and return the group."""
        if description:
            self.groups.by_name[group_name].description = description
        return self.groups.by_name[group_name]

    def update_group_query(self, group_name, resource_query):
        """Validate and replace the group's resource query; return the group."""
        self._validate_resource_query(resource_query)
        self.groups.by_name[group_name].resource_query = resource_query
        return self.groups.by_name[group_name]
available_regions = boto3.session.Session().get_available_regions("resource-groups")
resourcegroups_backends = {region: ResourceGroupsBackend(region_name=region) for region in available_regions}

View File

@ -0,0 +1,162 @@
from __future__ import unicode_literals
import json
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from moto.core.responses import BaseResponse
from .models import resourcegroups_backends
class ResourceGroupsResponse(BaseResponse):
    """Maps AWS Resource Groups REST requests onto the region's mock backend."""

    SERVICE_NAME = 'resource-groups'

    @property
    def resourcegroups_backend(self):
        # Backend instance for the region parsed from the request URL.
        return resourcegroups_backends[self.region]

    def create_group(self):
        """CreateGroup: validate and store a new group, echoing it back as JSON."""
        name = self._get_param("Name")
        description = self._get_param("Description")
        resource_query = self._get_param("ResourceQuery")
        tags = self._get_param("Tags")
        group = self.resourcegroups_backend.create_group(
            name=name,
            description=description,
            resource_query=resource_query,
            tags=tags,
        )
        return json.dumps({
            "Group": {
                "GroupArn": group.arn,
                "Name": group.name,
                "Description": group.description
            },
            "ResourceQuery": group.resource_query,
            "Tags": group.tags
        })

    def delete_group(self):
        """DeleteGroup: remove the named group and return its summary."""
        group_name = self._get_param("GroupName")
        group = self.resourcegroups_backend.delete_group(group_name=group_name)
        return json.dumps({
            "Group": {
                "GroupArn": group.arn,
                "Name": group.name,
                "Description": group.description
            },
        })

    def get_group(self):
        """GetGroup: return the named group's summary."""
        group_name = self._get_param("GroupName")
        group = self.resourcegroups_backend.get_group(group_name=group_name)
        return json.dumps({
            "Group": {
                "GroupArn": group.arn,
                "Name": group.name,
                "Description": group.description,
            }
        })

    def get_group_query(self):
        """GetGroupQuery: return the named group's resource query."""
        group_name = self._get_param("GroupName")
        group = self.resourcegroups_backend.get_group(group_name=group_name)
        return json.dumps({
            "GroupQuery": {
                "GroupName": group.name,
                "ResourceQuery": group.resource_query,
            }
        })

    def get_tags(self):
        """GetTags: return the tags of the group whose (URL-encoded) ARN is given."""
        arn = unquote(self._get_param("Arn"))
        return json.dumps({
            "Arn": arn,
            "Tags": self.resourcegroups_backend.get_tags(arn=arn)
        })

    def list_group_resources(self):
        raise NotImplementedError('ResourceGroups.list_group_resources is not yet implemented')

    def list_groups(self):
        """ListGroups: return identifiers and summaries for every group.

        Filters are not supported yet; MaxResults/NextToken are accepted but
        pagination is not applied by the backend.
        """
        filters = self._get_param("Filters")
        if filters:
            raise NotImplementedError(
                'ResourceGroups.list_groups with filter parameter is not yet implemented'
            )
        max_results = self._get_int_param("MaxResults", 50)
        next_token = self._get_param("NextToken")
        groups = self.resourcegroups_backend.list_groups(
            filters=filters,
            max_results=max_results,
            next_token=next_token
        )
        return json.dumps({
            "GroupIdentifiers": [{
                "GroupName": group.name,
                "GroupArn": group.arn,
            } for group in groups.values()],
            "Groups": [{
                "GroupArn": group.arn,
                "Name": group.name,
                "Description": group.description,
            } for group in groups.values()],
            "NextToken": next_token,
        })

    def search_resources(self):
        raise NotImplementedError('ResourceGroups.search_resources is not yet implemented')

    def tag(self):
        """Tag: merge tags into the resource-group identified by ARN.

        Only resource-group ARNs are supported; tagging other resource types
        is not implemented.
        """
        arn = unquote(self._get_param("Arn"))
        tags = self._get_param("Tags")
        if arn not in self.resourcegroups_backend.groups.by_arn:
            raise NotImplementedError(
                'ResourceGroups.tag with non-resource-group Arn parameter is not yet implemented'
            )
        self.resourcegroups_backend.tag(arn=arn, tags=tags)
        return json.dumps({
            "Arn": arn,
            "Tags": tags
        })

    def untag(self):
        """Untag: remove the listed tag keys from the group identified by ARN."""
        arn = unquote(self._get_param("Arn"))
        keys = self._get_param("Keys")
        if arn not in self.resourcegroups_backend.groups.by_arn:
            raise NotImplementedError(
                'ResourceGroups.untag with non-resource-group Arn parameter is not yet implemented'
            )
        self.resourcegroups_backend.untag(arn=arn, keys=keys)
        return json.dumps({
            "Arn": arn,
            "Keys": keys
        })

    def update_group(self):
        """UpdateGroup: replace the group's description and return the summary."""
        group_name = self._get_param("GroupName")
        description = self._get_param("Description", "")
        group = self.resourcegroups_backend.update_group(group_name=group_name, description=description)
        return json.dumps({
            "Group": {
                "GroupArn": group.arn,
                "Name": group.name,
                "Description": group.description
            },
        })

    def update_group_query(self):
        """UpdateGroupQuery: validate and replace the group's resource query."""
        group_name = self._get_param("GroupName")
        resource_query = self._get_param("ResourceQuery")
        group = self.resourcegroups_backend.update_group_query(
            group_name=group_name,
            resource_query=resource_query
        )
        return json.dumps({
            "GroupQuery": {
                "GroupName": group.name,
                "ResourceQuery": resource_query
            }
        })

View File

@ -0,0 +1,14 @@
from __future__ import unicode_literals
from .responses import ResourceGroupsResponse
url_bases = [
    # Matches both the standard and the FIPS resource-groups endpoints, any region.
    "https?://resource-groups(-fips)?.(.+).amazonaws.com",
]

url_paths = {
    # All routes share one dispatcher; the backend action is resolved from the
    # path and HTTP verb by BaseResponse.dispatch.
    '{0}/groups$': ResourceGroupsResponse.dispatch,
    '{0}/groups/(?P<resource_group_name>[^/]+)$': ResourceGroupsResponse.dispatch,
    '{0}/groups/(?P<resource_group_name>[^/]+)/query$': ResourceGroupsResponse.dispatch,
    '{0}/groups-list$': ResourceGroupsResponse.dispatch,
    '{0}/resources/(?P<resource_arn>[^/]+)/tags$': ResourceGroupsResponse.dispatch,
}

View File

@ -17,8 +17,11 @@ import six
from bisect import insort
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
from .exceptions import BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, \
EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, DuplicateTagKeys
from .exceptions import (
BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, InvalidRequest,
EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass,
InvalidTargetBucketForLogging, DuplicateTagKeys, CrossLocationLoggingProhibitted
)
from .utils import clean_key_name, _VersionedKeyStore
MAX_BUCKET_NAME_LENGTH = 63
@ -463,6 +466,7 @@ class FakeBucket(BaseModel):
self.cors = []
self.logging = {}
self.notification_configuration = None
self.accelerate_configuration = None
@property
def location(self):
@ -557,7 +561,6 @@ class FakeBucket(BaseModel):
self.rules = []
def set_cors(self, rules):
from moto.s3.exceptions import InvalidRequest, MalformedXML
self.cors = []
if len(rules) > 100:
@ -607,7 +610,6 @@ class FakeBucket(BaseModel):
self.logging = {}
return
from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted
# Target bucket must exist in the same account (assuming all moto buckets are in the same account):
if not bucket_backend.buckets.get(logging_config["TargetBucket"]):
raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.")
@ -655,6 +657,13 @@ class FakeBucket(BaseModel):
if region != self.region_name:
raise InvalidNotificationDestination()
def set_accelerate_configuration(self, accelerate_config):
if self.accelerate_configuration is None and accelerate_config == 'Suspended':
# Cannot "suspend" a not active acceleration. Leaves it undefined
return
self.accelerate_configuration = accelerate_config
def set_website_configuration(self, website_configuration):
self.website_configuration = website_configuration
@ -857,6 +866,15 @@ class S3Backend(BaseBackend):
bucket = self.get_bucket(bucket_name)
bucket.set_notification_configuration(notification_config)
def put_bucket_accelerate_configuration(self, bucket_name, accelerate_configuration):
if accelerate_configuration not in ['Enabled', 'Suspended']:
raise MalformedXML()
bucket = self.get_bucket(bucket_name)
if bucket.name.find('.') != -1:
raise InvalidRequest('PutBucketAccelerateConfiguration')
bucket.set_accelerate_configuration(accelerate_configuration)
def initiate_multipart(self, bucket_name, key_name, metadata):
bucket = self.get_bucket(bucket_name)
new_multipart = FakeMultipart(key_name, metadata)
@ -894,12 +912,11 @@ class S3Backend(BaseBackend):
return multipart.set_part(part_id, value)
def copy_part(self, dest_bucket_name, multipart_id, part_id,
src_bucket_name, src_key_name, start_byte, end_byte):
src_key_name = clean_key_name(src_key_name)
src_bucket = self.get_bucket(src_bucket_name)
src_bucket_name, src_key_name, src_version_id, start_byte, end_byte):
dest_bucket = self.get_bucket(dest_bucket_name)
multipart = dest_bucket.multiparts[multipart_id]
src_value = src_bucket.keys[src_key_name].value
src_value = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id).value
if start_byte is not None:
src_value = src_value[start_byte:end_byte + 1]
return multipart.set_part(part_id, src_value)

52
moto/s3/responses.py Executable file → Normal file
View File

@ -257,6 +257,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
return 200, {}, ""
template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
return template.render(bucket=bucket)
elif "accelerate" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if bucket.accelerate_configuration is None:
template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
return 200, {}, template.render()
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif 'versions' in querystring:
delimiter = querystring.get('delimiter', [None])[0]
@ -442,6 +449,15 @@ class ResponseObject(_TemplateEnvironmentMixin):
raise MalformedXML()
except Exception as e:
raise e
elif "accelerate" in querystring:
try:
accelerate_status = self._accelerate_config_from_xml(body)
self.backend.put_bucket_accelerate_configuration(bucket_name, accelerate_status)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
else:
if body:
@ -691,6 +707,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
if 'x-amz-copy-source' in request.headers:
src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_key, src_version_id = src_key.split("?versionId=") if "?versionId=" in src_key else (src_key, None)
src_range = request.headers.get(
'x-amz-copy-source-range', '').split("bytes=")[-1]
@ -700,9 +718,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
except ValueError:
start_byte, end_byte = None, None
key = self.backend.copy_part(
bucket_name, upload_id, part_number, src_bucket,
src_key, start_byte, end_byte)
if self.backend.get_key(src_bucket, src_key, version_id=src_version_id):
key = self.backend.copy_part(
bucket_name, upload_id, part_number, src_bucket,
src_key, src_version_id, start_byte, end_byte)
else:
return 404, response_headers, ""
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
@ -741,8 +763,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
lstrip("/").split("/", 1)
src_version_id = parse_qs(src_key_parsed.query).get(
'versionId', [None])[0]
self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
storage=storage_class, acl=acl, src_version_id=src_version_id)
if self.backend.get_key(src_bucket, src_key, version_id=src_version_id):
self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
storage=storage_class, acl=acl, src_version_id=src_version_id)
else:
return 404, response_headers, ""
new_key = self.backend.get_key(bucket_name, key_name)
mdirective = request.headers.get('x-amz-metadata-directive')
if mdirective is not None and mdirective == 'REPLACE':
@ -1034,6 +1061,11 @@ class ResponseObject(_TemplateEnvironmentMixin):
return parsed_xml["NotificationConfiguration"]
def _accelerate_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
config = parsed_xml['AccelerateConfiguration']
return config['Status']
def _key_response_delete(self, bucket_name, query, key_name, headers):
if query.get('uploadId'):
upload_id = query['uploadId'][0]
@ -1686,3 +1718,13 @@ S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
{% endfor %}
</NotificationConfiguration>
"""
S3_BUCKET_ACCELERATE = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket.accelerate_configuration }}</Status>
</AccelerateConfiguration>
"""
S3_BUCKET_ACCELERATE_NOT_SET = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
"""

View File

@ -29,6 +29,14 @@ class InvalidParameterException(SecretsManagerClientError):
message)
class ResourceExistsException(SecretsManagerClientError):
def __init__(self, message):
super(ResourceExistsException, self).__init__(
'ResourceExistsException',
message
)
class InvalidRequestException(SecretsManagerClientError):
def __init__(self, message):
super(InvalidRequestException, self).__init__(

View File

@ -11,6 +11,7 @@ from moto.core import BaseBackend, BaseModel
from .exceptions import (
ResourceNotFoundException,
InvalidParameterException,
ResourceExistsException,
InvalidRequestException,
ClientError
)
@ -47,6 +48,17 @@ class SecretsManagerBackend(BaseBackend):
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException()
if not version_id and version_stage:
# set version_id to match version_stage
versions_dict = self.secrets[secret_id]['versions']
for ver_id, ver_val in versions_dict.items():
if version_stage in ver_val['version_stages']:
version_id = ver_id
break
if not version_id:
raise ResourceNotFoundException()
# TODO check this part
if 'deleted_date' in self.secrets[secret_id]:
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \
@ -54,42 +66,91 @@ class SecretsManagerBackend(BaseBackend):
)
secret = self.secrets[secret_id]
version_id = version_id or secret['default_version_id']
secret_version = secret['versions'][version_id]
response = json.dumps({
"ARN": secret_arn(self.region, secret['secret_id']),
"Name": secret['name'],
"VersionId": secret['version_id'],
"SecretString": secret['secret_string'],
"VersionStages": [
"AWSCURRENT",
],
"CreatedDate": secret['createdate']
"VersionId": secret_version['version_id'],
"SecretString": secret_version['secret_string'],
"VersionStages": secret_version['version_stages'],
"CreatedDate": secret_version['createdate'],
})
return response
def create_secret(self, name, secret_string, tags, **kwargs):
generated_version_id = str(uuid.uuid4())
# error if secret exists
if name in self.secrets.keys():
raise ResourceExistsException('A resource with the ID you requested already exists.')
secret = {
'secret_string': secret_string,
'secret_id': name,
'name': name,
'createdate': int(time.time()),
'rotation_enabled': False,
'rotation_lambda_arn': '',
'auto_rotate_after_days': 0,
'version_id': generated_version_id,
'tags': tags
}
self.secrets[name] = secret
version_id = self._add_secret(name, secret_string, tags=tags)
response = json.dumps({
"ARN": secret_arn(self.region, name),
"Name": name,
"VersionId": generated_version_id,
"VersionId": version_id,
})
return response
def _add_secret(self, secret_id, secret_string, tags=[], version_id=None, version_stages=None):
if version_stages is None:
version_stages = ['AWSCURRENT']
if not version_id:
version_id = str(uuid.uuid4())
secret_version = {
'secret_string': secret_string,
'createdate': int(time.time()),
'version_id': version_id,
'version_stages': version_stages,
}
if secret_id in self.secrets:
# remove all old AWSPREVIOUS stages
for secret_verion_to_look_at in self.secrets[secret_id]['versions'].values():
if 'AWSPREVIOUS' in secret_verion_to_look_at['version_stages']:
secret_verion_to_look_at['version_stages'].remove('AWSPREVIOUS')
# set old AWSCURRENT secret to AWSPREVIOUS
previous_current_version_id = self.secrets[secret_id]['default_version_id']
self.secrets[secret_id]['versions'][previous_current_version_id]['version_stages'] = ['AWSPREVIOUS']
self.secrets[secret_id]['versions'][version_id] = secret_version
self.secrets[secret_id]['default_version_id'] = version_id
else:
self.secrets[secret_id] = {
'versions': {
version_id: secret_version
},
'default_version_id': version_id,
}
secret = self.secrets[secret_id]
secret['secret_id'] = secret_id
secret['name'] = secret_id
secret['rotation_enabled'] = False
secret['rotation_lambda_arn'] = ''
secret['auto_rotate_after_days'] = 0
secret['tags'] = tags
return version_id
def put_secret_value(self, secret_id, secret_string, version_stages):
version_id = self._add_secret(secret_id, secret_string, version_stages=version_stages)
response = json.dumps({
'ARN': secret_arn(self.region, secret_id),
'Name': secret_id,
'VersionId': version_id,
'VersionStages': version_stages
})
return response
@ -162,17 +223,24 @@ class SecretsManagerBackend(BaseBackend):
secret = self.secrets[secret_id]
secret['version_id'] = client_request_token or ''
old_secret_version = secret['versions'][secret['default_version_id']]
new_version_id = client_request_token or str(uuid.uuid4())
self._add_secret(secret_id, old_secret_version['secret_string'], secret['tags'], version_id=new_version_id, version_stages=['AWSCURRENT'])
secret['rotation_lambda_arn'] = rotation_lambda_arn or ''
if rotation_rules:
secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0)
if secret['auto_rotate_after_days'] > 0:
secret['rotation_enabled'] = True
if 'AWSCURRENT' in old_secret_version['version_stages']:
old_secret_version['version_stages'].remove('AWSCURRENT')
response = json.dumps({
"ARN": secret_arn(self.region, secret['secret_id']),
"Name": secret['name'],
"VersionId": secret['version_id']
"VersionId": new_version_id
})
return response
@ -206,28 +274,54 @@ class SecretsManagerBackend(BaseBackend):
return response
def list_secret_version_ids(self, secret_id):
secret = self.secrets[secret_id]
version_list = []
for version_id, version in secret['versions'].items():
version_list.append({
'CreatedDate': int(time.time()),
'LastAccessedDate': int(time.time()),
'VersionId': version_id,
'VersionStages': version['version_stages'],
})
response = json.dumps({
'ARN': secret['secret_id'],
'Name': secret['name'],
'NextToken': '',
'Versions': version_list,
})
return response
def list_secrets(self, max_results, next_token):
# TODO implement pagination and limits
secret_list = [{
"ARN": secret_arn(self.region, secret['secret_id']),
"DeletedDate": secret.get('deleted_date', None),
"Description": "",
"KmsKeyId": "",
"LastAccessedDate": None,
"LastChangedDate": None,
"LastRotatedDate": None,
"Name": secret['name'],
"RotationEnabled": secret['rotation_enabled'],
"RotationLambdaARN": secret['rotation_lambda_arn'],
"RotationRules": {
"AutomaticallyAfterDays": secret['auto_rotate_after_days']
},
"SecretVersionsToStages": {
secret['version_id']: ["AWSCURRENT"]
},
"Tags": secret['tags']
} for secret in self.secrets.values()]
secret_list = []
for secret in self.secrets.values():
versions_to_stages = {}
for version_id, version in secret['versions'].items():
versions_to_stages[version_id] = version['version_stages']
secret_list.append({
"ARN": secret_arn(self.region, secret['secret_id']),
"DeletedDate": secret.get('deleted_date', None),
"Description": "",
"KmsKeyId": "",
"LastAccessedDate": None,
"LastChangedDate": None,
"LastRotatedDate": None,
"Name": secret['name'],
"RotationEnabled": secret['rotation_enabled'],
"RotationLambdaARN": secret['rotation_lambda_arn'],
"RotationRules": {
"AutomaticallyAfterDays": secret['auto_rotate_after_days']
},
"SecretVersionsToStages": versions_to_stages,
"Tags": secret['tags']
})
return secret_list, None

View File

@ -67,6 +67,22 @@ class SecretsManagerResponse(BaseResponse):
rotation_rules=rotation_rules
)
def put_secret_value(self):
secret_id = self._get_param('SecretId', if_none='')
secret_string = self._get_param('SecretString', if_none='')
version_stages = self._get_param('VersionStages', if_none=['AWSCURRENT'])
return secretsmanager_backends[self.region].put_secret_value(
secret_id=secret_id,
secret_string=secret_string,
version_stages=version_stages,
)
def list_secret_version_ids(self):
secret_id = self._get_param('SecretId', if_none='')
return secretsmanager_backends[self.region].list_secret_version_ids(
secret_id=secret_id
)
def list_secrets(self):
max_results = self._get_int_param("MaxResults")
next_token = self._get_param("NextToken")

View File

@ -255,7 +255,7 @@ class SNSBackend(BaseBackend):
return candidate_topic
def _get_values_nexttoken(self, values_map, next_token=None):
if next_token is None:
if next_token is None or not next_token:
next_token = 0
next_token = int(next_token)
values = list(values_map.values())[

View File

@ -420,7 +420,7 @@ CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
<VisibilityTimeout>{{ queue.visibility_timeout }}</VisibilityTimeout>
</CreateQueueResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</CreateQueueResponse>"""
@ -429,7 +429,7 @@ GET_QUEUE_URL_RESPONSE = """<GetQueueUrlResponse>
<QueueUrl>{{ queue.url(request_url) }}</QueueUrl>
</GetQueueUrlResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</GetQueueUrlResponse>"""
@ -440,13 +440,13 @@ LIST_QUEUES_RESPONSE = """<ListQueuesResponse>
{% endfor %}
</ListQueuesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</ListQueuesResponse>"""
DELETE_QUEUE_RESPONSE = """<DeleteQueueResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DeleteQueueResponse>"""
@ -460,13 +460,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """<GetQueueAttributesResponse>
{% endfor %}
</GetQueueAttributesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</GetQueueAttributesResponse>"""
SET_QUEUE_ATTRIBUTE_RESPONSE = """<SetQueueAttributesResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SetQueueAttributesResponse>"""
@ -483,7 +483,7 @@ SEND_MESSAGE_RESPONSE = """<SendMessageResponse>
</MessageId>
</SendMessageResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SendMessageResponse>"""
@ -543,7 +543,7 @@ RECEIVE_MESSAGE_RESPONSE = """<ReceiveMessageResponse>
{% endfor %}
</ReceiveMessageResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</ReceiveMessageResponse>"""
@ -561,13 +561,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """<SendMessageBatchResponse>
{% endfor %}
</SendMessageBatchResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SendMessageBatchResponse>"""
DELETE_MESSAGE_RESPONSE = """<DeleteMessageResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DeleteMessageResponse>"""
@ -580,13 +580,13 @@ DELETE_MESSAGE_BATCH_RESPONSE = """<DeleteMessageBatchResponse>
{% endfor %}
</DeleteMessageBatchResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DeleteMessageBatchResponse>"""
CHANGE_MESSAGE_VISIBILITY_RESPONSE = """<ChangeMessageVisibilityResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</ChangeMessageVisibilityResponse>"""
@ -613,7 +613,7 @@ CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """<ChangeMessageVisibilityBatchRespo
PURGE_QUEUE_RESPONSE = """<PurgeQueueResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</PurgeQueueResponse>"""

View File

@ -28,7 +28,7 @@ install_requires = [
"xmltodict",
"six>1.9",
"werkzeug",
"PyYAML",
"PyYAML==3.13",
"pytz",
"python-dateutil<3.0.0,>=2.1",
"python-jose<4.0.0",
@ -39,6 +39,7 @@ install_requires = [
"responses>=0.9.0",
"idna<2.9,>=2.5",
"cfn-lint",
"sshpubkeys>=3.1.0,<4.0"
]
extras_require = {

View File

@ -32,7 +32,7 @@ def test_create_autoscaling_group():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a', 'us-east-1b'],
default_cooldown=60,
desired_capacity=2,
health_check_period=100,
@ -42,7 +42,10 @@ def test_create_autoscaling_group():
launch_config=config,
load_balancers=["test_lb"],
placement_group="test_placement",
vpc_zone_identifier=mocked_networking['subnet1'],
vpc_zone_identifier="{subnet1},{subnet2}".format(
subnet1=mocked_networking['subnet1'],
subnet2=mocked_networking['subnet2'],
),
termination_policies=["OldestInstance", "NewestInstance"],
tags=[Tag(
resource_id='tester_group',
@ -57,12 +60,15 @@ def test_create_autoscaling_group():
group = conn.get_all_groups()[0]
group.name.should.equal('tester_group')
set(group.availability_zones).should.equal(
set(['us-east-1c', 'us-east-1b']))
set(['us-east-1a', 'us-east-1b']))
group.desired_capacity.should.equal(2)
group.max_size.should.equal(2)
group.min_size.should.equal(2)
group.instances.should.have.length_of(2)
group.vpc_zone_identifier.should.equal(mocked_networking['subnet1'])
group.vpc_zone_identifier.should.equal("{subnet1},{subnet2}".format(
subnet1=mocked_networking['subnet1'],
subnet2=mocked_networking['subnet2'],
))
group.launch_config_name.should.equal('tester')
group.default_cooldown.should.equal(60)
group.health_check_period.should.equal(100)
@ -109,7 +115,7 @@ def test_create_autoscaling_groups_defaults():
group.launch_config_name.should.equal('tester')
# Defaults
list(group.availability_zones).should.equal([])
list(group.availability_zones).should.equal(['us-east-1a']) # subnet1
group.desired_capacity.should.equal(2)
group.vpc_zone_identifier.should.equal(mocked_networking['subnet1'])
group.default_cooldown.should.equal(300)
@ -217,7 +223,6 @@ def test_autoscaling_update():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -227,13 +232,16 @@ def test_autoscaling_update():
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.availability_zones.should.equal(['us-east-1a'])
group.vpc_zone_identifier.should.equal(mocked_networking['subnet1'])
group.vpc_zone_identifier = 'subnet-5678efgh'
group.availability_zones = ['us-east-1b']
group.vpc_zone_identifier = mocked_networking['subnet2']
group.update()
group = conn.get_all_groups()[0]
group.vpc_zone_identifier.should.equal('subnet-5678efgh')
group.availability_zones.should.equal(['us-east-1b'])
group.vpc_zone_identifier.should.equal(mocked_networking['subnet2'])
@mock_autoscaling_deprecated
@ -249,7 +257,7 @@ def test_autoscaling_tags_update():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -309,7 +317,7 @@ def test_autoscaling_group_delete():
@mock_autoscaling_deprecated
def test_autoscaling_group_describe_instances():
mocked_networking = setup_networking_deprecated()
conn = boto.connect_autoscale()
conn = boto.ec2.autoscale.connect_to_region('us-east-1')
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
@ -332,7 +340,7 @@ def test_autoscaling_group_describe_instances():
instances[0].health_status.should.equal('Healthy')
autoscale_instance_ids = [instance.instance_id for instance in instances]
ec2_conn = boto.connect_ec2()
ec2_conn = boto.ec2.connect_to_region('us-east-1')
reservations = ec2_conn.get_all_instances()
instances = reservations[0].instances
instances.should.have.length_of(2)
@ -355,7 +363,7 @@ def test_set_desired_capacity_up():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -391,7 +399,7 @@ def test_set_desired_capacity_down():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -427,7 +435,7 @@ def test_set_desired_capacity_the_same():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -543,6 +551,7 @@ def test_describe_load_balancers():
)
response = client.describe_load_balancers(AutoScalingGroupName='test_asg')
assert response['ResponseMetadata']['RequestId']
list(response['LoadBalancers']).should.have.length_of(1)
response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb')
@ -738,8 +747,12 @@ def test_describe_autoscaling_groups_boto3():
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
group = response['AutoScalingGroups'][0]
group['AutoScalingGroupName'].should.equal('test_asg')
group['AvailabilityZones'].should.equal(['us-east-1a'])
group['VPCZoneIdentifier'].should.equal(mocked_networking['subnet1'])
group['NewInstancesProtectedFromScaleIn'].should.equal(True)
group['Instances'][0]['ProtectedFromScaleIn'].should.equal(True)
for instance in group['Instances']:
instance['AvailabilityZone'].should.equal('us-east-1a')
instance['ProtectedFromScaleIn'].should.equal(True)
@mock_autoscaling
@ -770,6 +783,7 @@ def test_describe_autoscaling_instances_boto3():
response = client.describe_auto_scaling_instances(InstanceIds=instance_ids)
for instance in response['AutoScalingInstances']:
instance['AutoScalingGroupName'].should.equal('test_asg')
instance['AvailabilityZone'].should.equal('us-east-1a')
instance['ProtectedFromScaleIn'].should.equal(True)
@ -793,6 +807,10 @@ def test_update_autoscaling_group_boto3():
_ = client.update_auto_scaling_group(
AutoScalingGroupName='test_asg',
MinSize=1,
VPCZoneIdentifier="{subnet1},{subnet2}".format(
subnet1=mocked_networking['subnet1'],
subnet2=mocked_networking['subnet2'],
),
NewInstancesProtectedFromScaleIn=False,
)
@ -801,6 +819,7 @@ def test_update_autoscaling_group_boto3():
)
group = response['AutoScalingGroups'][0]
group['MinSize'].should.equal(1)
set(group['AvailabilityZones']).should.equal({'us-east-1a', 'us-east-1b'})
group['NewInstancesProtectedFromScaleIn'].should.equal(False)

View File

@ -106,7 +106,7 @@ def test_detach_all_target_groups():
MaxSize=INSTANCE_COUNT,
DesiredCapacity=INSTANCE_COUNT,
TargetGroupARNs=[target_group_arn],
VPCZoneIdentifier=mocked_networking['vpc'])
VPCZoneIdentifier=mocked_networking['subnet1'])
response = client.describe_load_balancer_target_groups(
AutoScalingGroupName='test_asg')

View File

@ -1,5 +1,6 @@
import boto
import boto3
from boto import vpc as boto_vpc
from moto import mock_ec2, mock_ec2_deprecated
@ -19,9 +20,14 @@ def setup_networking():
@mock_ec2_deprecated
def setup_networking_deprecated():
conn = boto.connect_vpc()
conn = boto_vpc.connect_to_region('us-east-1')
vpc = conn.create_vpc("10.11.0.0/16")
subnet1 = conn.create_subnet(vpc.id, "10.11.1.0/24")
subnet2 = conn.create_subnet(vpc.id, "10.11.2.0/24")
subnet1 = conn.create_subnet(
vpc.id,
"10.11.1.0/24",
availability_zone='us-east-1a')
subnet2 = conn.create_subnet(
vpc.id,
"10.11.2.0/24",
availability_zone='us-east-1b')
return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id}

View File

@ -282,7 +282,7 @@ def test_create_function_from_aws_bucket():
result.pop('LastModified')
result.should.equal({
'FunctionName': 'testFunction',
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
'Runtime': 'python2.7',
'Role': 'test-iam-role',
'Handler': 'lambda_function.lambda_handler',
@ -291,7 +291,7 @@ def test_create_function_from_aws_bucket():
'Description': 'test lambda function',
'Timeout': 3,
'MemorySize': 128,
'Version': '$LATEST',
'Version': '1',
'VpcConfig': {
"SecurityGroupIds": ["sg-123abc"],
"SubnetIds": ["subnet-123abc"],
@ -327,7 +327,7 @@ def test_create_function_from_zipfile():
result.should.equal({
'FunctionName': 'testFunction',
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
'Runtime': 'python2.7',
'Role': 'test-iam-role',
'Handler': 'lambda_function.lambda_handler',
@ -336,7 +336,7 @@ def test_create_function_from_zipfile():
'Timeout': 3,
'MemorySize': 128,
'CodeSha256': hashlib.sha256(zip_content).hexdigest(),
'Version': '$LATEST',
'Version': '1',
'VpcConfig': {
"SecurityGroupIds": [],
"SubnetIds": [],
@ -398,6 +398,8 @@ def test_get_function():
# Test get function with
result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST')
result['Configuration']['Version'].should.equal('$LATEST')
result['Configuration']['FunctionArn'].should.equal('arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST')
# Test get function when can't find function name
with assert_raises(ClientError):
@ -464,7 +466,7 @@ def test_publish():
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
Publish=False,
)
function_list = conn.list_functions()
@ -485,7 +487,7 @@ def test_publish():
function_list = conn.list_functions()
function_list['Functions'].should.have.length_of(1)
function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST')
function_list['Functions'][0]['FunctionArn'].should.contain('testFunction')
@mock_lambda
@ -528,7 +530,7 @@ def test_list_create_list_get_delete_list():
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
"CodeSize": len(zip_content),
"Description": "test lambda function",
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
"FunctionName": "testFunction",
"Handler": "lambda_function.lambda_handler",
"MemorySize": 128,
@ -701,7 +703,7 @@ def test_invoke_async_function():
)
success_result = conn.invoke_async(
FunctionName='testFunction',
FunctionName='testFunction',
InvokeArgs=json.dumps({'test': 'event'})
)
@ -741,7 +743,7 @@ def test_get_function_created_with_zipfile():
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
"CodeSize": len(zip_content),
"Description": "test lambda function",
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
"FunctionName": "testFunction",
"Handler": "lambda_function.handler",
"MemorySize": 128,
@ -842,7 +844,7 @@ def test_list_versions_by_function():
conn.create_function(
FunctionName='testFunction',
Runtime='python2.7',
Role='test-iam-role',
Role='arn:aws:iam::123456789012:role/test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
@ -857,8 +859,28 @@ def test_list_versions_by_function():
res = conn.publish_version(FunctionName='testFunction')
assert res['ResponseMetadata']['HTTPStatusCode'] == 201
versions = conn.list_versions_by_function(FunctionName='testFunction')
assert len(versions['Versions']) == 3
assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST'
assert versions['Versions'][1]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:1'
assert versions['Versions'][2]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:2'
conn.create_function(
FunctionName='testFunction_2',
Runtime='python2.7',
Role='arn:aws:iam::123456789012:role/test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=False,
)
versions = conn.list_versions_by_function(FunctionName='testFunction_2')
assert len(versions['Versions']) == 1
assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction_2:$LATEST'
@mock_lambda

View File

@ -2,6 +2,8 @@ from __future__ import unicode_literals
import json
import base64
from decimal import Decimal
import boto
import boto.cloudformation
import boto.datapipeline
@ -22,6 +24,7 @@ from moto import (
mock_cloudformation,
mock_cloudformation_deprecated,
mock_datapipeline_deprecated,
mock_dynamodb2,
mock_ec2,
mock_ec2_deprecated,
mock_elb,
@ -39,6 +42,7 @@ from moto import (
mock_sqs,
mock_sqs_deprecated,
mock_elbv2)
from moto.dynamodb2.models import Table
from .fixtures import (
ec2_classic_eip,
@ -2085,7 +2089,7 @@ def test_stack_kms():
def test_stack_spot_fleet():
conn = boto3.client('ec2', 'us-east-1')
vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
subnet = conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
subnet_id = subnet['SubnetId']
@ -2169,7 +2173,7 @@ def test_stack_spot_fleet():
def test_stack_spot_fleet_should_figure_out_default_price():
conn = boto3.client('ec2', 'us-east-1')
vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
subnet = conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
subnet_id = subnet['SubnetId']
@ -2433,3 +2437,131 @@ def test_stack_elbv2_resources_integration():
dns['OutputValue'].should.equal(load_balancers[0]['DNSName'])
name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName'])
@mock_dynamodb2
@mock_cloudformation
def test_stack_dynamodb_resources_integration():
dynamodb_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"myDynamoDBTable": {
"Type": "AWS::DynamoDB::Table",
"Properties": {
"AttributeDefinitions": [
{
"AttributeName": "Album",
"AttributeType": "S"
},
{
"AttributeName": "Artist",
"AttributeType": "S"
},
{
"AttributeName": "Sales",
"AttributeType": "N"
},
{
"AttributeName": "NumberOfSongs",
"AttributeType": "N"
}
],
"KeySchema": [
{
"AttributeName": "Album",
"KeyType": "HASH"
},
{
"AttributeName": "Artist",
"KeyType": "RANGE"
}
],
"ProvisionedThroughput": {
"ReadCapacityUnits": "5",
"WriteCapacityUnits": "5"
},
"TableName": "myTableName",
"GlobalSecondaryIndexes": [{
"IndexName": "myGSI",
"KeySchema": [
{
"AttributeName": "Sales",
"KeyType": "HASH"
},
{
"AttributeName": "Artist",
"KeyType": "RANGE"
}
],
"Projection": {
"NonKeyAttributes": ["Album","NumberOfSongs"],
"ProjectionType": "INCLUDE"
},
"ProvisionedThroughput": {
"ReadCapacityUnits": "5",
"WriteCapacityUnits": "5"
}
},
{
"IndexName": "myGSI2",
"KeySchema": [
{
"AttributeName": "NumberOfSongs",
"KeyType": "HASH"
},
{
"AttributeName": "Sales",
"KeyType": "RANGE"
}
],
"Projection": {
"NonKeyAttributes": ["Album","Artist"],
"ProjectionType": "INCLUDE"
},
"ProvisionedThroughput": {
"ReadCapacityUnits": "5",
"WriteCapacityUnits": "5"
}
}],
"LocalSecondaryIndexes":[{
"IndexName": "myLSI",
"KeySchema": [
{
"AttributeName": "Album",
"KeyType": "HASH"
},
{
"AttributeName": "Sales",
"KeyType": "RANGE"
}
],
"Projection": {
"NonKeyAttributes": ["Artist","NumberOfSongs"],
"ProjectionType": "INCLUDE"
}
}]
}
}
}
}
dynamodb_template_json = json.dumps(dynamodb_template)
cfn_conn = boto3.client('cloudformation', 'us-east-1')
cfn_conn.create_stack(
StackName='dynamodb_stack',
TemplateBody=dynamodb_template_json,
)
dynamodb_conn = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb_conn.Table('myTableName')
table.name.should.equal('myTableName')
table.put_item(Item={"Album": "myAlbum", "Artist": "myArtist", "Sales": 10, "NumberOfSongs": 5})
response = table.get_item(Key={"Album": "myAlbum", "Artist": "myArtist"})
response['Item']['Album'].should.equal('myAlbum')
response['Item']['Sales'].should.equal(Decimal('10'))
response['Item']['NumberOfSongs'].should.equal(Decimal('5'))
response['Item']['Album'].should.equal('myAlbum')

View File

@ -83,6 +83,18 @@ get_availability_zones_output = {
}
}
parameters = {
"Parameters": {
"Param": {
"Type": "String",
},
"NoEchoParam": {
"Type": "String",
"NoEcho": True
}
}
}
split_select_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
@ -157,6 +169,9 @@ get_attribute_outputs_template = dict(
get_availability_zones_template = dict(
list(dummy_template.items()) + list(get_availability_zones_output.items()))
parameters_template = dict(
list(dummy_template.items()) + list(parameters.items()))
dummy_template_json = json.dumps(dummy_template)
name_type_template_json = json.dumps(name_type_template)
output_type_template_json = json.dumps(outputs_template)
@ -165,6 +180,7 @@ get_attribute_outputs_template_json = json.dumps(
get_attribute_outputs_template)
get_availability_zones_template_json = json.dumps(
get_availability_zones_template)
parameters_template_json = json.dumps(parameters_template)
split_select_template_json = json.dumps(split_select_template)
sub_template_json = json.dumps(sub_template)
export_value_template_json = json.dumps(export_value_template)
@ -290,6 +306,18 @@ def test_parse_stack_with_bad_get_attribute_outputs():
"test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError)
def test_parse_stack_with_parameters():
stack = FakeStack(
stack_id="test_id",
name="test_stack",
template=parameters_template_json,
parameters={"Param": "visible value", "NoEchoParam": "hidden value"},
region_name='us-west-1')
stack.resource_map.no_echo_parameter_keys.should.have("NoEchoParam")
stack.resource_map.no_echo_parameter_keys.should_not.have("Param")
def test_parse_equals_condition():
parse_condition(
condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},

View File

@ -1162,3 +1162,53 @@ def test_confirm_forgot_password():
ConfirmationCode=str(uuid.uuid4()),
Password=str(uuid.uuid4()),
)
@mock_cognitoidp
def test_admin_update_user_attributes():
conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.admin_create_user(
UserPoolId=user_pool_id,
Username=username,
UserAttributes=[
{
'Name': 'family_name',
'Value': 'Doe',
},
{
'Name': 'given_name',
'Value': 'John',
}
]
)
conn.admin_update_user_attributes(
UserPoolId=user_pool_id,
Username=username,
UserAttributes=[
{
'Name': 'family_name',
'Value': 'Doe',
},
{
'Name': 'given_name',
'Value': 'Jane',
}
]
)
user = conn.admin_get_user(
UserPoolId=user_pool_id,
Username=username
)
attributes = user['UserAttributes']
attributes.should.be.a(list)
for attr in attributes:
val = attr['Value']
if attr['Name'] == 'family_name':
val.should.equal('Doe')
elif attr['Name'] == 'given_name':
val.should.equal('Jane')

View File

@ -1,81 +1,89 @@
from __future__ import unicode_literals
import sure # noqa
from moto.core.responses import AWSServiceSpec
from moto.core.responses import flatten_json_request_body
def test_flatten_json_request_body():
    """A nested RunJobFlow JSON body is flattened into the dotted/indexed
    query-string key format of the EMR query protocol: lists become 1-based
    ``...member.N`` keys and string maps become ``...entry.N.key``/``.value``
    pairs."""
    spec = AWSServiceSpec(
        'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow')

    # One example of each shape the flattener must handle: nested structures,
    # lists of structures, a list of scalars, and string-to-string maps.
    body = {
        'Name': 'cluster',
        'Instances': {
            'Ec2KeyName': 'ec2key',
            'InstanceGroups': [
                {'InstanceRole': 'MASTER',
                 'InstanceType': 'm1.small'},
                {'InstanceRole': 'CORE',
                 'InstanceType': 'm1.medium'},
            ],
            'Placement': {'AvailabilityZone': 'us-east-1'},
        },
        'Steps': [
            {'HadoopJarStep': {
                'Properties': [
                    {'Key': 'k1', 'Value': 'v1'},
                    {'Key': 'k2', 'Value': 'v2'}
                ],
                'Args': ['arg1', 'arg2']}},
        ],
        'Configurations': [
            {'Classification': 'class',
             'Properties': {'propkey1': 'propkey1',
                            'propkey2': 'propkey2'}},
            {'Classification': 'anotherclass',
             'Properties': {'propkey3': 'propkey3'}},
        ]
    }

    flat = flatten_json_request_body('', body, spec)

    # Scalars and nested scalars keep a simple dotted path.
    flat['Name'].should.equal(body['Name'])
    flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName'])
    # Lists of structures become 1-based '...member.N.Field' keys.
    for idx in range(2):
        flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal(
            body['Instances']['InstanceGroups'][idx]['InstanceRole'])
        flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal(
            body['Instances']['InstanceGroups'][idx]['InstanceType'])
    flat['Instances.Placement.AvailabilityZone'].should.equal(
        body['Instances']['Placement']['AvailabilityZone'])

    for idx in range(1):
        prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep'
        step = body['Steps'][idx]['HadoopJarStep']
        # Walk the flattened Properties entries until the generated keys
        # run out; each must mirror the original list element.
        i = 0
        while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat:
            flat[prefix + '.Properties.member.' +
                 str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key'])
            flat[prefix + '.Properties.member.' +
                 str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value'])
            i += 1
        # Lists of scalars flatten to bare '...member.N' keys.
        i = 0
        while prefix + '.Args.member.' + str(i + 1) in flat:
            flat[prefix + '.Args.member.' +
                 str(i + 1)].should.equal(step['Args'][i])
            i += 1

    for idx in range(2):
        flat['Configurations.member.' + str(idx + 1) + '.Classification'].should.equal(
            body['Configurations'][idx]['Classification'])
        # String maps flatten to '...entry.N.key'/'...entry.N.value' pairs;
        # rebuild the dict from the pairs and compare wholesale.
        props = {}
        i = 1
        keyfmt = 'Configurations.member.{0}.Properties.entry.{1}'
        key = keyfmt.format(idx + 1, i)
        while key + '.key' in flat:
            props[flat[key + '.key']] = flat[key + '.value']
            i += 1
            key = keyfmt.format(idx + 1, i)
        props.should.equal(body['Configurations'][idx]['Properties'])
from __future__ import unicode_literals
import sure # noqa
from botocore.awsrequest import AWSPreparedRequest
from moto.core.responses import AWSServiceSpec, BaseResponse
from moto.core.responses import flatten_json_request_body
def test_flatten_json_request_body():
    """A nested RunJobFlow JSON body is flattened into the dotted/indexed
    query-string key format of the EMR query protocol: lists become 1-based
    ``...member.N`` keys and string maps become ``...entry.N.key``/``.value``
    pairs."""
    spec = AWSServiceSpec(
        'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow')

    # One example of each shape the flattener must handle: nested structures,
    # lists of structures, a list of scalars, and string-to-string maps.
    body = {
        'Name': 'cluster',
        'Instances': {
            'Ec2KeyName': 'ec2key',
            'InstanceGroups': [
                {'InstanceRole': 'MASTER',
                 'InstanceType': 'm1.small'},
                {'InstanceRole': 'CORE',
                 'InstanceType': 'm1.medium'},
            ],
            'Placement': {'AvailabilityZone': 'us-east-1'},
        },
        'Steps': [
            {'HadoopJarStep': {
                'Properties': [
                    {'Key': 'k1', 'Value': 'v1'},
                    {'Key': 'k2', 'Value': 'v2'}
                ],
                'Args': ['arg1', 'arg2']}},
        ],
        'Configurations': [
            {'Classification': 'class',
             'Properties': {'propkey1': 'propkey1',
                            'propkey2': 'propkey2'}},
            {'Classification': 'anotherclass',
             'Properties': {'propkey3': 'propkey3'}},
        ]
    }

    flat = flatten_json_request_body('', body, spec)

    # Scalars and nested scalars keep a simple dotted path.
    flat['Name'].should.equal(body['Name'])
    flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName'])
    # Lists of structures become 1-based '...member.N.Field' keys.
    for idx in range(2):
        flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal(
            body['Instances']['InstanceGroups'][idx]['InstanceRole'])
        flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal(
            body['Instances']['InstanceGroups'][idx]['InstanceType'])
    flat['Instances.Placement.AvailabilityZone'].should.equal(
        body['Instances']['Placement']['AvailabilityZone'])

    for idx in range(1):
        prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep'
        step = body['Steps'][idx]['HadoopJarStep']
        # Walk the flattened Properties entries until the generated keys
        # run out; each must mirror the original list element.
        i = 0
        while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat:
            flat[prefix + '.Properties.member.' +
                 str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key'])
            flat[prefix + '.Properties.member.' +
                 str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value'])
            i += 1
        # Lists of scalars flatten to bare '...member.N' keys.
        i = 0
        while prefix + '.Args.member.' + str(i + 1) in flat:
            flat[prefix + '.Args.member.' +
                 str(i + 1)].should.equal(step['Args'][i])
            i += 1

    for idx in range(2):
        flat['Configurations.member.' + str(idx + 1) + '.Classification'].should.equal(
            body['Configurations'][idx]['Classification'])
        # String maps flatten to '...entry.N.key'/'...entry.N.value' pairs;
        # rebuild the dict from the pairs and compare wholesale.
        props = {}
        i = 1
        keyfmt = 'Configurations.member.{0}.Properties.entry.{1}'
        key = keyfmt.format(idx + 1, i)
        while key + '.key' in flat:
            props[flat[key + '.key']] = flat[key + '.value']
            i += 1
            key = keyfmt.format(idx + 1, i)
        props.should.equal(body['Configurations'][idx]['Properties'])
def test_parse_qs_unicode_decode_error():
    """Regression test: '%D0' is an incomplete percent-escape whose decoded
    byte is not valid UTF-8.  The test has no explicit assertion on purpose --
    it passes as long as setup_class survives parsing such a body without
    raising (presumably guarding against a past UnicodeDecodeError; the test
    name suggests so -- confirm against the fix it was added with)."""
    body = b'{"key": "%D0"}, "C": "#0 = :0"}'
    request = AWSPreparedRequest('GET', 'http://request', {'foo': 'bar'}, body, False)
    BaseResponse().setup_class(request, request.url, request.headers)

View File

@ -949,6 +949,33 @@ def test_bad_scan_filter():
raise RuntimeError('Should of raised ResourceInUseException')
@mock_dynamodb2
def test_create_table_pay_per_request():
    """A table can be created with on-demand (PAY_PER_REQUEST) billing
    and no ProvisionedThroughput argument."""
    attribute_definitions = [
        {'AttributeName': 'client', 'AttributeType': 'S'},
        {'AttributeName': 'app', 'AttributeType': 'S'},
    ]
    key_schema = [
        {'AttributeName': 'client', 'KeyType': 'HASH'},
        {'AttributeName': 'app', 'KeyType': 'RANGE'},
    ]
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='test1',
        AttributeDefinitions=attribute_definitions,
        KeySchema=key_schema,
        BillingMode="PAY_PER_REQUEST",
    )
@mock_dynamodb2
def test_create_table_error_pay_per_request_with_provisioned_param():
    """Combining BillingMode=PAY_PER_REQUEST with ProvisionedThroughput is
    invalid and must be rejected with a ValidationException."""
    client = boto3.client('dynamodb', region_name='us-east-1')

    try:
        client.create_table(
            TableName='test1',
            AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
            KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
            ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123},
            BillingMode="PAY_PER_REQUEST"
        )
    except ClientError as err:
        err.response['Error']['Code'].should.equal('ValidationException')
    else:
        # Bug fix: the original test passed silently when create_table did
        # NOT raise, so a regression in the validation went undetected.
        raise RuntimeError('Should have raised ValidationException')
@mock_dynamodb2
def test_duplicate_create():
client = boto3.client('dynamodb', region_name='us-east-1')
@ -1504,6 +1531,7 @@ def test_dynamodb_streams_2():
}
assert 'LatestStreamLabel' in resp['TableDescription']
assert 'LatestStreamArn' in resp['TableDescription']
@mock_dynamodb2
def test_condition_expressions():
@ -1669,8 +1697,8 @@ def test_query_gsi_with_range_key():
res = dynamodb.query(TableName='test', IndexName='test_gsi',
KeyConditionExpression='gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key',
ExpressionAttributeValues={
':gsi_hash_key': {'S': 'key1'},
':gsi_range_key': {'S': 'range1'}
':gsi_hash_key': {'S': 'key1'},
':gsi_range_key': {'S': 'range1'}
})
res.should.have.key("Count").equal(1)
res.should.have.key("Items")
@ -1679,3 +1707,45 @@ def test_query_gsi_with_range_key():
'gsi_hash_key': {'S': 'key1'},
'gsi_range_key': {'S': 'range1'},
})
@mock_dynamodb2
def test_scan_by_non_exists_index():
    """Scanning with an IndexName the table does not define must fail with a
    ValidationException that names the missing index."""
    dynamodb = boto3.client('dynamodb', region_name='us-east-1')

    throughput = {'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}
    dynamodb.create_table(
        TableName='test',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[
            {'AttributeName': 'id', 'AttributeType': 'S'},
            {'AttributeName': 'gsi_col', 'AttributeType': 'S'},
        ],
        ProvisionedThroughput=throughput,
        GlobalSecondaryIndexes=[
            {
                'IndexName': 'test_gsi',
                'KeySchema': [{'AttributeName': 'gsi_col', 'KeyType': 'HASH'}],
                'Projection': {'ProjectionType': 'ALL'},
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 1,
                    'WriteCapacityUnits': 1,
                },
            },
        ],
    )

    with assert_raises(ClientError) as ex:
        dynamodb.scan(TableName='test', IndexName='non_exists_index')

    response = ex.exception.response
    response['Error']['Code'].should.equal('ValidationException')
    response['ResponseMetadata']['HTTPStatusCode'].should.equal(400)
    response['Error']['Message'].should.equal(
        'The table does not have the specified index: non_exists_index'
    )

File diff suppressed because it is too large Load Diff

View File

@ -829,3 +829,77 @@ def test_scan_pagination():
results = page1['Items'] + page2['Items']
usernames = set([r['username'] for r in results])
usernames.should.equal(set(expected_usernames))
@mock_dynamodb2
def test_scan_by_index():
    """Scanning a GSI returns only items that carry the index key attribute,
    and Limit yields a LastEvaluatedKey containing both the table key and
    the index key."""
    dynamodb = boto3.client('dynamodb', region_name='us-east-1')

    # Table keyed on 'id' with a GSI ('test_gsi') keyed on 'gsi_col'.
    dynamodb.create_table(
        TableName='test',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[
            {'AttributeName': 'id', 'AttributeType': 'S'},
            {'AttributeName': 'gsi_col', 'AttributeType': 'S'}
        ],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
        GlobalSecondaryIndexes=[
            {
                'IndexName': 'test_gsi',
                'KeySchema': [
                    {
                        'AttributeName': 'gsi_col',
                        'KeyType': 'HASH'
                    },
                ],
                'Projection': {
                    'ProjectionType': 'ALL',
                },
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 1,
                    'WriteCapacityUnits': 1
                }
            },
        ]
    )

    # Two items with the GSI attribute, one (id=3) without it.
    dynamodb.put_item(
        TableName='test',
        Item={
            'id': {'S': '1'},
            'col1': {'S': 'val1'},
            'gsi_col': {'S': 'gsi_val1'},
        }
    )

    dynamodb.put_item(
        TableName='test',
        Item={
            'id': {'S': '2'},
            'col1': {'S': 'val2'},
            'gsi_col': {'S': 'gsi_val2'},
        }
    )

    dynamodb.put_item(
        TableName='test',
        Item={
            'id': {'S': '3'},
            'col1': {'S': 'val3'},
        }
    )

    # Table scan sees all three items.
    res = dynamodb.scan(TableName='test')
    assert res['Count'] == 3
    assert len(res['Items']) == 3

    # Index scan sees only the two items that have 'gsi_col'.
    res = dynamodb.scan(TableName='test', IndexName='test_gsi')
    assert res['Count'] == 2
    assert len(res['Items']) == 2

    # Limit=1 pages the index scan; LastEvaluatedKey carries both keys.
    res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1)
    assert res['Count'] == 1
    assert len(res['Items']) == 1
    last_eval_key = res['LastEvaluatedKey']
    assert last_eval_key['id']['S'] == '1'
    assert last_eval_key['gsi_col']['S'] == 'gsi_val1'

View File

15
tests/test_ec2/helpers.py Normal file
View File

@ -0,0 +1,15 @@
import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
def rsa_check_private_key(private_key_material):
    """Assert that *private_key_material* is a string holding a loadable,
    unencrypted PEM-encoded RSA private key."""
    assert isinstance(private_key_material, six.string_types)

    loaded_key = serialization.load_pem_private_key(
        data=private_key_material.encode('ascii'),
        password=None,
        backend=default_backend())
    assert isinstance(loaded_key, rsa.RSAPrivateKey)

View File

@ -16,7 +16,7 @@ from moto import mock_ec2_deprecated, mock_ec2
@mock_ec2_deprecated
def test_create_and_delete_volume():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume = conn.create_volume(80, "us-east-1a")
all_volumes = conn.get_all_volumes()
@ -52,7 +52,7 @@ def test_create_and_delete_volume():
@mock_ec2_deprecated
def test_create_encrypted_volume_dryrun():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
with assert_raises(EC2ResponseError) as ex:
conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
@ -63,7 +63,7 @@ def test_create_encrypted_volume_dryrun():
@mock_ec2_deprecated
def test_create_encrypted_volume():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume = conn.create_volume(80, "us-east-1a", encrypted=True)
with assert_raises(EC2ResponseError) as ex:
@ -79,7 +79,7 @@ def test_create_encrypted_volume():
@mock_ec2_deprecated
def test_filter_volume_by_id():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume1 = conn.create_volume(80, "us-east-1a")
volume2 = conn.create_volume(36, "us-east-1b")
volume3 = conn.create_volume(20, "us-east-1c")
@ -99,7 +99,7 @@ def test_filter_volume_by_id():
@mock_ec2_deprecated
def test_volume_filters():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
@ -196,7 +196,7 @@ def test_volume_filters():
@mock_ec2_deprecated
def test_volume_attach_and_detach():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
volume = conn.create_volume(80, "us-east-1a")
@ -252,7 +252,7 @@ def test_volume_attach_and_detach():
@mock_ec2_deprecated
def test_create_snapshot():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume = conn.create_volume(80, "us-east-1a")
with assert_raises(EC2ResponseError) as ex:
@ -291,7 +291,7 @@ def test_create_snapshot():
@mock_ec2_deprecated
def test_create_encrypted_snapshot():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume = conn.create_volume(80, "us-east-1a", encrypted=True)
snapshot = volume.create_snapshot('a test snapshot')
snapshot.update()
@ -306,7 +306,7 @@ def test_create_encrypted_snapshot():
@mock_ec2_deprecated
def test_filter_snapshot_by_id():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume1 = conn.create_volume(36, "us-east-1a")
snap1 = volume1.create_snapshot('a test snapshot 1')
volume2 = conn.create_volume(42, 'us-east-1a')
@ -333,7 +333,7 @@ def test_filter_snapshot_by_id():
@mock_ec2_deprecated
def test_snapshot_filters():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume1 = conn.create_volume(20, "us-east-1a", encrypted=False)
volume2 = conn.create_volume(25, "us-east-1a", encrypted=True)
@ -394,12 +394,17 @@ def test_snapshot_filters():
set([snap.id for snap in snapshots_by_encrypted]
).should.equal({snapshot3.id})
snapshots_by_owner_id = conn.get_all_snapshots(
filters={'owner-id': '123456789012'})
set([snap.id for snap in snapshots_by_owner_id]
).should.equal({snapshot1.id, snapshot2.id, snapshot3.id})
@mock_ec2_deprecated
def test_snapshot_attribute():
import copy
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume = conn.create_volume(80, "us-east-1a")
snapshot = volume.create_snapshot()
@ -502,7 +507,7 @@ def test_snapshot_attribute():
@mock_ec2_deprecated
def test_create_volume_from_snapshot():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume = conn.create_volume(80, "us-east-1a")
snapshot = volume.create_snapshot('a test snapshot')
@ -524,7 +529,7 @@ def test_create_volume_from_snapshot():
@mock_ec2_deprecated
def test_create_volume_from_encrypted_snapshot():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
volume = conn.create_volume(80, "us-east-1a", encrypted=True)
snapshot = volume.create_snapshot('a test snapshot')
@ -569,7 +574,7 @@ def test_modify_attribute_blockDeviceMapping():
@mock_ec2_deprecated
def test_volume_tag_escaping():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
vol = conn.create_volume(10, 'us-east-1a')
snapshot = conn.create_snapshot(vol.id, 'Desc')

View File

@ -42,7 +42,7 @@ def test_add_servers():
@freeze_time("2014-01-01 05:00:00")
@mock_ec2_deprecated
def test_instance_launch_and_terminate():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
with assert_raises(EC2ResponseError) as ex:
reservation = conn.run_instances('ami-1234abcd', dry_run=True)
@ -820,7 +820,7 @@ def test_run_instance_with_instance_type():
@mock_ec2_deprecated
def test_run_instance_with_default_placement():
conn = boto.connect_ec2('the_key', 'the_secret')
conn = boto.ec2.connect_to_region("us-east-1")
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]

View File

@ -1,151 +1,224 @@
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import boto
import six
import sure # noqa
from boto.exception import EC2ResponseError
from moto import mock_ec2_deprecated
@mock_ec2_deprecated
def test_key_pairs_empty():
conn = boto.connect_ec2('the_key', 'the_secret')
assert len(conn.get_all_key_pairs()) == 0
@mock_ec2_deprecated
def test_key_pairs_invalid_id():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_all_key_pairs('foo')
cm.exception.code.should.equal('InvalidKeyPair.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pairs_create():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as ex:
kp = conn.create_key_pair('foo', dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set')
kp = conn.create_key_pair('foo')
assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
kps = conn.get_all_key_pairs()
assert len(kps) == 1
assert kps[0].name == 'foo'
@mock_ec2_deprecated
def test_key_pairs_create_two():
conn = boto.connect_ec2('the_key', 'the_secret')
kp = conn.create_key_pair('foo')
kp = conn.create_key_pair('bar')
assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
kps = conn.get_all_key_pairs()
kps.should.have.length_of(2)
[i.name for i in kps].should.contain('foo')
[i.name for i in kps].should.contain('bar')
kps = conn.get_all_key_pairs('foo')
kps.should.have.length_of(1)
kps[0].name.should.equal('foo')
@mock_ec2_deprecated
def test_key_pairs_create_exist():
conn = boto.connect_ec2('the_key', 'the_secret')
kp = conn.create_key_pair('foo')
assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
assert len(conn.get_all_key_pairs()) == 1
with assert_raises(EC2ResponseError) as cm:
conn.create_key_pair('foo')
cm.exception.code.should.equal('InvalidKeyPair.Duplicate')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pairs_delete_no_exist():
conn = boto.connect_ec2('the_key', 'the_secret')
assert len(conn.get_all_key_pairs()) == 0
r = conn.delete_key_pair('foo')
r.should.be.ok
@mock_ec2_deprecated
def test_key_pairs_delete_exist():
conn = boto.connect_ec2('the_key', 'the_secret')
conn.create_key_pair('foo')
with assert_raises(EC2ResponseError) as ex:
r = conn.delete_key_pair('foo', dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set')
r = conn.delete_key_pair('foo')
r.should.be.ok
assert len(conn.get_all_key_pairs()) == 0
@mock_ec2_deprecated
def test_key_pairs_import():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as ex:
kp = conn.import_key_pair('foo', b'content', dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set')
kp = conn.import_key_pair('foo', b'content')
assert kp.name == 'foo'
kps = conn.get_all_key_pairs()
assert len(kps) == 1
assert kps[0].name == 'foo'
@mock_ec2_deprecated
def test_key_pairs_import_exist():
conn = boto.connect_ec2('the_key', 'the_secret')
kp = conn.import_key_pair('foo', b'content')
assert kp.name == 'foo'
assert len(conn.get_all_key_pairs()) == 1
with assert_raises(EC2ResponseError) as cm:
conn.create_key_pair('foo')
cm.exception.code.should.equal('InvalidKeyPair.Duplicate')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pair_filters():
conn = boto.connect_ec2('the_key', 'the_secret')
_ = conn.create_key_pair('kpfltr1')
kp2 = conn.create_key_pair('kpfltr2')
kp3 = conn.create_key_pair('kpfltr3')
kp_by_name = conn.get_all_key_pairs(
filters={'key-name': 'kpfltr2'})
set([kp.name for kp in kp_by_name]
).should.equal(set([kp2.name]))
kp_by_name = conn.get_all_key_pairs(
filters={'fingerprint': kp3.fingerprint})
set([kp.name for kp in kp_by_name]
).should.equal(set([kp3.name]))
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import boto
import sure # noqa
from boto.exception import EC2ResponseError
from moto import mock_ec2_deprecated
from .helpers import rsa_check_private_key
# The same RSA public key in two encodings -- OpenSSH one-line format and
# RFC 4716 (SSH2) format.  They carry identical key material, so importing
# either must produce RSA_PUBLIC_KEY_FINGERPRINT.
# Fix: the OpenSSH literal contained a stray mid-line backslash ("...Jgo\8kwe...")
# -- a mangled line continuation -- which injected a literal '\' into the key
# bytes and made this copy disagree with the RFC 4716 copy below.
RSA_PUBLIC_KEY_OPENSSH = b"""\
ssh-rsa \
AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H\
6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo\
8kweyMQrhrt6HaKGgromRiz37LQx\
4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBp\
JzbZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6\
A3t8mL7r91aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2X\
qusUO07jKuSxzPumXBeU+JEtx0J1tqZwJlpGt2R+0qN7nKnPl2+hx \
moto@github.com"""

RSA_PUBLIC_KEY_RFC4716 = b"""\
---- BEGIN SSH2 PUBLIC KEY ----
AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H6cZANO
Q+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37LQx4YIAcBi4Zd023
mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBpJzbZlPN45ZCTk9ck0fS
VHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6A3t8mL7r91aM5q6QOQm219lct
FM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2XqusUO07jKuSxzPumXBeU+JEtx0J1tqZ
wJlpGt2R+0qN7nKnPl2+hx
---- END SSH2 PUBLIC KEY ----
"""

# MD5 fingerprint expected for the RSA key above, in either encoding.
RSA_PUBLIC_KEY_FINGERPRINT = "6a:49:07:1c:7e:bd:d2:bd:96:25:fe:b5:74:83:ae:fd"

# A DSA key, used to check that non-RSA material is rejected on import.
DSA_PUBLIC_KEY_OPENSSH = b"""ssh-dss \
AAAAB3NzaC1kc3MAAACBAJ0aXctVwbN6VB81gpo8R7DUk8zXRjZvrkg8Y8vEGt63gklpNJNsLXtEUXkl5D4c0nD2FZO1rJNqFoe\
OQOCoGSfclHvt9w4yPl/lUEtb3Qtj1j80MInETHr19vaSunRk5R+M+8YH+LLcdYdz7MijuGey02mbi0H9K5nUIcuLMArVAAAAFQ\
D0RDvsObRWBlnaW8645obZBM86jwAAAIBNZwf3B4krIzAwVfkMHLDSdAvs7lOWE7o8SJLzr9t4a9HhYp9SLbMzJ815KWfidEYV2\
+s4ZaPCfcZ1GENFRbE8rixz5eMAjEUXEPMJkblDZTHzMsH96z2cOCQZ0vfOmgznsf18Uf725pqo9OqAioEsTJjX8jtI2qNPEBU0\
uhMSZQAAAIBBMGhDu5CWPUlS2QG7vzmzw81XasmHE/s2YPDRbolkriwlunpgwZhCscoQP8HFHY+DLUVvUb+GZwBmFt4l1uHl03b\
ffsm7UIHtCBYERr9Nx0u20ldfhkgB1lhaJb5o0ZJ3pmJ38KChfyHe5EUcqRdEFo89Mp72VI2Z6UHyL175RA== \
moto@github.com"""
@mock_ec2_deprecated
def test_key_pairs_empty():
    """A fresh account starts with no key pairs."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    key_pairs = conn.get_all_key_pairs()
    assert len(key_pairs) == 0
@mock_ec2_deprecated
def test_key_pairs_invalid_id():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_all_key_pairs('foo')
cm.exception.code.should.equal('InvalidKeyPair.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pairs_create():
    """create_key_pair honours DryRun, returns a valid RSA private key,
    and registers the pair."""
    conn = boto.connect_ec2('the_key', 'the_secret')

    # DryRun must be rejected without creating anything.
    with assert_raises(EC2ResponseError) as ex:
        conn.create_key_pair('foo', dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set')

    key_pair = conn.create_key_pair('foo')
    rsa_check_private_key(key_pair.material)

    all_pairs = conn.get_all_key_pairs()
    assert len(all_pairs) == 1
    assert all_pairs[0].name == 'foo'
@mock_ec2_deprecated
def test_key_pairs_create_two():
conn = boto.connect_ec2('the_key', 'the_secret')
kp1 = conn.create_key_pair('foo')
rsa_check_private_key(kp1.material)
kp2 = conn.create_key_pair('bar')
rsa_check_private_key(kp2.material)
assert kp1.material != kp2.material
kps = conn.get_all_key_pairs()
kps.should.have.length_of(2)
assert {i.name for i in kps} == {'foo', 'bar'}
kps = conn.get_all_key_pairs('foo')
kps.should.have.length_of(1)
kps[0].name.should.equal('foo')
@mock_ec2_deprecated
def test_key_pairs_create_exist():
conn = boto.connect_ec2('the_key', 'the_secret')
conn.create_key_pair('foo')
assert len(conn.get_all_key_pairs()) == 1
with assert_raises(EC2ResponseError) as cm:
conn.create_key_pair('foo')
cm.exception.code.should.equal('InvalidKeyPair.Duplicate')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pairs_delete_no_exist():
conn = boto.connect_ec2('the_key', 'the_secret')
assert len(conn.get_all_key_pairs()) == 0
r = conn.delete_key_pair('foo')
r.should.be.ok
@mock_ec2_deprecated
def test_key_pairs_delete_exist():
conn = boto.connect_ec2('the_key', 'the_secret')
conn.create_key_pair('foo')
with assert_raises(EC2ResponseError) as ex:
r = conn.delete_key_pair('foo', dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set')
r = conn.delete_key_pair('foo')
r.should.be.ok
assert len(conn.get_all_key_pairs()) == 0
@mock_ec2_deprecated
def test_key_pairs_import():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as ex:
conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH, dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set')
kp1 = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH)
assert kp1.name == 'foo'
assert kp1.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT
kp2 = conn.import_key_pair('foo2', RSA_PUBLIC_KEY_RFC4716)
assert kp2.name == 'foo2'
assert kp2.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT
kps = conn.get_all_key_pairs()
assert len(kps) == 2
assert kps[0].name == kp1.name
assert kps[1].name == kp2.name
@mock_ec2_deprecated
def test_key_pairs_import_exist():
conn = boto.connect_ec2('the_key', 'the_secret')
kp = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH)
assert kp.name == 'foo'
assert len(conn.get_all_key_pairs()) == 1
with assert_raises(EC2ResponseError) as cm:
conn.create_key_pair('foo')
cm.exception.code.should.equal('InvalidKeyPair.Duplicate')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pairs_invalid():
    """Importing empty, garbage, or non-RSA (DSA) material is rejected
    with InvalidKeyPair.Format."""
    conn = boto.connect_ec2('the_key', 'the_secret')

    # Same rejection for every kind of invalid material.
    for bad_material in (b'', b'garbage', DSA_PUBLIC_KEY_OPENSSH):
        with assert_raises(EC2ResponseError) as ex:
            conn.import_key_pair('foo', bad_material)
        ex.exception.error_code.should.equal('InvalidKeyPair.Format')
        ex.exception.status.should.equal(400)
        ex.exception.message.should.equal(
            'Key is not in valid OpenSSH public key format')
@mock_ec2_deprecated
def test_key_pair_filters():
    """get_all_key_pairs supports filtering by key name and by fingerprint."""
    conn = boto.connect_ec2('the_key', 'the_secret')

    _ = conn.create_key_pair('kpfltr1')
    kp2 = conn.create_key_pair('kpfltr2')
    kp3 = conn.create_key_pair('kpfltr3')

    by_name = conn.get_all_key_pairs(filters={'key-name': 'kpfltr2'})
    set(kp.name for kp in by_name).should.equal(set([kp2.name]))

    by_fingerprint = conn.get_all_key_pairs(
        filters={'fingerprint': kp3.fingerprint})
    set(kp.name for kp in by_fingerprint).should.equal(set([kp3.name]))

View File

@ -2,6 +2,8 @@ from __future__ import unicode_literals
import boto
import boto3
import sure # noqa
from nose.tools import assert_raises
from botocore.exceptions import ClientError
from moto import mock_ec2_deprecated, mock_ec2
@ -28,7 +30,7 @@ def test_new_subnet_associates_with_default_network_acl():
conn = boto.connect_vpc('the_key', 'the secret')
vpc = conn.get_all_vpcs()[0]
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
subnet = conn.create_subnet(vpc.id, "172.31.48.0/20")
all_network_acls = conn.get_all_network_acls()
all_network_acls.should.have.length_of(1)
@ -214,3 +216,37 @@ def test_default_network_acl_default_entries():
unique_entries.append(entry)
unique_entries.should.have.length_of(4)
@mock_ec2
def test_delete_default_network_acl_default_entry():
    """An entry of the default network ACL can be removed with delete_entry."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    default_network_acl = next(iter(ec2.network_acls.all()), None)
    default_network_acl.is_default.should.be.ok

    # The default ACL starts with four entries; delete the first one.
    default_network_acl.entries.should.have.length_of(4)
    first_entry = default_network_acl.entries[0]
    default_network_acl.delete_entry(
        Egress=first_entry['Egress'],
        RuleNumber=first_entry['RuleNumber'])

    default_network_acl.entries.should.have.length_of(3)
@mock_ec2
def test_duplicate_network_acl_entry():
    """Creating two entries with the same rule number and direction fails
    with NetworkAclEntryAlreadyExists."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    default_network_acl = next(iter(ec2.network_acls.all()), None)
    default_network_acl.is_default.should.be.ok

    rule_number = 200
    egress = True
    default_network_acl.create_entry(
        CidrBlock="0.0.0.0/0", Egress=egress, Protocol="-1",
        RuleAction="allow", RuleNumber=rule_number)

    # A second entry with the same (rule number, direction) must be rejected,
    # even though CIDR and action differ.
    with assert_raises(ClientError) as ex:
        default_network_acl.create_entry(
            CidrBlock="10.0.0.0/0", Egress=egress, Protocol="-1",
            RuleAction="deny", RuleNumber=rule_number)
    str(ex.exception).should.equal(
        "An error occurred (NetworkAclEntryAlreadyExists) when calling the CreateNetworkAclEntry "
        "operation: The network acl entry identified by {} already exists.".format(rule_number))

View File

@ -1,148 +1,150 @@
from __future__ import unicode_literals
import boto.ec2
import boto.ec2.autoscale
import boto.ec2.elb
import sure
from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated
from moto.ec2 import ec2_backends
def test_use_boto_regions():
    """moto's per-region EC2 backends cover exactly the regions boto knows."""
    boto_region_names = set(region.name for region in boto.ec2.regions())
    moto_region_names = set(ec2_backends)
    moto_region_names.should.equal(boto_region_names)
def add_servers_to_region(ami_id, count, region):
    """Launch *count* instances of *ami_id* in *region*."""
    conn = boto.ec2.connect_to_region(region)
    for _ in range(count):
        conn.run_instances(ami_id)
@mock_ec2_deprecated
def test_add_servers_to_a_single_region():
    """Two instances launched in one region are both visible from it."""
    region = 'ap-northeast-1'
    add_servers_to_region('ami-1234abcd', 1, region)
    add_servers_to_region('ami-5678efgh', 1, region)

    conn = boto.ec2.connect_to_region(region)
    # Sort by image id so the assertions are order-independent.
    reservations = sorted(
        conn.get_all_instances(),
        key=lambda reservation: reservation.instances[0].image_id)
    len(reservations).should.equal(2)
    reservations[0].instances[0].image_id.should.equal('ami-1234abcd')
    reservations[1].instances[0].image_id.should.equal('ami-5678efgh')
@mock_ec2_deprecated
def test_add_servers_to_multiple_regions():
    """Instances launched in different regions stay isolated per region."""
    region1 = 'us-east-1'
    region2 = 'ap-northeast-1'
    add_servers_to_region('ami-1234abcd', 1, region1)
    add_servers_to_region('ami-5678efgh', 1, region2)

    us_reservations = boto.ec2.connect_to_region(region1).get_all_instances()
    ap_reservations = boto.ec2.connect_to_region(region2).get_all_instances()

    len(us_reservations).should.equal(1)
    len(ap_reservations).should.equal(1)

    us_reservations[0].instances[0].image_id.should.equal('ami-1234abcd')
    ap_reservations[0].instances[0].image_id.should.equal('ami-5678efgh')
@mock_autoscaling_deprecated
@mock_elb_deprecated
def test_create_autoscaling_group():
elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
elb_conn.create_load_balancer(
'us_test_lb', zones=[], listeners=[(80, 8080, 'http')])
elb_conn = boto.ec2.elb.connect_to_region('ap-northeast-1')
elb_conn.create_load_balancer(
'ap_test_lb', zones=[], listeners=[(80, 8080, 'http')])
us_conn = boto.ec2.autoscale.connect_to_region('us-east-1')
config = boto.ec2.autoscale.LaunchConfiguration(
name='us_tester',
image_id='ami-abcd1234',
instance_type='m1.small',
)
us_conn.create_launch_configuration(config)
group = boto.ec2.autoscale.AutoScalingGroup(
name='us_tester_group',
availability_zones=['us-east-1c'],
default_cooldown=60,
desired_capacity=2,
health_check_period=100,
health_check_type="EC2",
max_size=2,
min_size=2,
launch_config=config,
load_balancers=["us_test_lb"],
placement_group="us_test_placement",
vpc_zone_identifier='subnet-1234abcd',
termination_policies=["OldestInstance", "NewestInstance"],
)
us_conn.create_auto_scaling_group(group)
ap_conn = boto.ec2.autoscale.connect_to_region('ap-northeast-1')
config = boto.ec2.autoscale.LaunchConfiguration(
name='ap_tester',
image_id='ami-efgh5678',
instance_type='m1.small',
)
ap_conn.create_launch_configuration(config)
group = boto.ec2.autoscale.AutoScalingGroup(
name='ap_tester_group',
availability_zones=['ap-northeast-1a'],
default_cooldown=60,
desired_capacity=2,
health_check_period=100,
health_check_type="EC2",
max_size=2,
min_size=2,
launch_config=config,
load_balancers=["ap_test_lb"],
placement_group="ap_test_placement",
vpc_zone_identifier='subnet-5678efgh',
termination_policies=["OldestInstance", "NewestInstance"],
)
ap_conn.create_auto_scaling_group(group)
len(us_conn.get_all_groups()).should.equal(1)
len(ap_conn.get_all_groups()).should.equal(1)
us_group = us_conn.get_all_groups()[0]
us_group.name.should.equal('us_tester_group')
list(us_group.availability_zones).should.equal(['us-east-1c'])
us_group.desired_capacity.should.equal(2)
us_group.max_size.should.equal(2)
us_group.min_size.should.equal(2)
us_group.vpc_zone_identifier.should.equal('subnet-1234abcd')
us_group.launch_config_name.should.equal('us_tester')
us_group.default_cooldown.should.equal(60)
us_group.health_check_period.should.equal(100)
us_group.health_check_type.should.equal("EC2")
list(us_group.load_balancers).should.equal(["us_test_lb"])
us_group.placement_group.should.equal("us_test_placement")
list(us_group.termination_policies).should.equal(
["OldestInstance", "NewestInstance"])
ap_group = ap_conn.get_all_groups()[0]
ap_group.name.should.equal('ap_tester_group')
list(ap_group.availability_zones).should.equal(['ap-northeast-1a'])
ap_group.desired_capacity.should.equal(2)
ap_group.max_size.should.equal(2)
ap_group.min_size.should.equal(2)
ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh')
ap_group.launch_config_name.should.equal('ap_tester')
ap_group.default_cooldown.should.equal(60)
ap_group.health_check_period.should.equal(100)
ap_group.health_check_type.should.equal("EC2")
list(ap_group.load_balancers).should.equal(["ap_test_lb"])
ap_group.placement_group.should.equal("ap_test_placement")
list(ap_group.termination_policies).should.equal(
["OldestInstance", "NewestInstance"])
from __future__ import unicode_literals
import boto.ec2
import boto.ec2.autoscale
import boto.ec2.elb
import sure
from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated
from moto.ec2 import ec2_backends
def test_use_boto_regions():
    """Every EC2 region known to boto must have a matching moto backend."""
    expected_regions = {region.name for region in boto.ec2.regions()}
    set(ec2_backends).should.equal(expected_regions)
def add_servers_to_region(ami_id, count, region):
    """Launch ``count`` instances of ``ami_id`` in ``region``."""
    ec2 = boto.ec2.connect_to_region(region)
    for _ in range(count):
        ec2.run_instances(ami_id)
@mock_ec2_deprecated
def test_add_servers_to_a_single_region():
    """Two launches into the same region are both visible there."""
    region = 'ap-northeast-1'
    add_servers_to_region('ami-1234abcd', 1, region)
    add_servers_to_region('ami-5678efgh', 1, region)

    conn = boto.ec2.connect_to_region(region)
    reservations = conn.get_all_instances()
    reservations.should.have.length_of(2)

    reservations.sort(key=lambda reservation: reservation.instances[0].image_id)
    reservations[0].instances[0].image_id.should.equal('ami-1234abcd')
    reservations[1].instances[0].image_id.should.equal('ami-5678efgh')
@mock_ec2_deprecated
def test_add_servers_to_multiple_regions():
    """Instances launched in different regions stay isolated per region."""
    us_region = 'us-east-1'
    ap_region = 'ap-northeast-1'
    add_servers_to_region('ami-1234abcd', 1, us_region)
    add_servers_to_region('ami-5678efgh', 1, ap_region)

    # Each region must see exactly its own instance.
    for region, expected_ami in ((us_region, 'ami-1234abcd'),
                                 (ap_region, 'ami-5678efgh')):
        reservations = boto.ec2.connect_to_region(region).get_all_instances()
        reservations.should.have.length_of(1)
        reservations[0].instances[0].image_id.should.equal(expected_ami)
@mock_autoscaling_deprecated
@mock_elb_deprecated
def test_create_autoscaling_group():
    """Auto scaling groups created in two regions stay independent and keep
    their per-region settings (subnet, load balancer, launch config).

    Fixes: the launch-configuration call result was bound to an unused local
    (``x = us_conn.create_launch_configuration(config)``); the US/AP
    duplication is factored into private helpers.
    """
    for region, lb_name in (('us-east-1', 'us_test_lb'),
                            ('ap-northeast-1', 'ap_test_lb')):
        elb_conn = boto.ec2.elb.connect_to_region(region)
        elb_conn.create_load_balancer(
            lb_name, zones=[], listeners=[(80, 8080, 'http')])

    # Use real default-subnet ids from the moto backends so the ASGs get
    # valid vpc_zone_identifier values.
    us_subnet_id = list(ec2_backends['us-east-1'].subnets['us-east-1c'].keys())[0]
    ap_subnet_id = list(ec2_backends['ap-northeast-1'].subnets['ap-northeast-1a'].keys())[0]

    us_conn = _create_asg_in_region(
        'us-east-1', 'us_tester', 'ami-abcd1234', 'us_tester_group',
        'us-east-1c', 'us_test_lb', 'us_test_placement', us_subnet_id)
    ap_conn = _create_asg_in_region(
        'ap-northeast-1', 'ap_tester', 'ami-efgh5678', 'ap_tester_group',
        'ap-northeast-1a', 'ap_test_lb', 'ap_test_placement', ap_subnet_id)

    len(us_conn.get_all_groups()).should.equal(1)
    len(ap_conn.get_all_groups()).should.equal(1)

    _check_asg(us_conn, 'us_tester_group', 'us-east-1c', us_subnet_id,
               'us_tester', 'us_test_lb', 'us_test_placement')
    _check_asg(ap_conn, 'ap_tester_group', 'ap-northeast-1a', ap_subnet_id,
               'ap_tester', 'ap_test_lb', 'ap_test_placement')


def _create_asg_in_region(region, config_name, ami_id, group_name, az,
                          lb_name, placement_group, subnet_id):
    """Create a launch configuration plus an ASG in ``region``; return the connection."""
    conn = boto.ec2.autoscale.connect_to_region(region)
    config = boto.ec2.autoscale.LaunchConfiguration(
        name=config_name,
        image_id=ami_id,
        instance_type='m1.small',
    )
    conn.create_launch_configuration(config)

    group = boto.ec2.autoscale.AutoScalingGroup(
        name=group_name,
        availability_zones=[az],
        default_cooldown=60,
        desired_capacity=2,
        health_check_period=100,
        health_check_type="EC2",
        max_size=2,
        min_size=2,
        launch_config=config,
        load_balancers=[lb_name],
        placement_group=placement_group,
        vpc_zone_identifier=subnet_id,
        termination_policies=["OldestInstance", "NewestInstance"],
    )
    conn.create_auto_scaling_group(group)
    return conn


def _check_asg(conn, group_name, az, subnet_id, config_name, lb_name,
               placement_group):
    """Assert the region's single ASG matches what was created."""
    group = conn.get_all_groups()[0]
    group.name.should.equal(group_name)
    list(group.availability_zones).should.equal([az])
    group.desired_capacity.should.equal(2)
    group.max_size.should.equal(2)
    group.min_size.should.equal(2)
    group.vpc_zone_identifier.should.equal(subnet_id)
    group.launch_config_name.should.equal(config_name)
    group.default_cooldown.should.equal(60)
    group.health_check_period.should.equal(100)
    group.health_check_type.should.equal("EC2")
    list(group.load_balancers).should.equal([lb_name])
    group.placement_group.should.equal(placement_group)
    list(group.termination_policies).should.equal(
        ["OldestInstance", "NewestInstance"])

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -7,7 +7,7 @@ from moto import mock_ec2
def get_subnet_id(conn):
vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
subnet = conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
subnet_id = subnet['SubnetId']

View File

@ -1,268 +1,268 @@
from __future__ import unicode_literals
from nose.tools import assert_raises
import datetime
import boto
import boto3
from boto.exception import EC2ResponseError
from botocore.exceptions import ClientError
import pytz
import sure # noqa
from moto import mock_ec2, mock_ec2_deprecated
from moto.backends import get_model
from moto.core.utils import iso_8601_datetime_with_milliseconds
@mock_ec2
def test_request_spot_instances():
conn = boto3.client('ec2', 'us-east-1')
vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
subnet = conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
subnet_id = subnet['SubnetId']
conn.create_security_group(GroupName='group1', Description='description')
conn.create_security_group(GroupName='group2', Description='description')
start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc)
end_dt = datetime.datetime(2013, 1, 2).replace(tzinfo=pytz.utc)
start = iso_8601_datetime_with_milliseconds(start_dt)
end = iso_8601_datetime_with_milliseconds(end_dt)
with assert_raises(ClientError) as ex:
request = conn.request_spot_instances(
SpotPrice="0.5", InstanceCount=1, Type='one-time',
ValidFrom=start, ValidUntil=end, LaunchGroup="the-group",
AvailabilityZoneGroup='my-group',
LaunchSpecification={
"ImageId": 'ami-abcd1234',
"KeyName": "test",
"SecurityGroups": ['group1', 'group2'],
"UserData": "some test data",
"InstanceType": 'm1.small',
"Placement": {
"AvailabilityZone": 'us-east-1c',
},
"KernelId": "test-kernel",
"RamdiskId": "test-ramdisk",
"Monitoring": {
"Enabled": True,
},
"SubnetId": subnet_id,
},
DryRun=True,
)
ex.exception.response['Error']['Code'].should.equal('DryRunOperation')
ex.exception.response['ResponseMetadata'][
'HTTPStatusCode'].should.equal(400)
ex.exception.response['Error']['Message'].should.equal(
'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set')
request = conn.request_spot_instances(
SpotPrice="0.5", InstanceCount=1, Type='one-time',
ValidFrom=start, ValidUntil=end, LaunchGroup="the-group",
AvailabilityZoneGroup='my-group',
LaunchSpecification={
"ImageId": 'ami-abcd1234',
"KeyName": "test",
"SecurityGroups": ['group1', 'group2'],
"UserData": "some test data",
"InstanceType": 'm1.small',
"Placement": {
"AvailabilityZone": 'us-east-1c',
},
"KernelId": "test-kernel",
"RamdiskId": "test-ramdisk",
"Monitoring": {
"Enabled": True,
},
"SubnetId": subnet_id,
},
)
requests = conn.describe_spot_instance_requests()['SpotInstanceRequests']
requests.should.have.length_of(1)
request = requests[0]
request['State'].should.equal("open")
request['SpotPrice'].should.equal("0.5")
request['Type'].should.equal('one-time')
request['ValidFrom'].should.equal(start_dt)
request['ValidUntil'].should.equal(end_dt)
request['LaunchGroup'].should.equal("the-group")
request['AvailabilityZoneGroup'].should.equal('my-group')
launch_spec = request['LaunchSpecification']
security_group_names = [group['GroupName']
for group in launch_spec['SecurityGroups']]
set(security_group_names).should.equal(set(['group1', 'group2']))
launch_spec['ImageId'].should.equal('ami-abcd1234')
launch_spec['KeyName'].should.equal("test")
launch_spec['InstanceType'].should.equal('m1.small')
launch_spec['KernelId'].should.equal("test-kernel")
launch_spec['RamdiskId'].should.equal("test-ramdisk")
launch_spec['SubnetId'].should.equal(subnet_id)
@mock_ec2
def test_request_spot_instances_default_arguments():
"""
Test that moto set the correct default arguments
"""
conn = boto3.client('ec2', 'us-east-1')
request = conn.request_spot_instances(
SpotPrice="0.5",
LaunchSpecification={
"ImageId": 'ami-abcd1234',
}
)
requests = conn.describe_spot_instance_requests()['SpotInstanceRequests']
requests.should.have.length_of(1)
request = requests[0]
request['State'].should.equal("open")
request['SpotPrice'].should.equal("0.5")
request['Type'].should.equal('one-time')
request.shouldnt.contain('ValidFrom')
request.shouldnt.contain('ValidUntil')
request.shouldnt.contain('LaunchGroup')
request.shouldnt.contain('AvailabilityZoneGroup')
launch_spec = request['LaunchSpecification']
security_group_names = [group['GroupName']
for group in launch_spec['SecurityGroups']]
security_group_names.should.equal(["default"])
launch_spec['ImageId'].should.equal('ami-abcd1234')
request.shouldnt.contain('KeyName')
launch_spec['InstanceType'].should.equal('m1.small')
request.shouldnt.contain('KernelId')
request.shouldnt.contain('RamdiskId')
request.shouldnt.contain('SubnetId')
@mock_ec2_deprecated
def test_cancel_spot_instance_request():
conn = boto.connect_ec2()
conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
with assert_raises(EC2ResponseError) as ex:
conn.cancel_spot_instance_requests([requests[0].id], dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set')
conn.cancel_spot_instance_requests([requests[0].id])
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(0)
@mock_ec2_deprecated
def test_request_spot_instances_fulfilled():
"""
Test that moto correctly fullfills a spot instance request
"""
conn = boto.ec2.connect_to_region("us-east-1")
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
request.state.should.equal("open")
get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active'
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
request.state.should.equal("active")
@mock_ec2_deprecated
def test_tag_spot_instance_request():
"""
Test that moto correctly tags a spot instance request
"""
conn = boto.connect_ec2()
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
request[0].add_tag('tag1', 'value1')
request[0].add_tag('tag2', 'value2')
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
tag_dict = dict(request.tags)
tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'})
@mock_ec2_deprecated
def test_get_all_spot_instance_requests_filtering():
"""
Test that moto correctly filters spot instance requests
"""
conn = boto.connect_ec2()
request1 = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
request2 = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
request1[0].add_tag('tag1', 'value1')
request1[0].add_tag('tag2', 'value2')
request2[0].add_tag('tag1', 'value1')
request2[0].add_tag('tag2', 'wrong')
requests = conn.get_all_spot_instance_requests(filters={'state': 'active'})
requests.should.have.length_of(0)
requests = conn.get_all_spot_instance_requests(filters={'state': 'open'})
requests.should.have.length_of(3)
requests = conn.get_all_spot_instance_requests(
filters={'tag:tag1': 'value1'})
requests.should.have.length_of(2)
requests = conn.get_all_spot_instance_requests(
filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'})
requests.should.have.length_of(1)
@mock_ec2_deprecated
def test_request_spot_instances_setting_instance_id():
conn = boto.ec2.connect_to_region("us-east-1")
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234')
req = get_model('SpotInstanceRequest', 'us-east-1')[0]
req.state = 'active'
req.instance_id = 'i-12345678'
request = conn.get_all_spot_instance_requests()[0]
assert request.state == 'active'
assert request.instance_id == 'i-12345678'
from __future__ import unicode_literals
from nose.tools import assert_raises
import datetime
import boto
import boto3
from boto.exception import EC2ResponseError
from botocore.exceptions import ClientError
import pytz
import sure # noqa
from moto import mock_ec2, mock_ec2_deprecated
from moto.backends import get_model
from moto.core.utils import iso_8601_datetime_with_milliseconds
@mock_ec2
def test_request_spot_instances():
    """A fully-specified spot request round-trips through DescribeSpotInstanceRequests."""
    client = boto3.client('ec2', 'us-east-1')
    vpc = client.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
    subnet = client.create_subnet(
        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16',
        AvailabilityZone='us-east-1a')['Subnet']
    subnet_id = subnet['SubnetId']
    client.create_security_group(GroupName='group1', Description='description')
    client.create_security_group(GroupName='group2', Description='description')

    start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc)
    end_dt = datetime.datetime(2013, 1, 2).replace(tzinfo=pytz.utc)
    start = iso_8601_datetime_with_milliseconds(start_dt)
    end = iso_8601_datetime_with_milliseconds(end_dt)

    # Same specification is used for both the DryRun and the real request.
    launch_specification = {
        "ImageId": 'ami-abcd1234',
        "KeyName": "test",
        "SecurityGroups": ['group1', 'group2'],
        "UserData": "some test data",
        "InstanceType": 'm1.small',
        "Placement": {
            "AvailabilityZone": 'us-east-1c',
        },
        "KernelId": "test-kernel",
        "RamdiskId": "test-ramdisk",
        "Monitoring": {
            "Enabled": True,
        },
        "SubnetId": subnet_id,
    }

    # DryRun must fail with DryRunOperation and leave no request behind.
    with assert_raises(ClientError) as ex:
        client.request_spot_instances(
            SpotPrice="0.5", InstanceCount=1, Type='one-time',
            ValidFrom=start, ValidUntil=end, LaunchGroup="the-group",
            AvailabilityZoneGroup='my-group',
            LaunchSpecification=launch_specification,
            DryRun=True,
        )
    ex.exception.response['Error']['Code'].should.equal('DryRunOperation')
    ex.exception.response['ResponseMetadata'][
        'HTTPStatusCode'].should.equal(400)
    ex.exception.response['Error']['Message'].should.equal(
        'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set')

    client.request_spot_instances(
        SpotPrice="0.5", InstanceCount=1, Type='one-time',
        ValidFrom=start, ValidUntil=end, LaunchGroup="the-group",
        AvailabilityZoneGroup='my-group',
        LaunchSpecification=launch_specification,
    )

    all_requests = client.describe_spot_instance_requests()['SpotInstanceRequests']
    all_requests.should.have.length_of(1)
    request = all_requests[0]

    request['State'].should.equal("open")
    request['SpotPrice'].should.equal("0.5")
    request['Type'].should.equal('one-time')
    request['ValidFrom'].should.equal(start_dt)
    request['ValidUntil'].should.equal(end_dt)
    request['LaunchGroup'].should.equal("the-group")
    request['AvailabilityZoneGroup'].should.equal('my-group')

    launch_spec = request['LaunchSpecification']
    security_group_names = [group['GroupName']
                            for group in launch_spec['SecurityGroups']]
    set(security_group_names).should.equal(set(['group1', 'group2']))
    launch_spec['ImageId'].should.equal('ami-abcd1234')
    launch_spec['KeyName'].should.equal("test")
    launch_spec['InstanceType'].should.equal('m1.small')
    launch_spec['KernelId'].should.equal("test-kernel")
    launch_spec['RamdiskId'].should.equal("test-ramdisk")
    launch_spec['SubnetId'].should.equal(subnet_id)
@mock_ec2
def test_request_spot_instances_default_arguments():
    """A minimal spot request gets moto's defaults and omits optional fields."""
    client = boto3.client('ec2', 'us-east-1')
    client.request_spot_instances(
        SpotPrice="0.5",
        LaunchSpecification={
            "ImageId": 'ami-abcd1234',
        }
    )

    all_requests = client.describe_spot_instance_requests()['SpotInstanceRequests']
    all_requests.should.have.length_of(1)
    request = all_requests[0]

    request['State'].should.equal("open")
    request['SpotPrice'].should.equal("0.5")
    request['Type'].should.equal('one-time')

    # Fields that were never supplied must not appear in the response.
    for absent_key in ('ValidFrom', 'ValidUntil', 'LaunchGroup',
                       'AvailabilityZoneGroup', 'KeyName', 'KernelId',
                       'RamdiskId', 'SubnetId'):
        request.shouldnt.contain(absent_key)

    launch_spec = request['LaunchSpecification']
    group_names = [group['GroupName'] for group in launch_spec['SecurityGroups']]
    group_names.should.equal(["default"])
    launch_spec['ImageId'].should.equal('ami-abcd1234')
    launch_spec['InstanceType'].should.equal('m1.small')
@mock_ec2_deprecated
def test_cancel_spot_instance_request():
    """Cancelling removes the request; DryRun fails without cancelling."""
    conn = boto.connect_ec2()
    conn.request_spot_instances(price=0.5, image_id='ami-abcd1234')

    open_requests = conn.get_all_spot_instance_requests()
    open_requests.should.have.length_of(1)

    with assert_raises(EC2ResponseError) as ex:
        conn.cancel_spot_instance_requests([open_requests[0].id], dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set')

    conn.cancel_spot_instance_requests([open_requests[0].id])
    conn.get_all_spot_instance_requests().should.have.length_of(0)
@mock_ec2_deprecated
def test_request_spot_instances_fulfilled():
    """Flipping the backend request state to 'active' is visible via the API."""
    conn = boto.ec2.connect_to_region("us-east-1")
    conn.request_spot_instances(price=0.5, image_id='ami-abcd1234')

    open_requests = conn.get_all_spot_instance_requests()
    open_requests.should.have.length_of(1)
    open_requests[0].state.should.equal("open")

    # Reach into the moto backend model to simulate AWS fulfilling the request.
    get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active'

    active_requests = conn.get_all_spot_instance_requests()
    active_requests.should.have.length_of(1)
    active_requests[0].state.should.equal("active")
@mock_ec2_deprecated
def test_tag_spot_instance_request():
    """Tags added to a spot request are returned on later describes."""
    conn = boto.connect_ec2()
    created = conn.request_spot_instances(price=0.5, image_id='ami-abcd1234')
    created[0].add_tag('tag1', 'value1')
    created[0].add_tag('tag2', 'value2')

    fetched = conn.get_all_spot_instance_requests()
    fetched.should.have.length_of(1)
    dict(fetched[0].tags).should.equal({'tag1': 'value1', 'tag2': 'value2'})
@mock_ec2_deprecated
def test_get_all_spot_instance_requests_filtering():
    """Spot requests can be filtered by state and by tag values."""
    conn = boto.connect_ec2()
    tagged_both = conn.request_spot_instances(price=0.5, image_id='ami-abcd1234')
    tagged_one = conn.request_spot_instances(price=0.5, image_id='ami-abcd1234')
    conn.request_spot_instances(price=0.5, image_id='ami-abcd1234')  # untagged

    tagged_both[0].add_tag('tag1', 'value1')
    tagged_both[0].add_tag('tag2', 'value2')
    tagged_one[0].add_tag('tag1', 'value1')
    tagged_one[0].add_tag('tag2', 'wrong')

    conn.get_all_spot_instance_requests(
        filters={'state': 'active'}).should.have.length_of(0)
    conn.get_all_spot_instance_requests(
        filters={'state': 'open'}).should.have.length_of(3)
    conn.get_all_spot_instance_requests(
        filters={'tag:tag1': 'value1'}).should.have.length_of(2)
    conn.get_all_spot_instance_requests(
        filters={'tag:tag1': 'value1',
                 'tag:tag2': 'value2'}).should.have.length_of(1)
@mock_ec2_deprecated
def test_request_spot_instances_setting_instance_id():
    """Backend-assigned state and instance id show up in the API response."""
    conn = boto.ec2.connect_to_region("us-east-1")
    conn.request_spot_instances(price=0.5, image_id='ami-abcd1234')

    backend_request = get_model('SpotInstanceRequest', 'us-east-1')[0]
    backend_request.state = 'active'
    backend_request.instance_id = 'i-12345678'

    fetched = conn.get_all_spot_instance_requests()[0]
    assert fetched.state == 'active'
    assert fetched.instance_id == 'i-12345678'

View File

@ -1,291 +1,340 @@
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
import boto3
import boto
import boto.vpc
from boto.exception import EC2ResponseError
from botocore.exceptions import ParamValidationError
import json
import sure # noqa
from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated
@mock_ec2_deprecated
def test_subnets():
    """Create and delete a subnet; deleting it twice raises NotFound."""
    ec2 = boto.connect_ec2('the_key', 'the_secret')
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")

    # One default subnet per availability zone already exists.
    default_count = len(ec2.get_all_zones())
    conn.get_all_subnets().should.have.length_of(default_count + 1)

    conn.delete_subnet(subnet.id)
    conn.get_all_subnets().should.have.length_of(default_count)

    with assert_raises(EC2ResponseError) as cm:
        conn.delete_subnet(subnet.id)
    cm.exception.code.should.equal('InvalidSubnetID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_subnet_create_vpc_validation():
    """Creating a subnet in a nonexistent VPC raises InvalidVpcID.NotFound."""
    conn = boto.connect_vpc('the_key', 'the_secret')

    with assert_raises(EC2ResponseError) as cm:
        conn.create_subnet("vpc-abcd1234", "10.0.0.0/18")
    cm.exception.code.should.equal('InvalidVpcID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_subnet_tagging():
    """A tag added to a subnet is visible via get_all_tags and after refresh."""
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")

    subnet.add_tag("a key", "some value")

    tag = conn.get_all_tags()[0]
    tag.name.should.equal("a key")
    tag.value.should.equal("some value")

    # Re-fetch the subnet to confirm the tag was persisted on it.
    subnet = conn.get_all_subnets(subnet_ids=[subnet.id])[0]
    subnet.tags.should.have.length_of(1)
    subnet.tags["a key"].should.equal("some value")
@mock_ec2_deprecated
def test_subnet_should_have_proper_availability_zone_set():
    """The AZ requested at creation time is echoed back on the subnet."""
    conn = boto.vpc.connect_to_region('us-west-1')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(
        vpc.id, "10.0.0.0/24", availability_zone='us-west-1b')
    subnet.availability_zone.should.equal('us-west-1b')
@mock_ec2
def test_default_subnet():
    """The default VPC exists; a new subnet in it does not auto-map public IPs."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')

    default_vpc = list(ec2.vpcs.all())[0]
    default_vpc.cidr_block.should.equal('172.31.0.0/16')
    default_vpc.reload()
    default_vpc.is_default.should.be.ok

    subnet = ec2.create_subnet(VpcId=default_vpc.id,
                               CidrBlock='172.31.0.0/20',
                               AvailabilityZone='us-west-1a')
    subnet.reload()
    subnet.map_public_ip_on_launch.shouldnt.be.ok
@mock_ec2_deprecated
def test_non_default_subnet():
    """Subnets in a user-created VPC report mapPublicIpOnLaunch as 'false'."""
    vpc_cli = boto.vpc.connect_to_region('us-west-1')

    vpc = vpc_cli.create_vpc("10.0.0.0/16")
    vpc.is_default.shouldnt.be.ok

    subnet = vpc_cli.create_subnet(vpc.id, "10.0.0.0/24")
    subnet = vpc_cli.get_all_subnets(subnet_ids=[subnet.id])[0]
    subnet.mapPublicIpOnLaunch.should.equal('false')
@mock_ec2
def test_boto3_non_default_subnet():
    """With boto3, subnets of a user-created VPC do not auto-map public IPs."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')

    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    vpc.reload()
    vpc.is_default.shouldnt.be.ok

    subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24',
                               AvailabilityZone='us-west-1a')
    subnet.reload()
    subnet.map_public_ip_on_launch.shouldnt.be.ok
@mock_ec2
def test_modify_subnet_attribute():
    """ModifySubnetAttribute toggles MapPublicIpOnLaunch on a subnet."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    client = boto3.client('ec2', region_name='us-west-1')

    default_vpc = list(ec2.vpcs.all())[0]
    subnet = ec2.create_subnet(VpcId=default_vpc.id, CidrBlock='10.0.0.0/24',
                               AvailabilityZone='us-west-1a')

    # The attribute is only populated by DescribeSubnets, hence the reloads.
    subnet.reload()
    subnet.map_public_ip_on_launch.shouldnt.be.ok

    client.modify_subnet_attribute(SubnetId=subnet.id,
                                   MapPublicIpOnLaunch={'Value': False})
    subnet.reload()
    subnet.map_public_ip_on_launch.shouldnt.be.ok

    client.modify_subnet_attribute(SubnetId=subnet.id,
                                   MapPublicIpOnLaunch={'Value': True})
    subnet.reload()
    subnet.map_public_ip_on_launch.should.be.ok
@mock_ec2
def test_modify_subnet_attribute_validation():
    """A non-boolean MapPublicIpOnLaunch value is rejected by client validation."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    client = boto3.client('ec2', region_name='us-west-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24',
                               AvailabilityZone='us-west-1a')

    with assert_raises(ParamValidationError):
        client.modify_subnet_attribute(SubnetId=subnet.id,
                                       MapPublicIpOnLaunch={'Value': 'invalid'})
@mock_ec2_deprecated
def test_subnet_get_by_id():
    """get_all_subnets(subnet_ids=...) returns exactly the requested subnets
    and raises InvalidSubnetID.NotFound for an unknown id.

    Fixes: the ``ec2`` connection and the ``subnetB2`` binding were created
    but never used; the extra subnet is still created (it must NOT be
    returned by the id lookup) but no longer bound to a name.
    """
    conn = boto.vpc.connect_to_region('us-west-1')
    vpcA = conn.create_vpc("10.0.0.0/16")
    subnetA = conn.create_subnet(
        vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a')
    vpcB = conn.create_vpc("10.0.0.0/16")
    subnetB1 = conn.create_subnet(
        vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a')
    # Extra subnet that must not appear in the filtered result below.
    conn.create_subnet(vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b')

    subnets_by_id = conn.get_all_subnets(subnet_ids=[subnetA.id, subnetB1.id])
    subnets_by_id.should.have.length_of(2)
    returned_ids = tuple(subnet.id for subnet in subnets_by_id)
    subnetA.id.should.be.within(returned_ids)
    subnetB1.id.should.be.within(returned_ids)

    with assert_raises(EC2ResponseError) as cm:
        conn.get_all_subnets(subnet_ids=['subnet-does_not_exist'])
    cm.exception.code.should.equal('InvalidSubnetID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_get_subnets_filtering():
    """get_all_subnets supports vpc-id, cidr (three spellings), subnet-id,
    availabilityZone and defaultForAz filters; unknown filters raise."""
    ec2 = boto.ec2.connect_to_region('us-west-1')
    conn = boto.vpc.connect_to_region('us-west-1')
    vpcA = conn.create_vpc("10.0.0.0/16")
    subnetA = conn.create_subnet(
        vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a')
    vpcB = conn.create_vpc("10.0.0.0/16")
    subnetB1 = conn.create_subnet(
        vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a')
    subnetB2 = conn.create_subnet(
        vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b')

    def ids(subnets):
        """Collect the subnet ids of a result set."""
        return set(subnet.id for subnet in subnets)

    conn.get_all_subnets().should.have.length_of(3 + len(ec2.get_all_zones()))

    # Filter by VPC ID.
    by_vpc = conn.get_all_subnets(filters={'vpc-id': vpcB.id})
    by_vpc.should.have.length_of(2)
    ids(by_vpc).should.equal(set([subnetB1.id, subnetB2.id]))

    # All three accepted spellings of the CIDR filter behave identically.
    for cidr_key in ('cidr', 'cidr-block', 'cidrBlock'):
        by_cidr = conn.get_all_subnets(filters={cidr_key: "10.0.0.0/24"})
        by_cidr.should.have.length_of(2)
        ids(by_cidr).should.equal(set([subnetA.id, subnetB1.id]))

    # VPC ID and CIDR combined.
    combined = conn.get_all_subnets(
        filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"})
    combined.should.have.length_of(1)
    ids(combined).should.equal(set([subnetB1.id]))

    # Filter by subnet ID.
    by_id = conn.get_all_subnets(filters={'subnet-id': subnetA.id})
    by_id.should.have.length_of(1)
    ids(by_id).should.equal(set([subnetA.id]))

    # Filter by availability zone (scoped to vpcB).
    by_az = conn.get_all_subnets(
        filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id})
    by_az.should.have.length_of(1)
    ids(by_az).should.equal(set([subnetB1.id]))

    # defaultForAz matches one default subnet per zone.
    conn.get_all_subnets(filters={'defaultForAz': "true"}).should.have.length_of(
        len(conn.get_all_zones()))

    # Unsupported filters raise NotImplementedError.
    conn.get_all_subnets.when.called_with(
        filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
@mock_ec2_deprecated
@mock_cloudformation_deprecated
def test_subnet_tags_through_cloudformation():
    """Tags declared on an AWS::EC2::Subnet resource appear on the created subnet."""
    vpc_conn = boto.vpc.connect_to_region('us-west-1')
    vpc = vpc_conn.create_vpc("10.0.0.0/16")

    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testSubnet": {
                "Type": "AWS::EC2::Subnet",
                "Properties": {
                    "VpcId": vpc.id,
                    "CidrBlock": "10.0.0.0/24",
                    "AvailabilityZone": "us-west-1b",
                    "Tags": [
                        {"Key": "foo", "Value": "bar"},
                        {"Key": "blah", "Value": "baz"},
                    ],
                },
            }
        },
    }

    cf_conn = boto.cloudformation.connect_to_region("us-west-1")
    cf_conn.create_stack("test_stack", template_body=json.dumps(template))

    # Locate the subnet the stack created and verify both tags survived.
    subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0]
    subnet.tags["foo"].should.equal("bar")
    subnet.tags["blah"].should.equal("baz")
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
import boto3
import boto
import boto.vpc
from boto.exception import EC2ResponseError
from botocore.exceptions import ParamValidationError, ClientError
import json
import sure # noqa
from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated
@mock_ec2_deprecated
def test_subnets():
    """Subnets can be created and deleted; a double delete raises InvalidSubnetID.NotFound."""
    ec2 = boto.connect_ec2('the_key', 'the_secret')
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")

    # One default subnet exists per availability zone, plus the one above.
    conn.get_all_subnets().should.have.length_of(1 + len(ec2.get_all_zones()))

    conn.delete_subnet(subnet.id)
    conn.get_all_subnets().should.have.length_of(len(ec2.get_all_zones()))

    # Deleting the same subnet again must fail loudly.
    with assert_raises(EC2ResponseError) as cm:
        conn.delete_subnet(subnet.id)
    cm.exception.code.should.equal('InvalidSubnetID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_subnet_create_vpc_validation():
    """Creating a subnet in a VPC that does not exist raises InvalidVpcID.NotFound."""
    conn = boto.connect_vpc('the_key', 'the_secret')

    with assert_raises(EC2ResponseError) as cm:
        conn.create_subnet("vpc-abcd1234", "10.0.0.0/18")

    cm.exception.code.should.equal('InvalidVpcID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_subnet_tagging():
    """A tag added to a subnet is visible via get_all_tags and on the refreshed subnet."""
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")

    subnet.add_tag("a key", "some value")

    tag = conn.get_all_tags()[0]
    tag.name.should.equal("a key")
    tag.value.should.equal("some value")

    # Re-fetch the subnet so its tag collection is populated.
    refreshed = conn.get_all_subnets(subnet_ids=[subnet.id])[0]
    refreshed.tags.should.have.length_of(1)
    refreshed.tags["a key"].should.equal("some value")
@mock_ec2_deprecated
def test_subnet_should_have_proper_availability_zone_set():
    """The availability zone requested at creation is echoed back on the subnet."""
    conn = boto.vpc.connect_to_region('us-west-1')
    vpc_a = conn.create_vpc("10.0.0.0/16")
    subnet_a = conn.create_subnet(
        vpc_a.id, "10.0.0.0/24", availability_zone='us-west-1b')
    subnet_a.availability_zone.should.equal('us-west-1b')
@mock_ec2
def test_default_subnet():
    """A subnet added to the default VPC does not map public IPs on launch."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')

    default_vpc = list(ec2.vpcs.all())[0]
    default_vpc.cidr_block.should.equal('172.31.0.0/16')
    default_vpc.reload()
    default_vpc.is_default.should.be.ok

    subnet = ec2.create_subnet(VpcId=default_vpc.id,
                               CidrBlock='172.31.48.0/20',
                               AvailabilityZone='us-west-1a')
    subnet.reload()
    subnet.map_public_ip_on_launch.shouldnt.be.ok
@mock_ec2_deprecated
def test_non_default_subnet():
    """Subnets in a user-created (non-default) VPC report mapPublicIpOnLaunch 'false'."""
    vpc_cli = boto.vpc.connect_to_region('us-west-1')

    # A user-created VPC is never the default one.
    vpc = vpc_cli.create_vpc("10.0.0.0/16")
    vpc.is_default.shouldnt.be.ok

    subnet = vpc_cli.create_subnet(vpc.id, "10.0.0.0/24")
    subnet = vpc_cli.get_all_subnets(subnet_ids=[subnet.id])[0]
    subnet.mapPublicIpOnLaunch.should.equal('false')
@mock_ec2
def test_boto3_non_default_subnet():
    """boto3 variant: a subnet in a user-created VPC does not map public IPs on launch."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')

    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    vpc.reload()
    vpc.is_default.shouldnt.be.ok

    subnet = ec2.create_subnet(VpcId=vpc.id,
                               CidrBlock='10.0.0.0/24',
                               AvailabilityZone='us-west-1a')
    subnet.reload()
    subnet.map_public_ip_on_launch.shouldnt.be.ok
@mock_ec2
def test_modify_subnet_attribute():
    """modify_subnet_attribute toggles MapPublicIpOnLaunch off and on."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    client = boto3.client('ec2', region_name='us-west-1')

    vpc = list(ec2.vpcs.all())[0]  # the default VPC
    subnet = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock="172.31.48.0/20", AvailabilityZone='us-west-1a')

    # The attribute is only populated by DescribeSubnets, hence the reload.
    subnet.reload()
    # A non-default subnet starts with the attribute off.
    subnet.map_public_ip_on_launch.shouldnt.be.ok

    client.modify_subnet_attribute(SubnetId=subnet.id,
                                   MapPublicIpOnLaunch={'Value': False})
    subnet.reload()
    subnet.map_public_ip_on_launch.shouldnt.be.ok

    client.modify_subnet_attribute(SubnetId=subnet.id,
                                   MapPublicIpOnLaunch={'Value': True})
    subnet.reload()
    subnet.map_public_ip_on_launch.should.be.ok
@mock_ec2
def test_modify_subnet_attribute_validation():
    """A non-boolean MapPublicIpOnLaunch value is rejected client-side."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    client = boto3.client('ec2', region_name='us-west-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    subnet = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a')

    with assert_raises(ParamValidationError):
        client.modify_subnet_attribute(
            SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'})
@mock_ec2_deprecated
def test_subnet_get_by_id():
    """get_all_subnets(subnet_ids=...) returns exactly the requested subnets."""
    ec2 = boto.ec2.connect_to_region('us-west-1')
    conn = boto.vpc.connect_to_region('us-west-1')

    vpc_a = conn.create_vpc("10.0.0.0/16")
    subnet_a = conn.create_subnet(
        vpc_a.id, "10.0.0.0/24", availability_zone='us-west-1a')
    vpc_b = conn.create_vpc("10.0.0.0/16")
    subnet_b1 = conn.create_subnet(
        vpc_b.id, "10.0.0.0/24", availability_zone='us-west-1a')
    subnet_b2 = conn.create_subnet(
        vpc_b.id, "10.0.1.0/24", availability_zone='us-west-1b')

    found = conn.get_all_subnets(subnet_ids=[subnet_a.id, subnet_b1.id])
    found.should.have.length_of(2)
    found_ids = tuple(s.id for s in found)
    subnet_a.id.should.be.within(found_ids)
    subnet_b1.id.should.be.within(found_ids)

    # Asking for an unknown id must raise, not return an empty list.
    with assert_raises(EC2ResponseError) as cm:
        conn.get_all_subnets(subnet_ids=['subnet-does_not_exist'])
    cm.exception.code.should.equal('InvalidSubnetID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_get_subnets_filtering():
    """get_all_subnets honours the documented filters and rejects unknown ones."""
    ec2 = boto.ec2.connect_to_region('us-west-1')
    conn = boto.vpc.connect_to_region('us-west-1')

    vpc_a = conn.create_vpc("10.0.0.0/16")
    subnet_a = conn.create_subnet(
        vpc_a.id, "10.0.0.0/24", availability_zone='us-west-1a')
    vpc_b = conn.create_vpc("10.0.0.0/16")
    subnet_b1 = conn.create_subnet(
        vpc_b.id, "10.0.0.0/24", availability_zone='us-west-1a')
    subnet_b2 = conn.create_subnet(
        vpc_b.id, "10.0.1.0/24", availability_zone='us-west-1b')

    # Three explicit subnets plus one default subnet per availability zone.
    conn.get_all_subnets().should.have.length_of(3 + len(ec2.get_all_zones()))

    def ids(subnets):
        # Collect subnet ids for order-insensitive comparison.
        return {s.id for s in subnets}

    # Filter by VPC ID
    by_vpc = conn.get_all_subnets(filters={'vpc-id': vpc_b.id})
    by_vpc.should.have.length_of(2)
    ids(by_vpc).should.equal({subnet_b1.id, subnet_b2.id})

    # All three spellings of the CIDR filter must be accepted.
    for cidr_key in ('cidr', 'cidr-block', 'cidrBlock'):
        by_cidr = conn.get_all_subnets(filters={cidr_key: "10.0.0.0/24"})
        by_cidr.should.have.length_of(2)
        ids(by_cidr).should.equal({subnet_a.id, subnet_b1.id})

    # Filters combine conjunctively.
    by_vpc_and_cidr = conn.get_all_subnets(
        filters={'vpc-id': vpc_b.id, 'cidr': "10.0.0.0/24"})
    by_vpc_and_cidr.should.have.length_of(1)
    ids(by_vpc_and_cidr).should.equal({subnet_b1.id})

    # Filter by subnet ID
    by_id = conn.get_all_subnets(filters={'subnet-id': subnet_a.id})
    by_id.should.have.length_of(1)
    ids(by_id).should.equal({subnet_a.id})

    # Filter by availabilityZone (combined with vpc-id)
    by_az = conn.get_all_subnets(
        filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpc_b.id})
    by_az.should.have.length_of(1)
    ids(by_az).should.equal({subnet_b1.id})

    # Filter by defaultForAz: one default subnet per zone.
    defaults = conn.get_all_subnets(filters={'defaultForAz': "true"})
    defaults.should.have.length_of(len(conn.get_all_zones()))

    # Unsupported filters raise rather than silently matching nothing.
    conn.get_all_subnets.when.called_with(
        filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
@mock_ec2_deprecated
@mock_cloudformation_deprecated
def test_subnet_tags_through_cloudformation():
    """CloudFormation-provided subnet tags end up on the subnet itself."""
    vpc_conn = boto.vpc.connect_to_region('us-west-1')
    vpc = vpc_conn.create_vpc("10.0.0.0/16")

    subnet_properties = {
        "VpcId": vpc.id,
        "CidrBlock": "10.0.0.0/24",
        "AvailabilityZone": "us-west-1b",
        "Tags": [
            {"Key": "foo", "Value": "bar"},
            {"Key": "blah", "Value": "baz"},
        ],
    }
    template_json = json.dumps({
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testSubnet": {
                "Type": "AWS::EC2::Subnet",
                "Properties": subnet_properties,
            }
        },
    })

    cf_conn = boto.cloudformation.connect_to_region("us-west-1")
    cf_conn.create_stack("test_stack", template_body=template_json)

    subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0]
    subnet.tags["foo"].should.equal("bar")
    subnet.tags["blah"].should.equal("baz")
@mock_ec2
def test_create_subnet_with_invalid_cidr_range():
    """A subnet CIDR outside the VPC's range raises InvalidSubnet.Range."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    vpc.reload()
    vpc.is_default.shouldnt.be.ok

    bad_cidr = '10.1.0.0/20'  # not contained in 10.0.0.0/16
    with assert_raises(ClientError) as ex:
        ec2.create_subnet(VpcId=vpc.id, CidrBlock=bad_cidr)
    str(ex.exception).should.equal(
        "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet "
        "operation: The CIDR '{}' is invalid.".format(bad_cidr))
@mock_ec2
def test_create_subnet_with_invalid_cidr_block_parameter():
    """A syntactically invalid CIDR string raises InvalidParameterValue."""
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    vpc.reload()
    vpc.is_default.shouldnt.be.ok

    bad_cidr = '1000.1.0.0/20'  # 1000 is not a valid octet
    with assert_raises(ClientError) as ex:
        ec2.create_subnet(VpcId=vpc.id, CidrBlock=bad_cidr)
    str(ex.exception).should.equal(
        "An error occurred (InvalidParameterValue) when calling the CreateSubnet "
        "operation: Value ({}) for parameter cidrBlock is invalid. "
        "This is not a valid CIDR block.".format(bad_cidr))
@mock_ec2
def test_create_subnets_with_overlapping_cidr_blocks():
    """Creating two subnets with the same CIDR raises InvalidSubnet.Conflict.

    The first create_subnet call is made *outside* the assert_raises block;
    previously both calls were inside it, so the test would also have passed
    if the first (supposed-to-succeed) call had raised.
    """
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    vpc.reload()
    vpc.is_default.shouldnt.be.ok

    subnet_cidr_block = '10.0.0.0/24'
    # This first creation must succeed.
    ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
    # Only the second, conflicting creation may raise.
    with assert_raises(ClientError) as ex:
        ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
    str(ex.exception).should.equal(
        "An error occurred (InvalidSubnet.Conflict) when calling the CreateSubnet "
        "operation: The CIDR '{}' conflicts with another subnet".format(subnet_cidr_block))

View File

@ -1,8 +1,12 @@
from moto.ec2 import utils
def test_random_key_pair():
    """Sanity-check the fake EC2 key pair produced by moto's utils."""
    key_pair = utils.random_key_pair()
    # NOTE(review): real AWS fingerprints are MD5-based (47 chars); this
    # asserts a 59-char fingerprint -- confirm against utils.random_key_pair.
    assert len(key_pair['fingerprint']) == 59
    # The leading delimiter deliberately uses four dashes, matching the
    # string produced by utils.
    assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----')
    assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----')
from moto.ec2 import utils
from .helpers import rsa_check_private_key
def test_random_key_pair():
    """random_key_pair must yield a parseable RSA private key and an MD5 fingerprint."""
    key_pair = utils.random_key_pair()

    # Validate that the material is a real RSA private key.
    rsa_check_private_key(key_pair['material'])

    # AWS uses MD5 fingerprints, which are 47 characters long, *not* SHA1
    # fingerprints with 59 characters.
    assert len(key_pair['fingerprint']) == 47

View File

@ -107,14 +107,19 @@ def test_vpc_peering_connections_cross_region():
ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
# create peering
vpc_pcx = ec2_usw1.create_vpc_peering_connection(
vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
VpcId=vpc_usw1.id,
PeerVpcId=vpc_apn1.id,
PeerRegion='ap-northeast-1',
)
vpc_pcx.status['Code'].should.equal('initiating-request')
vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id)
vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id)
vpc_pcx_usw1.status['Code'].should.equal('initiating-request')
vpc_pcx_usw1.requester_vpc.id.should.equal(vpc_usw1.id)
vpc_pcx_usw1.accepter_vpc.id.should.equal(vpc_apn1.id)
# test cross region vpc peering connection exist
vpc_pcx_apn1 = ec2_apn1.VpcPeeringConnection(vpc_pcx_usw1.id)
vpc_pcx_apn1.id.should.equal(vpc_pcx_usw1.id)
vpc_pcx_apn1.requester_vpc.id.should.equal(vpc_usw1.id)
vpc_pcx_apn1.accepter_vpc.id.should.equal(vpc_apn1.id)
@mock_ec2
@ -131,3 +136,148 @@ def test_vpc_peering_connections_cross_region_fail():
PeerVpcId=vpc_apn1.id,
PeerRegion='ap-northeast-2')
cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound')
@mock_ec2
def test_vpc_peering_connections_cross_region_accept():
    """A cross-region peering accepted in the peer region is 'active' in both regions."""
    # create vpc in us-west-1 and ap-northeast-1
    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')

    # create peering from the requester region
    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
        VpcId=vpc_usw1.id,
        PeerVpcId=vpc_apn1.id,
        PeerRegion='ap-northeast-1',
    )

    # accept peering from ap-northeast-1
    ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
    acp_pcx_apn1 = ec2_apn1.accept_vpc_peering_connection(
        VpcPeeringConnectionId=vpc_pcx_usw1.id
    )

    # BUG FIX: the accepter-side describe previously used the us-west-1
    # client, so the ap-northeast-1 view was never actually checked.
    des_pcx_apn1 = ec2_apn1.describe_vpc_peering_connections(
        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
    )
    des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
    )

    # The connection must be active from both regions' points of view.
    acp_pcx_apn1['VpcPeeringConnection']['Status']['Code'].should.equal('active')
    des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active')
    des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active')
@mock_ec2
def test_vpc_peering_connections_cross_region_reject():
    """A rejected cross-region peering shows as 'rejected' from both regions."""
    # create vpc in us-west-1 and ap-northeast-1
    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')

    # create peering from the requester region
    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
        VpcId=vpc_usw1.id,
        PeerVpcId=vpc_apn1.id,
        PeerRegion='ap-northeast-1',
    )

    # reject peering from ap-northeast-1
    ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
    rej_pcx_apn1 = ec2_apn1.reject_vpc_peering_connection(
        VpcPeeringConnectionId=vpc_pcx_usw1.id
    )

    # BUG FIX: the accepter-side describe previously used the us-west-1
    # client, so the ap-northeast-1 view was never actually checked.
    des_pcx_apn1 = ec2_apn1.describe_vpc_peering_connections(
        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
    )
    des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
    )

    rej_pcx_apn1['Return'].should.equal(True)
    des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected')
    des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected')
@mock_ec2
def test_vpc_peering_connections_cross_region_delete():
    """A deleted cross-region peering shows as 'deleted' from both regions."""
    # create vpc in us-west-1 and ap-northeast-1
    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')

    # create peering from the requester region
    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
        VpcId=vpc_usw1.id,
        PeerVpcId=vpc_apn1.id,
        PeerRegion='ap-northeast-1',
    )

    # delete peering from ap-northeast-1 (original comment wrongly said "reject")
    ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
    del_pcx_apn1 = ec2_apn1.delete_vpc_peering_connection(
        VpcPeeringConnectionId=vpc_pcx_usw1.id
    )

    # BUG FIX: the accepter-side describe previously used the us-west-1
    # client, so the ap-northeast-1 view was never actually checked.
    des_pcx_apn1 = ec2_apn1.describe_vpc_peering_connections(
        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
    )
    des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
    )

    del_pcx_apn1['Return'].should.equal(True)
    des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted')
    des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted')
@mock_ec2
def test_vpc_peering_connections_cross_region_accept_wrong_region():
    """Accepting a cross-region peering from the requester region must fail."""
    # create vpc in us-west-1 and ap-northeast-1
    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')

    # create peering from the requester region
    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
        VpcId=vpc_usw1.id,
        PeerVpcId=vpc_apn1.id,
        PeerRegion='ap-northeast-1',
    )

    # accept wrong peering from us-west-1 which will raise error
    ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
    with assert_raises(ClientError) as cm:
        ec2_usw1.accept_vpc_peering_connection(
            VpcPeeringConnectionId=vpc_pcx_usw1.id
        )

    cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted')
    expected = ('Incorrect region ({0}) specified for this request.VPC '
                'peering connection {1} must be '
                'accepted in region {2}').format(
                    'us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1')
    cm.exception.response['Error']['Message'].should.equal(expected)
@mock_ec2
def test_vpc_peering_connections_cross_region_reject_wrong_region():
    """Rejecting a cross-region peering from the requester region must fail."""
    # create vpc in us-west-1 and ap-northeast-1
    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')

    # create peering from the requester region
    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
        VpcId=vpc_usw1.id,
        PeerVpcId=vpc_apn1.id,
        PeerRegion='ap-northeast-1',
    )

    # reject wrong peering from us-west-1 which will raise error
    ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
    with assert_raises(ClientError) as cm:
        ec2_usw1.reject_vpc_peering_connection(
            VpcPeeringConnectionId=vpc_pcx_usw1.id
        )

    cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted')
    expected = ('Incorrect region ({0}) specified for this request.VPC '
                'peering connection {1} must be accepted or '
                'rejected in region {2}').format(
                    'us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1')
    cm.exception.response['Error']['Message'].should.equal(expected)

File diff suppressed because it is too large Load Diff

View File

@ -388,23 +388,32 @@ def test_list_services():
cluster='test_ecs_cluster',
serviceName='test_ecs_service1',
taskDefinition='test_ecs_task',
schedulingStrategy='REPLICA',
desiredCount=2
)
_ = client.create_service(
cluster='test_ecs_cluster',
serviceName='test_ecs_service2',
taskDefinition='test_ecs_task',
schedulingStrategy='DAEMON',
desiredCount=2
)
response = client.list_services(
unfiltered_response = client.list_services(
cluster='test_ecs_cluster'
)
len(response['serviceArns']).should.equal(2)
response['serviceArns'][0].should.equal(
len(unfiltered_response['serviceArns']).should.equal(2)
unfiltered_response['serviceArns'][0].should.equal(
'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1')
response['serviceArns'][1].should.equal(
unfiltered_response['serviceArns'][1].should.equal(
'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2')
filtered_response = client.list_services(
cluster='test_ecs_cluster',
schedulingStrategy='REPLICA'
)
len(filtered_response['serviceArns']).should.equal(1)
filtered_response['serviceArns'][0].should.equal(
'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1')
@mock_ecs
def test_describe_services():

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,426 +1,447 @@
from __future__ import unicode_literals
import sure # noqa
import re
from nose.tools import assert_raises
import boto3
from botocore.client import ClientError
from datetime import datetime
import pytz
from moto import mock_glue
from . import helpers
@mock_glue
def test_create_database():
    """A freshly created database can be fetched back by name."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    helpers.create_database(client, database_name)

    database = helpers.get_database(client, database_name)['Database']
    database.should.equal({'Name': database_name})
@mock_glue
def test_create_database_already_exists():
    """Creating the same database twice raises AlreadyExistsException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'cantcreatethisdatabasetwice'
    helpers.create_database(client, database_name)

    with assert_raises(ClientError) as exc:
        helpers.create_database(client, database_name)
    exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')
@mock_glue
def test_get_database_not_exits():
    """Fetching an unknown database raises EntityNotFoundException."""
    client = boto3.client('glue', region_name='us-east-1')

    with assert_raises(ClientError) as exc:
        helpers.get_database(client, 'nosuchdatabase')
    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
    exc.exception.response['Error']['Message'].should.match(
        'Database nosuchdatabase not found')
@mock_glue
def test_create_table():
    """create_table stores the table input and returns it verbatim."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    helpers.create_database(client, database_name)

    table_name = 'myspecialtable'
    table_input = helpers.create_table_input(database_name, table_name)
    helpers.create_table(client, database_name, table_name, table_input)

    table = helpers.get_table(client, database_name, table_name)['Table']
    for key in ('Name', 'StorageDescriptor', 'PartitionKeys'):
        table[key].should.equal(table_input[key])
@mock_glue
def test_create_table_already_exists():
    """Creating the same table twice raises AlreadyExistsException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    helpers.create_database(client, database_name)

    table_name = 'cantcreatethistabletwice'
    helpers.create_table(client, database_name, table_name)

    with assert_raises(ClientError) as exc:
        helpers.create_table(client, database_name, table_name)
    exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')
@mock_glue
def test_get_tables():
    """get_tables returns every table created in the database, with its input intact."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    helpers.create_database(client, database_name)

    table_inputs = {}
    for table_name in ('myfirsttable', 'mysecondtable', 'mythirdtable'):
        table_input = helpers.create_table_input(database_name, table_name)
        table_inputs[table_name] = table_input
        helpers.create_table(client, database_name, table_name, table_input)

    tables = helpers.get_tables(client, database_name)['TableList']
    tables.should.have.length_of(3)

    for table in tables:
        expected = table_inputs[table['Name']]
        table['Name'].should.equal(expected['Name'])
        table['StorageDescriptor'].should.equal(expected['StorageDescriptor'])
        table['PartitionKeys'].should.equal(expected['PartitionKeys'])
@mock_glue
def test_get_table_versions():
    """Every update_table call records a new, sequentially numbered table version."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    helpers.create_database(client, database_name)

    table_name = 'myfirsttable'
    version_inputs = {}

    # Version 1: the original, column-less table.
    table_input = helpers.create_table_input(database_name, table_name)
    helpers.create_table(client, database_name, table_name, table_input)
    version_inputs["1"] = table_input

    # Version 2: a column is added.
    columns = [{'Name': 'country', 'Type': 'string'}]
    table_input = helpers.create_table_input(database_name, table_name, columns=columns)
    helpers.update_table(client, database_name, table_name, table_input)
    version_inputs["2"] = table_input

    # Version 3: updating with an identical input still creates a new version.
    helpers.update_table(client, database_name, table_name, table_input)
    version_inputs["3"] = table_input

    vers = helpers.get_table_versions(client, database_name, table_name)['TableVersions']
    vers.should.have.length_of(3)
    vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([])
    vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns)

    for number, ver in enumerate(vers, start=1):
        n = str(number)
        ver['VersionId'].should.equal(n)
        ver['Table']['Name'].should.equal(table_name)
        ver['Table']['StorageDescriptor'].should.equal(
            version_inputs[n]['StorageDescriptor'])
        ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys'])

    # A single version can also be fetched directly by id.
    ver = helpers.get_table_version(client, database_name, table_name, "3")['TableVersion']
    ver['VersionId'].should.equal("3")
    ver['Table']['Name'].should.equal(table_name)
    ver['Table']['StorageDescriptor']['Columns'].should.equal(columns)
@mock_glue
def test_get_table_version_not_found():
    """Requesting a nonexistent version id raises EntityNotFoundException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    with assert_raises(ClientError) as exc:
        helpers.get_table_version(client, database_name, 'myfirsttable', "20")
    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
    exc.exception.response['Error']['Message'].should.match('version', re.I)
@mock_glue
def test_get_table_version_invalid_input():
    """A non-integer version id raises InvalidInputException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    with assert_raises(ClientError) as exc:
        helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int")
    exc.exception.response['Error']['Code'].should.equal('InvalidInputException')
@mock_glue
def test_get_table_not_exits():
    """Fetching a table that was never created raises EntityNotFoundException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    helpers.create_database(client, database_name)

    with assert_raises(ClientError) as exc:
        helpers.get_table(client, database_name, 'myfirsttable')
    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
    exc.exception.response['Error']['Message'].should.match(
        'Table myfirsttable not found')
@mock_glue
def test_get_table_when_database_not_exits():
    """Fetching a table from an unknown database reports the missing database."""
    client = boto3.client('glue', region_name='us-east-1')

    with assert_raises(ClientError) as exc:
        helpers.get_table(client, 'nosuchdatabase', 'myfirsttable')
    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
    exc.exception.response['Error']['Message'].should.match(
        'Database nosuchdatabase not found')
@mock_glue
def test_get_partitions_empty():
    """A new table starts with zero partitions."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    response = client.get_partitions(DatabaseName=database_name, TableName=table_name)
    response['Partitions'].should.have.length_of(0)
@mock_glue
def test_create_partition():
    """A created partition is returned with its values, storage descriptor and a
    CreationTime inside the window in which it was created."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    values = ['2018-10-01']
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    # Bracket the create call so CreationTime can be range-checked.
    before = datetime.now(pytz.utc)
    part_input = helpers.create_partition_input(database_name, table_name, values=values)
    helpers.create_partition(client, database_name, table_name, part_input)
    after = datetime.now(pytz.utc)

    partitions = client.get_partitions(
        DatabaseName=database_name, TableName=table_name)['Partitions']
    partitions.should.have.length_of(1)

    partition = partitions[0]
    partition['TableName'].should.equal(table_name)
    partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor'])
    partition['Values'].should.equal(values)
    partition['CreationTime'].should.be.greater_than(before)
    partition['CreationTime'].should.be.lower_than(after)
@mock_glue
def test_create_partition_already_exist():
    """Creating the same partition twice raises AlreadyExistsException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    values = ['2018-10-01']
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    helpers.create_partition(client, database_name, table_name, values=values)
    with assert_raises(ClientError) as exc:
        helpers.create_partition(client, database_name, table_name, values=values)
    exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')
@mock_glue
def test_get_partition_not_found():
    """Looking up a partition that does not exist raises EntityNotFoundException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    with assert_raises(ClientError) as exc:
        helpers.get_partition(client, database_name, table_name, ['2018-10-01'])
    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
    exc.exception.response['Error']['Message'].should.match('partition')
@mock_glue
def test_get_partition():
    """get_partition returns the single partition matching the requested values."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    values = [['2018-10-01'], ['2018-09-01']]
    for value in values:
        helpers.create_partition(client, database_name, table_name, values=value)

    partition = client.get_partition(
        DatabaseName=database_name, TableName=table_name,
        PartitionValues=values[1])['Partition']
    partition['TableName'].should.equal(table_name)
    partition['Values'].should.equal(values[1])
@mock_glue
def test_update_partition_not_found_moving():
    """Moving a partition that does not exist raises EntityNotFoundException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    with assert_raises(ClientError) as exc:
        helpers.update_partition(client, database_name, table_name,
                                 old_values=['0000-00-00'], values=['2018-10-02'])
    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
    exc.exception.response['Error']['Message'].should.match('partition')
@mock_glue
def test_update_partition_not_found_change_in_place():
    """An in-place update of a missing partition raises EntityNotFoundException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    values = ['2018-10-01']
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    with assert_raises(ClientError) as exc:
        helpers.update_partition(client, database_name, table_name,
                                 old_values=values, values=values)
    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
    exc.exception.response['Error']['Message'].should.match('partition')
@mock_glue
def test_update_partition_cannot_overwrite():
    """Moving a partition onto another existing one raises AlreadyExistsException."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)

    values = [['2018-10-01'], ['2018-09-01']]
    for value in values:
        helpers.create_partition(client, database_name, table_name, values=value)

    with assert_raises(ClientError) as exc:
        helpers.update_partition(client, database_name, table_name,
                                 old_values=values[0], values=values[1])
    exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')
@mock_glue
def test_update_partition():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
values = ['2018-10-01']
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
helpers.create_partition(client, database_name, table_name, values=values)
response = helpers.update_partition(
client,
database_name,
table_name,
old_values=values,
values=values,
columns=[{'Name': 'country', 'Type': 'string'}],
)
response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values)
partition = response['Partition']
partition['TableName'].should.equal(table_name)
partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}])
@mock_glue
def test_update_partition_move():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
values = ['2018-10-01']
new_values = ['2018-09-01']
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
helpers.create_partition(client, database_name, table_name, values=values)
response = helpers.update_partition(
client,
database_name,
table_name,
old_values=values,
values=new_values,
columns=[{'Name': 'country', 'Type': 'string'}],
)
with assert_raises(ClientError) as exc:
helpers.get_partition(client, database_name, table_name, values)
# Old partition shouldn't exist anymore
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values)
partition = response['Partition']
partition['TableName'].should.equal(table_name)
partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}])
from __future__ import unicode_literals
import sure # noqa
import re
from nose.tools import assert_raises
import boto3
from botocore.client import ClientError
from datetime import datetime
import pytz
from moto import mock_glue
from . import helpers
@mock_glue
def test_create_database():
    """A newly created database is retrievable and carries exactly its name."""
    glue_client = boto3.client('glue', region_name='us-east-1')
    db_name = 'myspecialdatabase'
    helpers.create_database(glue_client, db_name)
    fetched = helpers.get_database(glue_client, db_name)['Database']
    fetched.should.equal({'Name': db_name})
@mock_glue
def test_create_database_already_exists():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'cantcreatethisdatabasetwice'
helpers.create_database(client, database_name)
with assert_raises(ClientError) as exc:
helpers.create_database(client, database_name)
exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')
@mock_glue
def test_get_database_not_exits():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'nosuchdatabase'
with assert_raises(ClientError) as exc:
helpers.get_database(client, database_name)
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found')
@mock_glue
def test_create_table():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
helpers.create_database(client, database_name)
table_name = 'myspecialtable'
table_input = helpers.create_table_input(database_name, table_name)
helpers.create_table(client, database_name, table_name, table_input)
response = helpers.get_table(client, database_name, table_name)
table = response['Table']
table['Name'].should.equal(table_input['Name'])
table['StorageDescriptor'].should.equal(table_input['StorageDescriptor'])
table['PartitionKeys'].should.equal(table_input['PartitionKeys'])
@mock_glue
def test_create_table_already_exists():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
helpers.create_database(client, database_name)
table_name = 'cantcreatethistabletwice'
helpers.create_table(client, database_name, table_name)
with assert_raises(ClientError) as exc:
helpers.create_table(client, database_name, table_name)
exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')
@mock_glue
def test_get_tables():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
helpers.create_database(client, database_name)
table_names = ['myfirsttable', 'mysecondtable', 'mythirdtable']
table_inputs = {}
for table_name in table_names:
table_input = helpers.create_table_input(database_name, table_name)
table_inputs[table_name] = table_input
helpers.create_table(client, database_name, table_name, table_input)
response = helpers.get_tables(client, database_name)
tables = response['TableList']
tables.should.have.length_of(3)
for table in tables:
table_name = table['Name']
table_name.should.equal(table_inputs[table_name]['Name'])
table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor'])
table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys'])
@mock_glue
def test_get_table_versions():
    """Each create/update of a table yields a new, monotonically numbered version."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    helpers.create_database(client, database_name)

    table_name = 'myfirsttable'
    version_inputs = {}

    table_input = helpers.create_table_input(database_name, table_name)
    helpers.create_table(client, database_name, table_name, table_input)
    version_inputs["1"] = table_input

    columns = [{'Name': 'country', 'Type': 'string'}]
    table_input = helpers.create_table_input(database_name, table_name, columns=columns)
    helpers.update_table(client, database_name, table_name, table_input)
    version_inputs["2"] = table_input

    # Updating with an identical input should still create a new version
    helpers.update_table(client, database_name, table_name, table_input)
    version_inputs["3"] = table_input

    response = helpers.get_table_versions(client, database_name, table_name)

    vers = response['TableVersions']

    vers.should.have.length_of(3)
    # Version 1 predates the column addition; the latest version carries it.
    vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([])
    vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns)

    for n, ver in enumerate(vers):
        n = str(n + 1)
        ver['VersionId'].should.equal(n)
        ver['Table']['Name'].should.equal(table_name)
        ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor'])
        ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys'])

    response = helpers.get_table_version(client, database_name, table_name, "3")
    ver = response['TableVersion']

    ver['VersionId'].should.equal("3")
    ver['Table']['Name'].should.equal(table_name)
    ver['Table']['StorageDescriptor']['Columns'].should.equal(columns)
@mock_glue
def test_get_table_version_not_found():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
with assert_raises(ClientError) as exc:
helpers.get_table_version(client, database_name, 'myfirsttable', "20")
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
exc.exception.response['Error']['Message'].should.match('version', re.I)
@mock_glue
def test_get_table_version_invalid_input():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
with assert_raises(ClientError) as exc:
helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int")
exc.exception.response['Error']['Code'].should.equal('InvalidInputException')
@mock_glue
def test_get_table_not_exits():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
helpers.create_database(client, database_name)
with assert_raises(ClientError) as exc:
helpers.get_table(client, database_name, 'myfirsttable')
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found')
@mock_glue
def test_get_table_when_database_not_exits():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'nosuchdatabase'
with assert_raises(ClientError) as exc:
helpers.get_table(client, database_name, 'myfirsttable')
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found')
@mock_glue
def test_delete_table():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
helpers.create_database(client, database_name)
table_name = 'myspecialtable'
table_input = helpers.create_table_input(database_name, table_name)
helpers.create_table(client, database_name, table_name, table_input)
result = client.delete_table(DatabaseName=database_name, Name=table_name)
result['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
# confirm table is deleted
with assert_raises(ClientError) as exc:
helpers.get_table(client, database_name, table_name)
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
exc.exception.response['Error']['Message'].should.match('Table myspecialtable not found')
@mock_glue
def test_get_partitions_empty():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
response = client.get_partitions(DatabaseName=database_name, TableName=table_name)
response['Partitions'].should.have.length_of(0)
@mock_glue
def test_create_partition():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
values = ['2018-10-01']
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
before = datetime.now(pytz.utc)
part_input = helpers.create_partition_input(database_name, table_name, values=values)
helpers.create_partition(client, database_name, table_name, part_input)
after = datetime.now(pytz.utc)
response = client.get_partitions(DatabaseName=database_name, TableName=table_name)
partitions = response['Partitions']
partitions.should.have.length_of(1)
partition = partitions[0]
partition['TableName'].should.equal(table_name)
partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor'])
partition['Values'].should.equal(values)
partition['CreationTime'].should.be.greater_than(before)
partition['CreationTime'].should.be.lower_than(after)
@mock_glue
def test_create_partition_already_exist():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
values = ['2018-10-01']
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
helpers.create_partition(client, database_name, table_name, values=values)
with assert_raises(ClientError) as exc:
helpers.create_partition(client, database_name, table_name, values=values)
exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')
@mock_glue
def test_get_partition_not_found():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
values = ['2018-10-01']
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
with assert_raises(ClientError) as exc:
helpers.get_partition(client, database_name, table_name, values)
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
exc.exception.response['Error']['Message'].should.match('partition')
@mock_glue
def test_get_partition():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
values = [['2018-10-01'], ['2018-09-01']]
helpers.create_partition(client, database_name, table_name, values=values[0])
helpers.create_partition(client, database_name, table_name, values=values[1])
response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1])
partition = response['Partition']
partition['TableName'].should.equal(table_name)
partition['Values'].should.equal(values[1])
@mock_glue
def test_update_partition_not_found_moving():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
with assert_raises(ClientError) as exc:
helpers.update_partition(client, database_name, table_name, old_values=['0000-00-00'], values=['2018-10-02'])
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
exc.exception.response['Error']['Message'].should.match('partition')
@mock_glue
def test_update_partition_not_found_change_in_place():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
values = ['2018-10-01']
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
with assert_raises(ClientError) as exc:
helpers.update_partition(client, database_name, table_name, old_values=values, values=values)
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
exc.exception.response['Error']['Message'].should.match('partition')
@mock_glue
def test_update_partition_cannot_overwrite():
client = boto3.client('glue', region_name='us-east-1')
database_name = 'myspecialdatabase'
table_name = 'myfirsttable'
helpers.create_database(client, database_name)
helpers.create_table(client, database_name, table_name)
values = [['2018-10-01'], ['2018-09-01']]
helpers.create_partition(client, database_name, table_name, values=values[0])
helpers.create_partition(client, database_name, table_name, values=values[1])
with assert_raises(ClientError) as exc:
helpers.update_partition(client, database_name, table_name, old_values=values[0], values=values[1])
exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')
@mock_glue
def test_update_partition():
    """Updating a partition in place (same values) replaces its StorageDescriptor."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    values = ['2018-10-01']

    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)
    helpers.create_partition(client, database_name, table_name, values=values)

    # The update response carries no asserted payload, so it is not bound to a
    # name (the original assigned it to `response` and immediately overwrote it).
    helpers.update_partition(
        client,
        database_name,
        table_name,
        old_values=values,
        values=values,
        columns=[{'Name': 'country', 'Type': 'string'}],
    )

    response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values)
    partition = response['Partition']

    partition['TableName'].should.equal(table_name)
    partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}])
@mock_glue
def test_update_partition_move():
    """Updating a partition to new values moves it: the old values disappear,
    the new values resolve to the updated StorageDescriptor."""
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    table_name = 'myfirsttable'
    values = ['2018-10-01']
    new_values = ['2018-09-01']

    helpers.create_database(client, database_name)
    helpers.create_table(client, database_name, table_name)
    helpers.create_partition(client, database_name, table_name, values=values)

    # The update response is not asserted on, so don't bind it (the original
    # assigned it to an unused `response` variable).
    helpers.update_partition(
        client,
        database_name,
        table_name,
        old_values=values,
        values=new_values,
        columns=[{'Name': 'country', 'Type': 'string'}],
    )

    with assert_raises(ClientError) as exc:
        helpers.get_partition(client, database_name, table_name, values)

    # Old partition shouldn't exist anymore
    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')

    response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values)
    partition = response['Partition']
    partition['TableName'].should.equal(table_name)
    partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}])

View File

@ -128,7 +128,6 @@ def test_create_role_and_instance_profile():
profile = conn.create_instance_profile('my-other-profile')
profile.path.should.equal('/')
@mock_iam_deprecated()
def test_remove_role_from_instance_profile():
conn = boto.connect_iam()
@ -358,7 +357,7 @@ def test_list_policy_versions():
versions = conn.list_policy_versions(
PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions")
versions.get('Versions')[0].get('VersionId').should.equal('v1')
conn.create_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions",
PolicyDocument='{"second":"policy"}')
@ -1292,4 +1291,22 @@ def test_create_role_no_path():
conn = boto3.client('iam', region_name='us-east-1')
resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test')
resp.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-role')
resp.get('Role').should_not.have.key('PermissionsBoundary')
@mock_iam()
def test_create_role_with_permissions_boundary():
conn = boto3.client('iam', region_name='us-east-1')
boundary = 'arn:aws:iam::123456789012:policy/boundary'
resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=boundary)
expected = {
'PermissionsBoundaryType': 'PermissionsBoundaryPolicy',
'PermissionsBoundaryArn': boundary
}
resp.get('Role').get('PermissionsBoundary').should.equal(expected)
invalid_boundary_arn = 'arn:aws:iam::123456789:not_a_boundary'
with assert_raises(ClientError):
conn.create_role(RoleName='bad-boundary', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=invalid_boundary_arn)
# Ensure the PermissionsBoundary is included in role listing as well
conn.list_roles().get('Roles')[0].get('PermissionsBoundary').should.equal(expected)

View File

@ -350,7 +350,7 @@ def test_list_things_with_attribute_and_thing_type_filter_and_next_token():
@mock_iot
def test_certs():
client = boto3.client('iot', region_name='ap-northeast-1')
client = boto3.client('iot', region_name='us-east-1')
cert = client.create_keys_and_certificate(setAsActive=True)
cert.should.have.key('certificateArn').which.should_not.be.none
cert.should.have.key('certificateId').which.should_not.be.none
@ -367,6 +367,29 @@ def test_certs():
cert_desc.should.have.key('certificateId').which.should_not.be.none
cert_desc.should.have.key('certificatePem').which.should_not.be.none
cert_desc.should.have.key('status').which.should.equal('ACTIVE')
cert_pem = cert_desc['certificatePem']
res = client.list_certificates()
for cert in res['certificates']:
cert.should.have.key('certificateArn').which.should_not.be.none
cert.should.have.key('certificateId').which.should_not.be.none
cert.should.have.key('status').which.should_not.be.none
cert.should.have.key('creationDate').which.should_not.be.none
client.update_certificate(certificateId=cert_id, newStatus='REVOKED')
cert = client.describe_certificate(certificateId=cert_id)
cert_desc = cert['certificateDescription']
cert_desc.should.have.key('status').which.should.equal('REVOKED')
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key('certificates')
# Test register_certificate flow
cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True)
cert.should.have.key('certificateId').which.should_not.be.none
cert.should.have.key('certificateArn').which.should_not.be.none
cert_id = cert['certificateId']
res = client.list_certificates()
res.should.have.key('certificates').which.should.have.length_of(1)
@ -378,11 +401,12 @@ def test_certs():
client.update_certificate(certificateId=cert_id, newStatus='REVOKED')
cert = client.describe_certificate(certificateId=cert_id)
cert_desc.should.have.key('status').which.should.equal('ACTIVE')
cert_desc = cert['certificateDescription']
cert_desc.should.have.key('status').which.should.equal('REVOKED')
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key('certificates').which.should.have.length_of(0)
res.should.have.key('certificates')
@mock_iot

File diff suppressed because it is too large Load Diff

View File

@ -18,13 +18,14 @@ from dateutil.tz import tzutc
@mock_kms_deprecated
def test_create_key():
conn = boto.kms.connect_to_region("us-west-2")
with freeze_time("2015-01-01 00:00:00"):
key = conn.create_key(policy="my policy",
description="my key", key_usage='ENCRYPT_DECRYPT')
key = conn.create_key(policy="my policy",
description="my key", key_usage='ENCRYPT_DECRYPT')
key['KeyMetadata']['Description'].should.equal("my key")
key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT")
key['KeyMetadata']['Enabled'].should.equal(True)
key['KeyMetadata']['Description'].should.equal("my key")
key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT")
key['KeyMetadata']['Enabled'].should.equal(True)
key['KeyMetadata']['CreationDate'].should.equal("1420070400")
@mock_kms_deprecated
@ -980,5 +981,3 @@ def test_put_key_policy_key_not_found():
PolicyName='default',
Policy='new policy'
)

View File

@ -1,128 +1,164 @@
import boto3
import sure # noqa
import six
from botocore.exceptions import ClientError
from moto import mock_logs, settings
from nose.tools import assert_raises
_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2'
@mock_logs
def test_log_group_create():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
response = conn.create_log_group(logGroupName=log_group_name)
response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
assert len(response['logGroups']) == 1
response = conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_exceptions():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
log_stream_name = 'dummp-stream'
conn.create_log_group(logGroupName=log_group_name)
with assert_raises(ClientError):
conn.create_log_group(logGroupName=log_group_name)
# descrine_log_groups is not implemented yet
conn.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
with assert_raises(ClientError):
conn.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
conn.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=[
{
'timestamp': 0,
'message': 'line'
},
],
)
with assert_raises(ClientError):
conn.put_log_events(
logGroupName=log_group_name,
logStreamName="invalid-stream",
logEvents=[
{
'timestamp': 0,
'message': 'line'
},
],
)
@mock_logs
def test_put_logs():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
log_stream_name = 'stream'
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
messages = [
{'timestamp': 0, 'message': 'hello'},
{'timestamp': 0, 'message': 'world'}
]
putRes = conn.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=messages
)
res = conn.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
events = res['events']
nextSequenceToken = putRes['nextSequenceToken']
assert isinstance(nextSequenceToken, six.string_types) == True
assert len(nextSequenceToken) == 56
events.should.have.length_of(2)
@mock_logs
def test_filter_logs_interleaved():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
log_stream_name = 'stream'
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
messages = [
{'timestamp': 0, 'message': 'hello'},
{'timestamp': 0, 'message': 'world'}
]
conn.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=messages
)
res = conn.filter_log_events(
logGroupName=log_group_name,
logStreamNames=[log_stream_name],
interleaved=True,
)
events = res['events']
for original_message, resulting_event in zip(messages, events):
resulting_event['eventId'].should.equal(str(resulting_event['eventId']))
resulting_event['timestamp'].should.equal(original_message['timestamp'])
resulting_event['message'].should.equal(original_message['message'])
import boto3
import sure # noqa
import six
from botocore.exceptions import ClientError
from moto import mock_logs, settings
from nose.tools import assert_raises
_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2'
@mock_logs
def test_log_group_create():
    """A freshly created log group is listed and defaults to Never Expire
    retention (no retentionInDays key in the response)."""
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    conn.create_log_group(logGroupName=log_group_name)

    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
    # AWS defaults to Never Expire for log group retention; PEP 8 mandates
    # identity comparison (`is None`) rather than `== None`.
    assert response['logGroups'][0].get('retentionInDays') is None

    conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_exceptions():
    """Duplicate creation and writes to unknown streams raise ClientError."""
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    log_stream_name = 'dummp-stream'
    conn.create_log_group(logGroupName=log_group_name)
    # Creating the same log group twice must fail.
    with assert_raises(ClientError):
        conn.create_log_group(logGroupName=log_group_name)

    # describe_log_groups is not implemented yet
    conn.create_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    # Creating the same log stream twice must fail as well.
    with assert_raises(ClientError):
        conn.create_log_stream(
            logGroupName=log_group_name,
            logStreamName=log_stream_name
        )
    conn.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=[
            {
                'timestamp': 0,
                'message': 'line'
            },
        ],
    )
    # Writing to a stream that was never created must fail.
    with assert_raises(ClientError):
        conn.put_log_events(
            logGroupName=log_group_name,
            logStreamName="invalid-stream",
            logEvents=[
                {
                    'timestamp': 0,
                    'message': 'line'
                },
            ],
        )
@mock_logs
def test_put_logs():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
log_stream_name = 'stream'
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
messages = [
{'timestamp': 0, 'message': 'hello'},
{'timestamp': 0, 'message': 'world'}
]
putRes = conn.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=messages
)
res = conn.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
events = res['events']
nextSequenceToken = putRes['nextSequenceToken']
assert isinstance(nextSequenceToken, six.string_types) == True
assert len(nextSequenceToken) == 56
events.should.have.length_of(2)
@mock_logs
def test_filter_logs_interleaved():
    """filter_log_events with interleaved=True returns the stream's events in
    the order they were put, with timestamps and messages preserved."""
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    log_stream_name = 'stream'
    conn.create_log_group(logGroupName=log_group_name)
    conn.create_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    messages = [
        {'timestamp': 0, 'message': 'hello'},
        {'timestamp': 0, 'message': 'world'}
    ]
    conn.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=messages
    )
    res = conn.filter_log_events(
        logGroupName=log_group_name,
        logStreamNames=[log_stream_name],
        interleaved=True,
    )
    events = res['events']
    for original_message, resulting_event in zip(messages, events):
        # Comparing eventId with str(eventId) verifies the id is returned as a
        # string: for a str the round-trip is a no-op, for an int it would
        # fail the equality check.
        resulting_event['eventId'].should.equal(str(resulting_event['eventId']))
        resulting_event['timestamp'].should.equal(original_message['timestamp'])
        resulting_event['message'].should.equal(original_message['message'])
@mock_logs
def test_put_retention_policy():
    """put_retention_policy sets retentionInDays on an existing log group."""
    client = boto3.client('logs', 'us-west-2')
    group_name = 'dummy'
    client.create_log_group(logGroupName=group_name)
    client.put_retention_policy(logGroupName=group_name, retentionInDays=7)
    groups = client.describe_log_groups(logGroupNamePrefix=group_name)['logGroups']
    assert len(groups) == 1
    assert groups[0].get('retentionInDays') == 7
    client.delete_log_group(logGroupName=group_name)
@mock_logs
def test_delete_retention_policy():
    """delete_retention_policy reverts a log group to Never Expire retention
    (the retentionInDays key disappears from describe_log_groups)."""
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    conn.create_log_group(logGroupName=log_group_name)
    conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7)

    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
    assert response['logGroups'][0].get('retentionInDays') == 7

    conn.delete_retention_policy(logGroupName=log_group_name)
    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
    # PEP 8: comparisons to None use identity (`is None`), not equality.
    assert response['logGroups'][0].get('retentionInDays') is None

    conn.delete_log_group(logGroupName=log_group_name)

View File

@ -1,136 +1,152 @@
from __future__ import unicode_literals
import six
import sure # noqa
import datetime
from moto.organizations import utils
# Master-account emails look like "user@example.com"; the trailing numeric
# alternative tolerates moto's generated suffixes. The TLD separator dot is
# escaped (the original unescaped `.` matched ANY character, so strings like
# "a@bXcom" passed), and a raw string avoids accidental escape processing.
EMAIL_REGEX = r"^.+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,3}|[0-9]{1,3}$"
ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE
ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE
OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE)
ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE
CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE
def test_make_random_org_id():
org_id = utils.make_random_org_id()
org_id.should.match(ORG_ID_REGEX)
def test_make_random_root_id():
root_id = utils.make_random_root_id()
root_id.should.match(ROOT_ID_REGEX)
def test_make_random_ou_id():
root_id = utils.make_random_root_id()
ou_id = utils.make_random_ou_id(root_id)
ou_id.should.match(OU_ID_REGEX)
def test_make_random_account_id():
account_id = utils.make_random_account_id()
account_id.should.match(ACCOUNT_ID_REGEX)
def test_make_random_create_account_status_id():
create_account_status_id = utils.make_random_create_account_status_id()
create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX)
def validate_organization(response):
    """Assert a DescribeOrganization-style response has exactly the expected
    keys and that every field matches moto's canonical id/ARN formats."""
    org = response['Organization']
    # Compare the key set sorted so dict ordering in the response is irrelevant.
    sorted(org.keys()).should.equal([
        'Arn',
        'AvailablePolicyTypes',
        'FeatureSet',
        'Id',
        'MasterAccountArn',
        'MasterAccountEmail',
        'MasterAccountId',
    ])
    org['Id'].should.match(ORG_ID_REGEX)
    org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID)
    # Both ARNs embed the master account id and the organization id.
    org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
    ))
    org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
    ))
    org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL)
    org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING'])
    org['AvailablePolicyTypes'].should.equal([{
        'Type': 'SERVICE_CONTROL_POLICY',
        'Status': 'ENABLED'
    }])
def validate_roots(org, response):
response.should.have.key('Roots').should.be.a(list)
response['Roots'].should_not.be.empty
root = response['Roots'][0]
root.should.have.key('Id').should.match(ROOT_ID_REGEX)
root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format(
org['MasterAccountId'],
org['Id'],
root['Id'],
))
root.should.have.key('Name').should.be.a(six.string_types)
root.should.have.key('PolicyTypes').should.be.a(list)
root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY')
root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED')
def validate_organizational_unit(org, response):
response.should.have.key('OrganizationalUnit').should.be.a(dict)
ou = response['OrganizationalUnit']
ou.should.have.key('Id').should.match(OU_ID_REGEX)
ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format(
org['MasterAccountId'],
org['Id'],
ou['Id'],
))
ou.should.have.key('Name').should.be.a(six.string_types)
def validate_account(org, account):
sorted(account.keys()).should.equal([
'Arn',
'Email',
'Id',
'JoinedMethod',
'JoinedTimestamp',
'Name',
'Status',
])
account['Id'].should.match(ACCOUNT_ID_REGEX)
account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format(
org['MasterAccountId'],
org['Id'],
account['Id'],
))
account['Email'].should.match(EMAIL_REGEX)
account['JoinedMethod'].should.be.within(['INVITED', 'CREATED'])
account['Status'].should.be.within(['ACTIVE', 'SUSPENDED'])
account['Name'].should.be.a(six.string_types)
account['JoinedTimestamp'].should.be.a(datetime.datetime)
def validate_create_account_status(create_status):
    """Assert a CreateAccountStatus dict is complete and marked SUCCEEDED."""
    sorted(create_status.keys()).should.equal([
        'AccountId',
        'AccountName',
        'CompletedTimestamp',
        'Id',
        'RequestedTimestamp',
        'State',
    ])
    create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX)
    create_status['AccountId'].should.match(ACCOUNT_ID_REGEX)
    create_status['AccountName'].should.be.a(six.string_types)
    create_status['State'].should.equal('SUCCEEDED')
    create_status['RequestedTimestamp'].should.be.a(datetime.datetime)
    create_status['CompletedTimestamp'].should.be.a(datetime.datetime)
from __future__ import unicode_literals
import six
import sure # noqa
import datetime
from moto.organizations import utils
def test_make_random_org_id():
    """Generated organization ids must match the documented id pattern."""
    generated = utils.make_random_org_id()
    generated.should.match(utils.ORG_ID_REGEX)
def test_make_random_root_id():
    """Generated root ids must match the documented id pattern."""
    generated = utils.make_random_root_id()
    generated.should.match(utils.ROOT_ID_REGEX)
def test_make_random_ou_id():
    """OU ids derived from a fresh root id must match the OU id pattern."""
    parent_root = utils.make_random_root_id()
    generated = utils.make_random_ou_id(parent_root)
    generated.should.match(utils.OU_ID_REGEX)
def test_make_random_account_id():
    """Generated account ids must match the documented id pattern."""
    generated = utils.make_random_account_id()
    generated.should.match(utils.ACCOUNT_ID_REGEX)
def test_make_random_create_account_status_id():
    """Generated create-account-status ids must match the documented pattern."""
    generated = utils.make_random_create_account_status_id()
    generated.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)
def test_make_random_service_control_policy_id():
    """Generated SCP ids must match the documented id pattern."""
    generated = utils.make_random_service_control_policy_id()
    generated.should.match(utils.SCP_ID_REGEX)
def validate_organization(response):
    """Assert a Describe/CreateOrganization response describes a valid organization."""
    org = response['Organization']
    # Exact key-set check: extra or missing keys fail the sorted comparison.
    sorted(org.keys()).should.equal([
        'Arn',
        'AvailablePolicyTypes',
        'FeatureSet',
        'Id',
        'MasterAccountArn',
        'MasterAccountEmail',
        'MasterAccountId',
    ])
    org['Id'].should.match(utils.ORG_ID_REGEX)
    org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID)
    org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
    ))
    org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
    ))
    org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL)
    org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING'])
    org['AvailablePolicyTypes'].should.equal([{
        'Type': 'SERVICE_CONTROL_POLICY',
        'Status': 'ENABLED'
    }])
def validate_roots(org, response):
    """Assert a ListRoots response contains a well-formed first root for *org*."""
    response.should.have.key('Roots').should.be.a(list)
    response['Roots'].should_not.be.empty
    root = response['Roots'][0]
    root.should.have.key('Id').should.match(utils.ROOT_ID_REGEX)
    # Root ARN embeds the master account id, org id, and the root's own id.
    root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
        root['Id'],
    ))
    root.should.have.key('Name').should.be.a(six.string_types)
    root.should.have.key('PolicyTypes').should.be.a(list)
    root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY')
    root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED')
def validate_organizational_unit(org, response):
    """Assert a response's OrganizationalUnit dict has a valid id, ARN, and name."""
    response.should.have.key('OrganizationalUnit').should.be.a(dict)
    ou = response['OrganizationalUnit']
    ou.should.have.key('Id').should.match(utils.OU_ID_REGEX)
    ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
        ou['Id'],
    ))
    ou.should.have.key('Name').should.be.a(six.string_types)
def validate_account(org, account):
    """Assert an Account dict has exactly the expected keys with valid values."""
    # Exact key-set check: extra or missing keys fail the sorted comparison.
    sorted(account.keys()).should.equal([
        'Arn',
        'Email',
        'Id',
        'JoinedMethod',
        'JoinedTimestamp',
        'Name',
        'Status',
    ])
    account['Id'].should.match(utils.ACCOUNT_ID_REGEX)
    account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
        account['Id'],
    ))
    account['Email'].should.match(utils.EMAIL_REGEX)
    account['JoinedMethod'].should.be.within(['INVITED', 'CREATED'])
    account['Status'].should.be.within(['ACTIVE', 'SUSPENDED'])
    account['Name'].should.be.a(six.string_types)
    account['JoinedTimestamp'].should.be.a(datetime.datetime)
def validate_create_account_status(create_status):
    """Assert a CreateAccountStatus dict is complete and marked SUCCEEDED."""
    sorted(create_status.keys()).should.equal([
        'AccountId',
        'AccountName',
        'CompletedTimestamp',
        'Id',
        'RequestedTimestamp',
        'State',
    ])
    create_status['Id'].should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)
    create_status['AccountId'].should.match(utils.ACCOUNT_ID_REGEX)
    create_status['AccountName'].should.be.a(six.string_types)
    create_status['State'].should.equal('SUCCEEDED')
    create_status['RequestedTimestamp'].should.be.a(datetime.datetime)
    create_status['CompletedTimestamp'].should.be.a(datetime.datetime)
def validate_policy_summary(org, summary):
    """Assert an SCP PolicySummary dict has a valid id, ARN, and metadata fields."""
    summary.should.be.a(dict)
    summary.should.have.key('Id').should.match(utils.SCP_ID_REGEX)
    summary.should.have.key('Arn').should.equal(utils.SCP_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
        summary['Id'],
    ))
    summary.should.have.key('Name').should.be.a(six.string_types)
    summary.should.have.key('Description').should.be.a(six.string_types)
    summary.should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY')
    summary.should.have.key('AwsManaged').should.be.a(bool)
def validate_service_control_policy(org, response):
    """Assert a policy response has a Content string and a valid PolicySummary."""
    response.should.have.key('PolicySummary').should.be.a(dict)
    response.should.have.key('Content').should.be.a(six.string_types)
    validate_policy_summary(org, response['PolicySummary'])

View File

@ -1,322 +1,594 @@
from __future__ import unicode_literals
import boto3
import sure # noqa
from botocore.exceptions import ClientError
from nose.tools import assert_raises
from moto import mock_organizations
from moto.organizations import utils
from .organizations_test_utils import (
validate_organization,
validate_roots,
validate_organizational_unit,
validate_account,
validate_create_account_status,
)
@mock_organizations
def test_create_organization():
    """CreateOrganization returns a valid organization with the requested feature set."""
    client = boto3.client('organizations', region_name='us-east-1')
    response = client.create_organization(FeatureSet='ALL')
    validate_organization(response)
    response['Organization']['FeatureSet'].should.equal('ALL')
@mock_organizations
def test_describe_organization():
    """DescribeOrganization returns the organization created earlier."""
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')
    response = client.describe_organization()
    validate_organization(response)
@mock_organizations
def test_describe_organization_exception():
    """DescribeOrganization without an organization raises AWSOrganizationsNotInUseException."""
    client = boto3.client('organizations', region_name='us-east-1')
    with assert_raises(ClientError) as e:
        response = client.describe_organization()
    ex = e.exception
    ex.operation_name.should.equal('DescribeOrganization')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException')
# Organizational Units
@mock_organizations
def test_list_roots():
    """ListRoots returns a well-formed root for a new organization."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    response = client.list_roots()
    validate_roots(org, response)
@mock_organizations
def test_create_organizational_unit():
    """CreateOrganizationalUnit under the root returns a valid OU with the given name."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_name = 'ou01'
    response = client.create_organizational_unit(
        ParentId=root_id,
        Name=ou_name,
    )
    validate_organizational_unit(org, response)
    response['OrganizationalUnit']['Name'].should.equal(ou_name)
@mock_organizations
def test_describe_organizational_unit():
    """DescribeOrganizationalUnit returns a valid OU for an existing OU id."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id,
        Name='ou01',
    )['OrganizationalUnit']['Id']
    response = client.describe_organizational_unit(OrganizationalUnitId=ou_id)
    validate_organizational_unit(org, response)
@mock_organizations
def test_describe_organizational_unit_exception():
    """DescribeOrganizationalUnit on an unknown id raises OrganizationalUnitNotFoundException."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    with assert_raises(ClientError) as e:
        response = client.describe_organizational_unit(
            OrganizationalUnitId=utils.make_random_root_id()
        )
    ex = e.exception
    ex.operation_name.should.equal('DescribeOrganizationalUnit')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException')
@mock_organizations
def test_list_organizational_units_for_parent():
    """ListOrganizationalUnitsForParent returns every OU created under the root."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    client.create_organizational_unit(ParentId=root_id, Name='ou01')
    client.create_organizational_unit(ParentId=root_id, Name='ou02')
    client.create_organizational_unit(ParentId=root_id, Name='ou03')
    response = client.list_organizational_units_for_parent(ParentId=root_id)
    response.should.have.key('OrganizationalUnits').should.be.a(list)
    for ou in response['OrganizationalUnits']:
        validate_organizational_unit(org, dict(OrganizationalUnit=ou))
@mock_organizations
def test_list_organizational_units_for_parent_exception():
    """ListOrganizationalUnitsForParent with an unknown parent raises ParentNotFoundException."""
    client = boto3.client('organizations', region_name='us-east-1')
    with assert_raises(ClientError) as e:
        response = client.list_organizational_units_for_parent(
            ParentId=utils.make_random_root_id()
        )
    ex = e.exception
    ex.operation_name.should.equal('ListOrganizationalUnitsForParent')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('ParentNotFoundException')
# Accounts
# Shared fixtures for the account tests below: a mock account name and a
# full email address assembled as '<name>@<domain>'.
mockname = 'mock-account'
mockdomain = 'moto-example.org'
mockemail = '@'.join([mockname, mockdomain])
@mock_organizations
def test_create_account():
    """CreateAccount returns a valid SUCCEEDED status carrying the account name."""
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')
    create_status = client.create_account(
        AccountName=mockname, Email=mockemail
    )['CreateAccountStatus']
    validate_create_account_status(create_status)
    create_status['AccountName'].should.equal(mockname)
@mock_organizations
def test_describe_account():
    """DescribeAccount returns the created account with its name and email."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    account_id = client.create_account(
        AccountName=mockname, Email=mockemail
    )['CreateAccountStatus']['AccountId']
    response = client.describe_account(AccountId=account_id)
    validate_account(org, response['Account'])
    response['Account']['Name'].should.equal(mockname)
    response['Account']['Email'].should.equal(mockemail)
@mock_organizations
def test_describe_account_exception():
    """DescribeAccount on an unknown account id raises AccountNotFoundException."""
    client = boto3.client('organizations', region_name='us-east-1')
    with assert_raises(ClientError) as e:
        response = client.describe_account(AccountId=utils.make_random_account_id())
    ex = e.exception
    ex.operation_name.should.equal('DescribeAccount')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('AccountNotFoundException')
@mock_organizations
def test_list_accounts():
    """ListAccounts returns every created account, preserving creation order."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    for i in range(5):
        name = mockname + str(i)
        email = name + '@' + mockdomain
        client.create_account(AccountName=name, Email=email)
    response = client.list_accounts()
    response.should.have.key('Accounts')
    accounts = response['Accounts']
    len(accounts).should.equal(5)
    for account in accounts:
        validate_account(org, account)
    # Spot-check ordering: index i corresponds to the i-th created account.
    accounts[3]['Name'].should.equal(mockname + '3')
    accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain)
@mock_organizations
def test_list_accounts_for_parent():
    """A freshly created account is listed under the organization root."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    account_id = client.create_account(
        AccountName=mockname,
        Email=mockemail,
    )['CreateAccountStatus']['AccountId']
    response = client.list_accounts_for_parent(ParentId=root_id)
    account_id.should.be.within([account['Id'] for account in response['Accounts']])
@mock_organizations
def test_move_account():
    """MoveAccount relocates an account from the root into a child OU."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    account_id = client.create_account(
        AccountName=mockname, Email=mockemail
    )['CreateAccountStatus']['AccountId']
    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
    ou01_id = ou01['OrganizationalUnit']['Id']
    client.move_account(
        AccountId=account_id,
        SourceParentId=root_id,
        DestinationParentId=ou01_id,
    )
    # After the move the account must appear under the OU, not the root.
    response = client.list_accounts_for_parent(ParentId=ou01_id)
    account_id.should.be.within([account['Id'] for account in response['Accounts']])
@mock_organizations
def test_list_parents_for_ou():
    """ListParents reports ROOT for a top-level OU and ORGANIZATIONAL_UNIT for a nested one."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
    ou01_id = ou01['OrganizationalUnit']['Id']
    response01 = client.list_parents(ChildId=ou01_id)
    response01.should.have.key('Parents').should.be.a(list)
    response01['Parents'][0].should.have.key('Id').should.equal(root_id)
    response01['Parents'][0].should.have.key('Type').should.equal('ROOT')
    ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02')
    ou02_id = ou02['OrganizationalUnit']['Id']
    response02 = client.list_parents(ChildId=ou02_id)
    response02.should.have.key('Parents').should.be.a(list)
    response02['Parents'][0].should.have.key('Id').should.equal(ou01_id)
    response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT')
@mock_organizations
def test_list_parents_for_accounts():
    """ListParents reports the correct parent type for accounts in the root and in an OU."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
    ou01_id = ou01['OrganizationalUnit']['Id']
    account01_id = client.create_account(
        AccountName='account01',
        Email='account01@moto-example.org'
    )['CreateAccountStatus']['AccountId']
    account02_id = client.create_account(
        AccountName='account02',
        Email='account02@moto-example.org'
    )['CreateAccountStatus']['AccountId']
    # account02 is moved into the OU; account01 stays under the root.
    client.move_account(
        AccountId=account02_id,
        SourceParentId=root_id,
        DestinationParentId=ou01_id,
    )
    response01 = client.list_parents(ChildId=account01_id)
    response01.should.have.key('Parents').should.be.a(list)
    response01['Parents'][0].should.have.key('Id').should.equal(root_id)
    response01['Parents'][0].should.have.key('Type').should.equal('ROOT')
    response02 = client.list_parents(ChildId=account02_id)
    response02.should.have.key('Parents').should.be.a(list)
    response02['Parents'][0].should.have.key('Id').should.equal(ou01_id)
    response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT')
@mock_organizations
def test_list_children():
    """ListChildren filters by ChildType for both root-level and OU-level parents."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
    ou01_id = ou01['OrganizationalUnit']['Id']
    ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02')
    ou02_id = ou02['OrganizationalUnit']['Id']
    account01_id = client.create_account(
        AccountName='account01',
        Email='account01@moto-example.org'
    )['CreateAccountStatus']['AccountId']
    account02_id = client.create_account(
        AccountName='account02',
        Email='account02@moto-example.org'
    )['CreateAccountStatus']['AccountId']
    # Layout: root -> {account01, ou01}; ou01 -> {account02, ou02}.
    client.move_account(
        AccountId=account02_id,
        SourceParentId=root_id,
        DestinationParentId=ou01_id,
    )
    response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT')
    response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT')
    response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT')
    response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT')
    response01['Children'][0]['Id'].should.equal(account01_id)
    response01['Children'][0]['Type'].should.equal('ACCOUNT')
    response02['Children'][0]['Id'].should.equal(ou01_id)
    response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT')
    response03['Children'][0]['Id'].should.equal(account02_id)
    response03['Children'][0]['Type'].should.equal('ACCOUNT')
    response04['Children'][0]['Id'].should.equal(ou02_id)
    response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT')
@mock_organizations
def test_list_children_exception():
    """ListChildren raises ParentNotFound for unknown parents and InvalidInput for bad ChildType."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    with assert_raises(ClientError) as e:
        response = client.list_children(
            ParentId=utils.make_random_root_id(),
            ChildType='ACCOUNT'
        )
    ex = e.exception
    ex.operation_name.should.equal('ListChildren')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('ParentNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.list_children(
            ParentId=root_id,
            ChildType='BLEE'
        )
    ex = e.exception
    ex.operation_name.should.equal('ListChildren')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('InvalidInputException')
from __future__ import unicode_literals
import boto3
import json
import six
import sure # noqa
from botocore.exceptions import ClientError
from nose.tools import assert_raises
from moto import mock_organizations
from moto.organizations import utils
from .organizations_test_utils import (
validate_organization,
validate_roots,
validate_organizational_unit,
validate_account,
validate_create_account_status,
validate_service_control_policy,
validate_policy_summary,
)
@mock_organizations
def test_create_organization():
    """CreateOrganization returns a valid organization with the requested feature set."""
    client = boto3.client('organizations', region_name='us-east-1')
    response = client.create_organization(FeatureSet='ALL')
    validate_organization(response)
    response['Organization']['FeatureSet'].should.equal('ALL')
@mock_organizations
def test_describe_organization():
    """DescribeOrganization returns the organization created earlier."""
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')
    response = client.describe_organization()
    validate_organization(response)
@mock_organizations
def test_describe_organization_exception():
    """DescribeOrganization without an organization raises AWSOrganizationsNotInUseException."""
    client = boto3.client('organizations', region_name='us-east-1')
    with assert_raises(ClientError) as e:
        response = client.describe_organization()
    ex = e.exception
    ex.operation_name.should.equal('DescribeOrganization')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException')
# Organizational Units
@mock_organizations
def test_list_roots():
    """ListRoots returns a well-formed root for a new organization."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    response = client.list_roots()
    validate_roots(org, response)
@mock_organizations
def test_create_organizational_unit():
    """CreateOrganizationalUnit under the root returns a valid OU with the given name."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_name = 'ou01'
    response = client.create_organizational_unit(
        ParentId=root_id,
        Name=ou_name,
    )
    validate_organizational_unit(org, response)
    response['OrganizationalUnit']['Name'].should.equal(ou_name)
@mock_organizations
def test_describe_organizational_unit():
    """DescribeOrganizationalUnit returns a valid OU for an existing OU id."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id,
        Name='ou01',
    )['OrganizationalUnit']['Id']
    response = client.describe_organizational_unit(OrganizationalUnitId=ou_id)
    validate_organizational_unit(org, response)
@mock_organizations
def test_describe_organizational_unit_exception():
    """DescribeOrganizationalUnit on an unknown id raises OrganizationalUnitNotFoundException."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    with assert_raises(ClientError) as e:
        response = client.describe_organizational_unit(
            OrganizationalUnitId=utils.make_random_root_id()
        )
    ex = e.exception
    ex.operation_name.should.equal('DescribeOrganizationalUnit')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException')
@mock_organizations
def test_list_organizational_units_for_parent():
    """ListOrganizationalUnitsForParent returns every OU created under the root."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    client.create_organizational_unit(ParentId=root_id, Name='ou01')
    client.create_organizational_unit(ParentId=root_id, Name='ou02')
    client.create_organizational_unit(ParentId=root_id, Name='ou03')
    response = client.list_organizational_units_for_parent(ParentId=root_id)
    response.should.have.key('OrganizationalUnits').should.be.a(list)
    for ou in response['OrganizationalUnits']:
        validate_organizational_unit(org, dict(OrganizationalUnit=ou))
@mock_organizations
def test_list_organizational_units_for_parent_exception():
    """ListOrganizationalUnitsForParent with an unknown parent raises ParentNotFoundException."""
    client = boto3.client('organizations', region_name='us-east-1')
    with assert_raises(ClientError) as e:
        response = client.list_organizational_units_for_parent(
            ParentId=utils.make_random_root_id()
        )
    ex = e.exception
    ex.operation_name.should.equal('ListOrganizationalUnitsForParent')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('ParentNotFoundException')
# Accounts
# Shared fixtures for the account tests below: a mock account name and a
# full email address assembled as '<name>@<domain>'.
mockname = 'mock-account'
mockdomain = 'moto-example.org'
mockemail = '@'.join([mockname, mockdomain])
@mock_organizations
def test_create_account():
    """CreateAccount returns a valid SUCCEEDED status carrying the account name."""
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')
    create_status = client.create_account(
        AccountName=mockname, Email=mockemail
    )['CreateAccountStatus']
    validate_create_account_status(create_status)
    create_status['AccountName'].should.equal(mockname)
@mock_organizations
def test_describe_account():
    """DescribeAccount returns the created account with its name and email."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    account_id = client.create_account(
        AccountName=mockname, Email=mockemail
    )['CreateAccountStatus']['AccountId']
    response = client.describe_account(AccountId=account_id)
    validate_account(org, response['Account'])
    response['Account']['Name'].should.equal(mockname)
    response['Account']['Email'].should.equal(mockemail)
@mock_organizations
def test_describe_account_exception():
    """DescribeAccount on an unknown account id raises AccountNotFoundException."""
    client = boto3.client('organizations', region_name='us-east-1')
    with assert_raises(ClientError) as e:
        response = client.describe_account(AccountId=utils.make_random_account_id())
    ex = e.exception
    ex.operation_name.should.equal('DescribeAccount')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('AccountNotFoundException')
@mock_organizations
def test_list_accounts():
    """ListAccounts returns every created account, preserving creation order."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    for i in range(5):
        name = mockname + str(i)
        email = name + '@' + mockdomain
        client.create_account(AccountName=name, Email=email)
    response = client.list_accounts()
    response.should.have.key('Accounts')
    accounts = response['Accounts']
    len(accounts).should.equal(5)
    for account in accounts:
        validate_account(org, account)
    # Spot-check ordering: index i corresponds to the i-th created account.
    accounts[3]['Name'].should.equal(mockname + '3')
    accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain)
@mock_organizations
def test_list_accounts_for_parent():
    """A freshly created account is listed under the organization root."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    account_id = client.create_account(
        AccountName=mockname,
        Email=mockemail,
    )['CreateAccountStatus']['AccountId']
    response = client.list_accounts_for_parent(ParentId=root_id)
    account_id.should.be.within([account['Id'] for account in response['Accounts']])
@mock_organizations
def test_move_account():
    """MoveAccount relocates an account from the root into a child OU."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    account_id = client.create_account(
        AccountName=mockname, Email=mockemail
    )['CreateAccountStatus']['AccountId']
    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
    ou01_id = ou01['OrganizationalUnit']['Id']
    client.move_account(
        AccountId=account_id,
        SourceParentId=root_id,
        DestinationParentId=ou01_id,
    )
    # After the move the account must appear under the OU, not the root.
    response = client.list_accounts_for_parent(ParentId=ou01_id)
    account_id.should.be.within([account['Id'] for account in response['Accounts']])
@mock_organizations
def test_list_parents_for_ou():
    """ListParents reports ROOT for a top-level OU and ORGANIZATIONAL_UNIT for a nested one."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
    ou01_id = ou01['OrganizationalUnit']['Id']
    response01 = client.list_parents(ChildId=ou01_id)
    response01.should.have.key('Parents').should.be.a(list)
    response01['Parents'][0].should.have.key('Id').should.equal(root_id)
    response01['Parents'][0].should.have.key('Type').should.equal('ROOT')
    ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02')
    ou02_id = ou02['OrganizationalUnit']['Id']
    response02 = client.list_parents(ChildId=ou02_id)
    response02.should.have.key('Parents').should.be.a(list)
    response02['Parents'][0].should.have.key('Id').should.equal(ou01_id)
    response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT')
@mock_organizations
def test_list_parents_for_accounts():
    """ListParents reports the correct parent type for accounts in the root and in an OU."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
    ou01_id = ou01['OrganizationalUnit']['Id']
    account01_id = client.create_account(
        AccountName='account01',
        Email='account01@moto-example.org'
    )['CreateAccountStatus']['AccountId']
    account02_id = client.create_account(
        AccountName='account02',
        Email='account02@moto-example.org'
    )['CreateAccountStatus']['AccountId']
    # account02 is moved into the OU; account01 stays under the root.
    client.move_account(
        AccountId=account02_id,
        SourceParentId=root_id,
        DestinationParentId=ou01_id,
    )
    response01 = client.list_parents(ChildId=account01_id)
    response01.should.have.key('Parents').should.be.a(list)
    response01['Parents'][0].should.have.key('Id').should.equal(root_id)
    response01['Parents'][0].should.have.key('Type').should.equal('ROOT')
    response02 = client.list_parents(ChildId=account02_id)
    response02.should.have.key('Parents').should.be.a(list)
    response02['Parents'][0].should.have.key('Id').should.equal(ou01_id)
    response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT')
@mock_organizations
def test_list_children():
    """ListChildren filters by ChildType for both root-level and OU-level parents."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
    ou01_id = ou01['OrganizationalUnit']['Id']
    ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02')
    ou02_id = ou02['OrganizationalUnit']['Id']
    account01_id = client.create_account(
        AccountName='account01',
        Email='account01@moto-example.org'
    )['CreateAccountStatus']['AccountId']
    account02_id = client.create_account(
        AccountName='account02',
        Email='account02@moto-example.org'
    )['CreateAccountStatus']['AccountId']
    # Layout: root -> {account01, ou01}; ou01 -> {account02, ou02}.
    client.move_account(
        AccountId=account02_id,
        SourceParentId=root_id,
        DestinationParentId=ou01_id,
    )
    response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT')
    response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT')
    response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT')
    response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT')
    response01['Children'][0]['Id'].should.equal(account01_id)
    response01['Children'][0]['Type'].should.equal('ACCOUNT')
    response02['Children'][0]['Id'].should.equal(ou01_id)
    response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT')
    response03['Children'][0]['Id'].should.equal(account02_id)
    response03['Children'][0]['Type'].should.equal('ACCOUNT')
    response04['Children'][0]['Id'].should.equal(ou02_id)
    response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT')
@mock_organizations
def test_list_children_exception():
    """ListChildren raises ParentNotFound for unknown parents and InvalidInput for bad ChildType."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    with assert_raises(ClientError) as e:
        response = client.list_children(
            ParentId=utils.make_random_root_id(),
            ChildType='ACCOUNT'
        )
    ex = e.exception
    ex.operation_name.should.equal('ListChildren')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('ParentNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.list_children(
            ParentId=root_id,
            ChildType='BLEE'
        )
    ex = e.exception
    ex.operation_name.should.equal('ListChildren')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('InvalidInputException')
# Service Control Policies
# Minimal IAM-style policy document shared by the SCP tests below:
# a single Allow statement for all S3 actions on all resources.
policy_doc01 = dict(
    Version='2012-10-17',
    Statement=[dict(
        Sid='MockPolicyStatement',
        Effect='Allow',
        Action='s3:*',
        Resource='*',
    )]
)
@mock_organizations
def test_create_policy():
    """CreatePolicy returns a valid SCP echoing the supplied name, description, and content."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    policy = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY'
    )['Policy']
    validate_service_control_policy(org, policy)
    policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy')
    policy['PolicySummary']['Description'].should.equal('A dummy service control policy')
    policy['Content'].should.equal(json.dumps(policy_doc01))
@mock_organizations
def test_describe_policy():
    """DescribePolicy returns the previously created SCP by id."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY'
    )['Policy']['PolicySummary']['Id']
    policy = client.describe_policy(PolicyId=policy_id)['Policy']
    validate_service_control_policy(org, policy)
    policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy')
    policy['PolicySummary']['Description'].should.equal('A dummy service control policy')
    policy['Content'].should.equal(json.dumps(policy_doc01))
@mock_organizations
def test_describe_policy_exception():
    """DescribePolicy raises PolicyNotFound for a valid-format unknown id and InvalidInput for a malformed id."""
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')['Organization']
    # Well-formed policy id that was never created.
    policy_id = 'p-47fhe9s3'
    with assert_raises(ClientError) as e:
        response = client.describe_policy(PolicyId=policy_id)
    ex = e.exception
    ex.operation_name.should.equal('DescribePolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('PolicyNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.describe_policy(PolicyId='meaninglessstring')
    ex = e.exception
    ex.operation_name.should.equal('DescribePolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('InvalidInputException')
@mock_organizations
def test_attach_policy():
    """AttachPolicy succeeds against all three target kinds: root, OU, and account."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id,
        Name='ou01',
    )['OrganizationalUnit']['Id']
    account_id = client.create_account(
        AccountName=mockname,
        Email=mockemail,
    )['CreateAccountStatus']['AccountId']
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY'
    )['Policy']['PolicySummary']['Id']
    response = client.attach_policy(PolicyId=policy_id, TargetId=root_id)
    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
    response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id)
    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
    response = client.attach_policy(PolicyId=policy_id, TargetId=account_id)
    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
@mock_organizations
def test_attach_policy_exception():
    """attach_policy raises the right exception for each kind of bad target."""
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')['Organization']
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY',
    )['Policy']['PolicySummary']['Id']
    cases = [
        ('r-dj873', 'OrganizationalUnitNotFoundException'),            # unknown root
        ('ou-gi99-i7r8eh2i2', 'OrganizationalUnitNotFoundException'),  # unknown OU
        ('126644886543', 'AccountNotFoundException'),                  # unknown account
        ('meaninglessstring', 'InvalidInputException'),                # malformed id
    ]
    for target_id, expected in cases:
        with assert_raises(ClientError) as e:
            client.attach_policy(PolicyId=policy_id, TargetId=target_id)
        err = e.exception
        err.operation_name.should.equal('AttachPolicy')
        err.response['Error']['Code'].should.equal('400')
        err.response['Error']['Message'].should.contain(expected)
@mock_organizations
def test_list_polices():  # NOTE(review): "polices" typo kept to preserve the test name
    """list_policies returns a valid summary for every created SCP."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    for suffix in range(4):
        client.create_policy(
            Content=json.dumps(policy_doc01),
            Description='A dummy service control policy',
            Name='MockServiceControlPolicy%d' % suffix,
            Type='SERVICE_CONTROL_POLICY',
        )
    listed = client.list_policies(Filter='SERVICE_CONTROL_POLICY')
    for policy in listed['Policies']:
        validate_policy_summary(org, policy)
@mock_organizations
def test_list_policies_for_target():
    """Policies attached to an OU or account appear in list_policies_for_target."""
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id, Name='ou01')['OrganizationalUnit']['Id']
    account_id = client.create_account(
        AccountName=mockname, Email=mockemail,
    )['CreateAccountStatus']['AccountId']
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY',
    )['Policy']['PolicySummary']['Id']
    for target_id in (ou_id, account_id):
        client.attach_policy(PolicyId=policy_id, TargetId=target_id)
        listed = client.list_policies_for_target(
            TargetId=target_id,
            Filter='SERVICE_CONTROL_POLICY',
        )
        for policy in listed['Policies']:
            validate_policy_summary(org, policy)
@mock_organizations
def test_list_policies_for_target_exception():
    """list_policies_for_target raises the right exception per bad target id."""
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')['Organization']
    cases = [
        ('ou-gi99-i7r8eh2i2', 'OrganizationalUnitNotFoundException'),  # unknown OU
        ('126644886543', 'AccountNotFoundException'),                  # unknown account
        ('meaninglessstring', 'InvalidInputException'),                # malformed id
    ]
    for target_id, expected in cases:
        with assert_raises(ClientError) as e:
            client.list_policies_for_target(
                TargetId=target_id,
                Filter='SERVICE_CONTROL_POLICY',
            )
        err = e.exception
        err.operation_name.should.equal('ListPoliciesForTarget')
        err.response['Error']['Code'].should.equal('400')
        err.response['Error']['Message'].should.contain(expected)
@mock_organizations
def test_list_targets_for_policy():
    """All attachment targets (root, OU, account) are listed for a policy."""
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id, Name='ou01')['OrganizationalUnit']['Id']
    account_id = client.create_account(
        AccountName=mockname, Email=mockemail,
    )['CreateAccountStatus']['AccountId']
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY',
    )['Policy']['PolicySummary']['Id']
    for target_id in (root_id, ou_id, account_id):
        client.attach_policy(PolicyId=policy_id, TargetId=target_id)
    targets = client.list_targets_for_policy(PolicyId=policy_id)['Targets']
    for target in targets:
        target.should.be.a(dict)
        target.should.have.key('Name').should.be.a(six.string_types)
        target.should.have.key('Arn').should.be.a(six.string_types)
        target.should.have.key('TargetId').should.be.a(six.string_types)
        target.should.have.key('Type').should.be.within(
            ['ROOT', 'ORGANIZATIONAL_UNIT', 'ACCOUNT']
        )
@mock_organizations
def test_list_targets_for_policy_exception():
    """list_targets_for_policy raises for unknown and malformed policy ids."""
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')['Organization']
    cases = [
        ('p-47fhe9s3', 'PolicyNotFoundException'),       # well-formed but absent
        ('meaninglessstring', 'InvalidInputException'),  # malformed id
    ]
    for policy_id, expected in cases:
        with assert_raises(ClientError) as e:
            client.list_targets_for_policy(PolicyId=policy_id)
        err = e.exception
        err.operation_name.should.equal('ListTargetsForPolicy')
        err.response['Error']['Code'].should.equal('400')
        err.response['Error']['Message'].should.contain(expected)

View File

@ -1,324 +1,324 @@
from __future__ import unicode_literals
import boto3
import boto.rds
import boto.vpc
from boto.exception import BotoServerError
import sure # noqa
from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds
from tests.helpers import disable_on_py3
@mock_rds_deprecated
def test_create_database():
    """create_dbinstance returns an available instance with the requested settings."""
    conn = boto.rds.connect_to_region("us-west-2")
    db = conn.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
        security_groups=["my_sg"])
    db.status.should.equal('available')
    db.id.should.equal("db-master-1")
    db.allocated_storage.should.equal(10)
    db.instance_class.should.equal("db.m1.small")
    db.master_username.should.equal("root")
    db.endpoint.should.equal(
        ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306))
    db.security_groups[0].name.should.equal('my_sg')
@mock_rds_deprecated
def test_get_databases():
    """get_all_dbinstances lists every instance, or filters by identifier."""
    conn = boto.rds.connect_to_region("us-west-2")
    list(conn.get_all_dbinstances()).should.have.length_of(0)
    for name in ("db-master-1", "db-master-2"):
        conn.create_dbinstance(name, 10, 'db.m1.small', 'root', 'hunter2')
    list(conn.get_all_dbinstances()).should.have.length_of(2)
    matching = conn.get_all_dbinstances("db-master-1")
    list(matching).should.have.length_of(1)
    matching[0].id.should.equal("db-master-1")
@mock_rds
def test_get_databases_paginated():
    """describe_db_instances pages at 50 results and resumes from Marker."""
    conn = boto3.client('rds', region_name="us-west-2")
    for index in range(51):
        conn.create_db_instance(
            AllocatedStorage=5,
            Port=5432,
            DBInstanceIdentifier='rds%d' % index,
            DBInstanceClass='db.t1.micro',
            Engine='postgres')
    first_page = conn.describe_db_instances()
    first_page["DBInstances"].should.have.length_of(50)
    first_page["Marker"].should.equal(
        first_page["DBInstances"][-1]['DBInstanceIdentifier'])
    second_page = conn.describe_db_instances(Marker=first_page["Marker"])
    second_page["DBInstances"].should.have.length_of(1)
@mock_rds_deprecated
def test_describe_non_existant_database():
    """Describing an unknown instance raises BotoServerError."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.get_all_dbinstances.when.called_with("not-a-db").should.throw(
        BotoServerError)
@mock_rds_deprecated
def test_delete_database():
    """delete_dbinstance removes the instance from the listing."""
    rds = boto.rds.connect_to_region("us-west-2")
    list(rds.get_all_dbinstances()).should.have.length_of(0)
    rds.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    list(rds.get_all_dbinstances()).should.have.length_of(1)
    rds.delete_dbinstance("db-master-1")
    list(rds.get_all_dbinstances()).should.have.length_of(0)
@mock_rds_deprecated
def test_delete_non_existant_database():
    """Deleting an unknown instance raises BotoServerError."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.delete_dbinstance.when.called_with("not-a-db").should.throw(
        BotoServerError)
@mock_rds_deprecated
def test_create_database_security_group():
    """create_dbsecurity_group returns an empty group with the given metadata."""
    rds = boto.rds.connect_to_region("us-west-2")
    group = rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    group.name.should.equal('db_sg')
    group.description.should.equal("DB Security Group")
    list(group.ip_ranges).should.equal([])
@mock_rds_deprecated
def test_get_security_groups():
    """get_all_dbsecurity_groups lists every group, or filters by name."""
    rds = boto.rds.connect_to_region("us-west-2")
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(0)
    for name in ('db_sg1', 'db_sg2'):
        rds.create_dbsecurity_group(name, 'DB Security Group')
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(2)
    matching = rds.get_all_dbsecurity_groups("db_sg1")
    list(matching).should.have.length_of(1)
    matching[0].name.should.equal("db_sg1")
@mock_rds_deprecated
def test_get_non_existant_security_group():
    """Describing an unknown security group raises BotoServerError."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(
        BotoServerError)
@mock_rds_deprecated
def test_delete_database_security_group():
    """delete_dbsecurity_group removes the group from the listing."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(1)
    rds.delete_dbsecurity_group("db_sg")
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(0)
@mock_rds_deprecated
def test_delete_non_existant_security_group():
    """Deleting an unknown security group raises BotoServerError."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(
        BotoServerError)
@disable_on_py3()
@mock_rds_deprecated
def test_security_group_authorize():
    """authorize() adds a CIDR IP range to a DB security group."""
    rds = boto.rds.connect_to_region("us-west-2")
    group = rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    list(group.ip_ranges).should.equal([])
    group.authorize(cidr_ip='10.3.2.45/32')
    # Re-fetch to confirm the change was persisted on the backend.
    group = rds.get_all_dbsecurity_groups()[0]
    list(group.ip_ranges).should.have.length_of(1)
    group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32')
@mock_rds_deprecated
def test_add_security_group_to_database():
    """modify(security_groups=...) associates a security group with an instance."""
    rds = boto.rds.connect_to_region("us-west-2")
    instance = rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    group = rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    instance.modify(security_groups=[group])
    # Re-fetch to confirm the association was persisted.
    instance = rds.get_all_dbinstances()[0]
    list(instance.security_groups).should.have.length_of(1)
    instance.security_groups[0].name.should.equal("db_sg")
@mock_ec2_deprecated
@mock_rds_deprecated
def test_add_database_subnet_group():
    """create_db_subnet_group builds a subnet group from existing VPC subnets.

    Bug fix: the subnet CIDRs (10.1.0.0/24, 10.2.0.0/24) were outside the
    VPC's 10.0.0.0/16 block; AWS requires subnet CIDRs to be sub-ranges of
    the VPC CIDR, so use 10.0.1.0/24 and 10.0.2.0/24 instead.
    """
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")
    subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.2.0/24")
    subnet_ids = [subnet1.id, subnet2.id]
    conn = boto.rds.connect_to_region("us-west-2")
    subnet_group = conn.create_db_subnet_group(
        "db_subnet", "my db subnet", subnet_ids)
    subnet_group.name.should.equal('db_subnet')
    subnet_group.description.should.equal("my db subnet")
    list(subnet_group.subnet_ids).should.equal(subnet_ids)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_describe_database_subnet_group():
    """get_all_db_subnet_groups lists, filters by name, and raises on unknowns.

    Bug fix: subnet CIDR 10.1.0.0/24 was outside the VPC's 10.0.0.0/16 block;
    use a sub-range of the VPC CIDR (10.0.1.0/24) instead.
    """
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id])
    list(conn.get_all_db_subnet_groups()).should.have.length_of(2)
    list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1)
    conn.get_all_db_subnet_groups.when.called_with(
        "not-a-subnet").should.throw(BotoServerError)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_delete_database_subnet_group():
    """delete_db_subnet_group removes a group and raises on a second delete.

    Bug fix: subnet CIDR 10.1.0.0/24 was outside the VPC's 10.0.0.0/16 block;
    use a sub-range of the VPC CIDR (10.0.1.0/24) instead.
    """
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    list(conn.get_all_db_subnet_groups()).should.have.length_of(1)
    conn.delete_db_subnet_group("db_subnet1")
    list(conn.get_all_db_subnet_groups()).should.have.length_of(0)
    conn.delete_db_subnet_group.when.called_with(
        "db_subnet1").should.throw(BotoServerError)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_create_database_in_subnet_group():
    """An instance created with db_subnet_group_name reports that subnet group.

    Bug fix: subnet CIDR 10.1.0.0/24 was outside the VPC's 10.0.0.0/16 block;
    use a sub-range of the VPC CIDR (10.0.1.0/24) instead.
    """
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small',
                                      'root', 'hunter2', db_subnet_group_name="db_subnet1")
    database = conn.get_all_dbinstances("db-master-1")[0]
    database.subnet_group.name.should.equal("db_subnet1")
@mock_rds_deprecated
def test_create_database_replica():
    """Read replicas register on the primary and deregister when deleted."""
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    replica = conn.create_dbinstance_read_replica(
        "replica", "db-master-1", "db.m1.small")
    replica.id.should.equal("replica")
    replica.instance_class.should.equal("db.m1.small")
    replication = replica.status_infos[0]
    replication.normal.should.equal(True)
    replication.status_type.should.equal('read replication')
    replication.status.should.equal('replicating')
    primary = conn.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
    # Deleting the replica detaches it from the primary.
    conn.delete_dbinstance("replica")
    primary = conn.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@mock_rds_deprecated
def test_create_cross_region_database_replica():
    """A replica in another region is created from the primary's ARN."""
    west_1 = boto.rds.connect_to_region("us-west-1")
    west_2 = boto.rds.connect_to_region("us-west-2")
    west_1.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1"
    west_2.create_dbinstance_read_replica(
        "replica", primary_arn, "db.m1.small")
    primary = west_1.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
    replica = west_2.get_all_dbinstances("replica")[0]
    replica.instance_class.should.equal("db.m1.small")
    # Deleting the replica detaches it from the cross-region primary.
    west_2.delete_dbinstance("replica")
    primary = west_1.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@mock_rds_deprecated
def test_connecting_to_us_east_1():
    """Regression test: boto uses a region-less RDS URL for us-east-1.

    That broke moto in the past:
    https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
    """
    conn = boto.rds.connect_to_region("us-east-1")
    db = conn.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
        security_groups=["my_sg"])
    db.status.should.equal('available')
    db.id.should.equal("db-master-1")
    db.allocated_storage.should.equal(10)
    db.instance_class.should.equal("db.m1.small")
    db.master_username.should.equal("root")
    db.endpoint.should.equal(
        ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306))
    db.security_groups[0].name.should.equal('my_sg')
@mock_rds_deprecated
def test_create_database_with_iops():
    """iops= is honoured and implies provisioned-IOPS ('io1') storage."""
    rds = boto.rds.connect_to_region("us-west-2")
    db = rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000)
    db.status.should.equal('available')
    db.iops.should.equal(6000)
    # boto>2.36.0 may change the following property name to `storage_type`
    db.StorageType.should.equal('io1')
from __future__ import unicode_literals
import boto3
import boto.rds
import boto.vpc
from boto.exception import BotoServerError
import sure # noqa
from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds
from tests.helpers import disable_on_py3
@mock_rds_deprecated
def test_create_database():
    """create_dbinstance returns an available instance with the requested settings."""
    conn = boto.rds.connect_to_region("us-west-2")
    db = conn.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
        security_groups=["my_sg"])
    db.status.should.equal('available')
    db.id.should.equal("db-master-1")
    db.allocated_storage.should.equal(10)
    db.instance_class.should.equal("db.m1.small")
    db.master_username.should.equal("root")
    db.endpoint.should.equal(
        ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306))
    db.security_groups[0].name.should.equal('my_sg')
@mock_rds_deprecated
def test_get_databases():
    """get_all_dbinstances lists every instance, or filters by identifier."""
    conn = boto.rds.connect_to_region("us-west-2")
    list(conn.get_all_dbinstances()).should.have.length_of(0)
    for name in ("db-master-1", "db-master-2"):
        conn.create_dbinstance(name, 10, 'db.m1.small', 'root', 'hunter2')
    list(conn.get_all_dbinstances()).should.have.length_of(2)
    matching = conn.get_all_dbinstances("db-master-1")
    list(matching).should.have.length_of(1)
    matching[0].id.should.equal("db-master-1")
@mock_rds
def test_get_databases_paginated():
    """describe_db_instances pages at 50 results and resumes from Marker."""
    conn = boto3.client('rds', region_name="us-west-2")
    for index in range(51):
        conn.create_db_instance(
            AllocatedStorage=5,
            Port=5432,
            DBInstanceIdentifier='rds%d' % index,
            DBInstanceClass='db.t1.micro',
            Engine='postgres')
    first_page = conn.describe_db_instances()
    first_page["DBInstances"].should.have.length_of(50)
    first_page["Marker"].should.equal(
        first_page["DBInstances"][-1]['DBInstanceIdentifier'])
    second_page = conn.describe_db_instances(Marker=first_page["Marker"])
    second_page["DBInstances"].should.have.length_of(1)
@mock_rds_deprecated
def test_describe_non_existant_database():
    """Describing an unknown instance raises BotoServerError."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.get_all_dbinstances.when.called_with("not-a-db").should.throw(
        BotoServerError)
@mock_rds_deprecated
def test_delete_database():
    """delete_dbinstance removes the instance from the listing."""
    rds = boto.rds.connect_to_region("us-west-2")
    list(rds.get_all_dbinstances()).should.have.length_of(0)
    rds.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    list(rds.get_all_dbinstances()).should.have.length_of(1)
    rds.delete_dbinstance("db-master-1")
    list(rds.get_all_dbinstances()).should.have.length_of(0)
@mock_rds_deprecated
def test_delete_non_existant_database():
    """Deleting an unknown instance raises BotoServerError."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.delete_dbinstance.when.called_with("not-a-db").should.throw(
        BotoServerError)
@mock_rds_deprecated
def test_create_database_security_group():
    """create_dbsecurity_group returns an empty group with the given metadata."""
    rds = boto.rds.connect_to_region("us-west-2")
    group = rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    group.name.should.equal('db_sg')
    group.description.should.equal("DB Security Group")
    list(group.ip_ranges).should.equal([])
@mock_rds_deprecated
def test_get_security_groups():
    """get_all_dbsecurity_groups lists every group, or filters by name."""
    rds = boto.rds.connect_to_region("us-west-2")
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(0)
    for name in ('db_sg1', 'db_sg2'):
        rds.create_dbsecurity_group(name, 'DB Security Group')
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(2)
    matching = rds.get_all_dbsecurity_groups("db_sg1")
    list(matching).should.have.length_of(1)
    matching[0].name.should.equal("db_sg1")
@mock_rds_deprecated
def test_get_non_existant_security_group():
    """Describing an unknown security group raises BotoServerError."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(
        BotoServerError)
@mock_rds_deprecated
def test_delete_database_security_group():
    """delete_dbsecurity_group removes the group from the listing."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(1)
    rds.delete_dbsecurity_group("db_sg")
    list(rds.get_all_dbsecurity_groups()).should.have.length_of(0)
@mock_rds_deprecated
def test_delete_non_existant_security_group():
    """Deleting an unknown security group raises BotoServerError."""
    rds = boto.rds.connect_to_region("us-west-2")
    rds.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(
        BotoServerError)
@disable_on_py3()
@mock_rds_deprecated
def test_security_group_authorize():
    """authorize() adds a CIDR IP range to a DB security group."""
    rds = boto.rds.connect_to_region("us-west-2")
    group = rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    list(group.ip_ranges).should.equal([])
    group.authorize(cidr_ip='10.3.2.45/32')
    # Re-fetch to confirm the change was persisted on the backend.
    group = rds.get_all_dbsecurity_groups()[0]
    list(group.ip_ranges).should.have.length_of(1)
    group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32')
@mock_rds_deprecated
def test_add_security_group_to_database():
    """modify(security_groups=...) associates a security group with an instance."""
    rds = boto.rds.connect_to_region("us-west-2")
    instance = rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    group = rds.create_dbsecurity_group('db_sg', 'DB Security Group')
    instance.modify(security_groups=[group])
    # Re-fetch to confirm the association was persisted.
    instance = rds.get_all_dbinstances()[0]
    list(instance.security_groups).should.have.length_of(1)
    instance.security_groups[0].name.should.equal("db_sg")
@mock_ec2_deprecated
@mock_rds_deprecated
def test_add_database_subnet_group():
    """create_db_subnet_group builds a subnet group from existing VPC subnets."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    # Subnet CIDRs must be sub-ranges of the VPC CIDR.
    subnets = [vpc_conn.create_subnet(vpc.id, cidr)
               for cidr in ("10.0.1.0/24", "10.0.2.0/24")]
    subnet_ids = [subnet.id for subnet in subnets]
    rds = boto.rds.connect_to_region("us-west-2")
    group = rds.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids)
    group.name.should.equal('db_subnet')
    group.description.should.equal("my db subnet")
    list(group.subnet_ids).should.equal(subnet_ids)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_describe_database_subnet_group():
    """get_all_db_subnet_groups lists, filters by name, and raises on unknowns."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")
    rds = boto.rds.connect_to_region("us-west-2")
    for group_name in ("db_subnet1", "db_subnet2"):
        rds.create_db_subnet_group(group_name, "my db subnet", [subnet.id])
    list(rds.get_all_db_subnet_groups()).should.have.length_of(2)
    list(rds.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1)
    rds.get_all_db_subnet_groups.when.called_with(
        "not-a-subnet").should.throw(BotoServerError)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_delete_database_subnet_group():
    """delete_db_subnet_group removes a group and raises on a second delete."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")
    rds = boto.rds.connect_to_region("us-west-2")
    rds.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    list(rds.get_all_db_subnet_groups()).should.have.length_of(1)
    rds.delete_db_subnet_group("db_subnet1")
    list(rds.get_all_db_subnet_groups()).should.have.length_of(0)
    rds.delete_db_subnet_group.when.called_with(
        "db_subnet1").should.throw(BotoServerError)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_create_database_in_subnet_group():
    """An instance created with db_subnet_group_name reports that subnet group."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")
    rds = boto.rds.connect_to_region("us-west-2")
    rds.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
        db_subnet_group_name="db_subnet1")
    instance = rds.get_all_dbinstances("db-master-1")[0]
    instance.subnet_group.name.should.equal("db_subnet1")
@mock_rds_deprecated
def test_create_database_replica():
    """Read replicas register on the primary and deregister when deleted."""
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    replica = conn.create_dbinstance_read_replica(
        "replica", "db-master-1", "db.m1.small")
    replica.id.should.equal("replica")
    replica.instance_class.should.equal("db.m1.small")
    replication = replica.status_infos[0]
    replication.normal.should.equal(True)
    replication.status_type.should.equal('read replication')
    replication.status.should.equal('replicating')
    primary = conn.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
    # Deleting the replica detaches it from the primary.
    conn.delete_dbinstance("replica")
    primary = conn.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@mock_rds_deprecated
def test_create_cross_region_database_replica():
    """A replica in another region is created from the primary's ARN."""
    west_1 = boto.rds.connect_to_region("us-west-1")
    west_2 = boto.rds.connect_to_region("us-west-2")
    west_1.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1"
    west_2.create_dbinstance_read_replica(
        "replica", primary_arn, "db.m1.small")
    primary = west_1.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
    replica = west_2.get_all_dbinstances("replica")[0]
    replica.instance_class.should.equal("db.m1.small")
    # Deleting the replica detaches it from the cross-region primary.
    west_2.delete_dbinstance("replica")
    primary = west_1.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@mock_rds_deprecated
def test_connecting_to_us_east_1():
    """Regression test: boto uses a region-less RDS URL for us-east-1.

    That broke moto in the past:
    https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
    """
    conn = boto.rds.connect_to_region("us-east-1")
    db = conn.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
        security_groups=["my_sg"])
    db.status.should.equal('available')
    db.id.should.equal("db-master-1")
    db.allocated_storage.should.equal(10)
    db.instance_class.should.equal("db.m1.small")
    db.master_username.should.equal("root")
    db.endpoint.should.equal(
        ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306))
    db.security_groups[0].name.should.equal('my_sg')
@mock_rds_deprecated
def test_create_database_with_iops():
    """iops= is honoured and implies provisioned-IOPS ('io1') storage."""
    rds = boto.rds.connect_to_region("us-west-2")
    db = rds.create_dbinstance(
        "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000)
    db.status.should.equal('available')
    db.iops.should.equal(6000)
    # boto>2.36.0 may change the following property name to `storage_type`
    db.StorageType.should.equal('io1')

Some files were not shown because too many files have changed in this diff Show More