Merge remote-tracking branch 'upstream/master'

Stephan 2019-05-28 08:55:50 +02:00
commit d0de38601d
110 changed files with 22567 additions and 20241 deletions

.gitignore
View File

@@ -18,3 +18,4 @@ venv/
 .python-version
 .vscode/
 tests/file.tmp
+.eggs/

View File

@@ -54,3 +54,5 @@ Moto is written by Steve Pulec with contributions from:
 * [William Richard](https://github.com/william-richard)
 * [Alex Casalboni](https://github.com/alexcasalboni)
 * [Jon Beilke](https://github.com/jrbeilke)
+* [Craig Anderson](https://github.com/craiga)
+* [Robert Lewis](https://github.com/ralewis85)

File diff suppressed because it is too large

View File

@@ -47,7 +47,7 @@ def test_my_model_save():
     body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8")
-    assert body == b'is awesome'
+    assert body == 'is awesome'
 ```
 With the decorator wrapping the test, all the calls to s3 are automatically mocked out. The mock keeps the state of the buckets and keys.
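
For readers skimming the diff, a minimal, self-contained version of the test this README passage describes might look like the following (a sketch assuming boto3 and moto's `mock_s3` are installed; the bucket and key names are illustrative):

```
import boto3
from moto import mock_s3

@mock_s3
def test_my_model_save():
    # Every boto3 S3 call below hits moto's in-memory backend, which keeps
    # bucket and key state for the duration of the test.
    conn = boto3.resource('s3', region_name='us-east-1')
    conn.create_bucket(Bucket='mybucket')
    conn.Object('mybucket', 'steve').put(Body=b'is awesome')
    body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8")
    assert body == 'is awesome'
```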

View File

@@ -36,6 +36,7 @@ from .polly import mock_polly  # flake8: noqa
 from .rds import mock_rds, mock_rds_deprecated  # flake8: noqa
 from .rds2 import mock_rds2, mock_rds2_deprecated  # flake8: noqa
 from .redshift import mock_redshift, mock_redshift_deprecated  # flake8: noqa
+from .resourcegroups import mock_resourcegroups  # flake8: noqa
 from .s3 import mock_s3, mock_s3_deprecated  # flake8: noqa
 from .ses import mock_ses, mock_ses_deprecated  # flake8: noqa
 from .secretsmanager import mock_secretsmanager  # flake8: noqa

View File

@@ -1,4 +1,7 @@
 from __future__ import unicode_literals
+import random
 from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
 from moto.compat import OrderedDict
 from moto.core import BaseBackend, BaseModel
@@ -159,13 +162,7 @@ class FakeAutoScalingGroup(BaseModel):
         self.autoscaling_backend = autoscaling_backend
         self.name = name
-        if not availability_zones and not vpc_zone_identifier:
-            raise AutoscalingClientError(
-                "ValidationError",
-                "At least one Availability Zone or VPC Subnet is required."
-            )
-        self.availability_zones = availability_zones
-        self.vpc_zone_identifier = vpc_zone_identifier
+        self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier)
         self.max_size = max_size
         self.min_size = min_size
@@ -188,6 +185,35 @@ class FakeAutoScalingGroup(BaseModel):
         self.tags = tags if tags else []
         self.set_desired_capacity(desired_capacity)

+    def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False):
+        # for updates, if only AZs are provided, they must not clash with
+        # the AZs of existing VPCs
+        if update and availability_zones and not vpc_zone_identifier:
+            vpc_zone_identifier = self.vpc_zone_identifier
+
+        if vpc_zone_identifier:
+            # extract azs for vpcs
+            subnet_ids = vpc_zone_identifier.split(',')
+            subnets = self.autoscaling_backend.ec2_backend.get_all_subnets(subnet_ids=subnet_ids)
+            vpc_zones = [subnet.availability_zone for subnet in subnets]
+
+            if availability_zones and set(availability_zones) != set(vpc_zones):
+                raise AutoscalingClientError(
+                    "ValidationError",
+                    "The availability zones of the specified subnets and the Auto Scaling group do not match",
+                )
+            availability_zones = vpc_zones
+        elif not availability_zones:
+            if not update:
+                raise AutoscalingClientError(
+                    "ValidationError",
+                    "At least one Availability Zone or VPC Subnet is required."
+                )
+            return
+
+        self.availability_zones = availability_zones
+        self.vpc_zone_identifier = vpc_zone_identifier
+
     @classmethod
     def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
         properties = cloudformation_json['Properties']
@@ -246,8 +272,8 @@ class FakeAutoScalingGroup(BaseModel):
                health_check_period, health_check_type,
                placement_group, termination_policies,
                new_instances_protected_from_scale_in=None):
-        if availability_zones:
-            self.availability_zones = availability_zones
+        self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier, update=True)
         if max_size is not None:
             self.max_size = max_size
         if min_size is not None:
@@ -257,8 +283,6 @@ class FakeAutoScalingGroup(BaseModel):
             self.launch_config = self.autoscaling_backend.launch_configurations[
                 launch_config_name]
             self.launch_config_name = launch_config_name
-        if vpc_zone_identifier is not None:
-            self.vpc_zone_identifier = vpc_zone_identifier
         if health_check_period is not None:
             self.health_check_period = health_check_period
         if health_check_type is not None:
@@ -319,7 +343,8 @@ class FakeAutoScalingGroup(BaseModel):
                 self.launch_config.user_data,
                 self.launch_config.security_groups,
                 instance_type=self.launch_config.instance_type,
-                tags={'instance': propagated_tags}
+                tags={'instance': propagated_tags},
+                placement=random.choice(self.availability_zones),
             )
             for instance in reservation.instances:
                 instance.autoscaling_group = self
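
Taken together, these hunks mean an ASG's availability zones are now derived from (and validated against) its VPCZoneIdentifier subnets, and spawned instances get a real placement instead of a hard-coded us-east-1e. A hedged sketch of how this behaves through boto3 (illustrative names; assumes the usual moto decorators):

```
import boto3
from moto import mock_autoscaling, mock_ec2

@mock_ec2
@mock_autoscaling
def test_instances_land_in_subnet_azs():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
    subnet = ec2.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/24',
                               AvailabilityZone='us-east-1a')['Subnet']

    asg = boto3.client('autoscaling', region_name='us-east-1')
    asg.create_launch_configuration(LaunchConfigurationName='lc',
                                    ImageId='ami-12345678',
                                    InstanceType='t2.micro')
    # AZs are derived from VPCZoneIdentifier; passing AvailabilityZones that
    # clash with the subnets' AZs should now raise a ValidationError.
    asg.create_auto_scaling_group(AutoScalingGroupName='tester',
                                  LaunchConfigurationName='lc',
                                  MinSize=1, MaxSize=2, DesiredCapacity=1,
                                  VPCZoneIdentifier=subnet['SubnetId'])
```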

View File

@@ -404,7 +404,7 @@ ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<AttachLoadBalancerTargetGroups
 <AttachLoadBalancerTargetGroupsResult>
 </AttachLoadBalancerTargetGroupsResult>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </AttachLoadBalancerTargetGroupsResponse>"""
@@ -412,7 +412,7 @@ ATTACH_INSTANCES_TEMPLATE = """<AttachInstancesResponse xmlns="http://autoscalin
 <AttachInstancesResult>
 </AttachInstancesResult>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </AttachInstancesResponse>"""
@@ -428,7 +428,7 @@ DESCRIBE_LOAD_BALANCER_TARGET_GROUPS = """<DescribeLoadBalancerTargetGroupsRespo
 </LoadBalancerTargetGroups>
 </DescribeLoadBalancerTargetGroupsResult>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </DescribeLoadBalancerTargetGroupsResponse>"""
@@ -454,7 +454,7 @@ DETACH_INSTANCES_TEMPLATE = """<DetachInstancesResponse xmlns="http://autoscalin
 </Activities>
 </DetachInstancesResult>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </DetachInstancesResponse>"""
@@ -462,7 +462,7 @@ DETACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<DetachLoadBalancerTargetGroups
 <DetachLoadBalancerTargetGroupsResult>
 </DetachLoadBalancerTargetGroupsResult>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </DetachLoadBalancerTargetGroupsResponse>"""
@@ -499,7 +499,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
 {% for instance_state in group.instance_states %}
 <member>
 <HealthStatus>{{ instance_state.health_status }}</HealthStatus>
-<AvailabilityZone>us-east-1e</AvailabilityZone>
+<AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
 <InstanceId>{{ instance_state.instance.id }}</InstanceId>
 <LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
 <LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
@@ -585,7 +585,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """<DescribeAutoScalingInstancesRespon
 <member>
 <HealthStatus>{{ instance_state.health_status }}</HealthStatus>
 <AutoScalingGroupName>{{ instance_state.instance.autoscaling_group.name }}</AutoScalingGroupName>
-<AvailabilityZone>us-east-1e</AvailabilityZone>
+<AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
 <InstanceId>{{ instance_state.instance.id }}</InstanceId>
 <LaunchConfigurationName>{{ instance_state.instance.autoscaling_group.launch_config_name }}</LaunchConfigurationName>
 <LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
@@ -654,7 +654,7 @@ DELETE_POLICY_TEMPLATE = """<DeleteScalingPolicyResponse xmlns="http://autoscali
 ATTACH_LOAD_BALANCERS_TEMPLATE = """<AttachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
 <AttachLoadBalancersResult></AttachLoadBalancersResult>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </AttachLoadBalancersResponse>"""
@@ -670,14 +670,14 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http
 </LoadBalancers>
 </DescribeLoadBalancersResult>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </DescribeLoadBalancersResponse>"""

 DETACH_LOAD_BALANCERS_TEMPLATE = """<DetachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
 <DetachLoadBalancersResult></DetachLoadBalancersResult>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </DetachLoadBalancersResponse>"""
@@ -690,13 +690,13 @@ SUSPEND_PROCESSES_TEMPLATE = """<SuspendProcessesResponse xmlns="http://autoscal
 SET_INSTANCE_HEALTH_TEMPLATE = """<SetInstanceHealthResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
 <SetInstanceHealthResponse></SetInstanceHealthResponse>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </SetInstanceHealthResponse>"""

 SET_INSTANCE_PROTECTION_TEMPLATE = """<SetInstanceProtectionResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
 <SetInstanceProtectionResult></SetInstanceProtectionResult>
 <ResponseMetadata>
-<RequestId>{{ requestid }}</RequestId>
+<RequestId></RequestId>
 </ResponseMetadata>
 </SetInstanceProtectionResponse>"""

View File

@@ -30,7 +30,7 @@ from moto.s3.models import s3_backend
 from moto.logs.models import logs_backends
 from moto.s3.exceptions import MissingBucket, MissingKey
 from moto import settings
-from .utils import make_function_arn
+from .utils import make_function_arn, make_function_ver_arn

 logger = logging.getLogger(__name__)
@@ -45,7 +45,7 @@ except ImportError:
 _stderr_regex = re.compile(r'START|END|REPORT RequestId: .*')
 _orig_adapter_send = requests.adapters.HTTPAdapter.send
-docker_3 = docker.__version__.startswith("3")
+docker_3 = docker.__version__[0] >= '3'

 def zip2tar(zip_bytes):
@@ -215,12 +215,12 @@ class LambdaFunction(BaseModel):
         self.code_size = key.size
         self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
-        self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version)
+        self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name)
         self.tags = dict()

     def set_version(self, version):
-        self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version)
+        self.function_arn = make_function_ver_arn(self.region, ACCOUNT_ID, self.function_name, version)
         self.version = version
         self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
@@ -503,7 +503,10 @@ class LambdaStorage(object):
     def list_versions_by_function(self, name):
         if name not in self._functions:
             return None
-        return [self._functions[name]['latest']]
+
+        latest = copy.copy(self._functions[name]['latest'])
+        latest.function_arn += ':$LATEST'
+        return [latest] + self._functions[name]['versions']

     def get_arn(self, arn):
         return self._arns.get(arn, None)
@@ -535,6 +538,7 @@ class LambdaStorage(object):
         fn.set_version(new_version)

         self._functions[name]['versions'].append(fn)
+        self._arns[fn.function_arn] = fn
         return fn

     def del_function(self, name, qualifier=None):
@@ -604,6 +608,9 @@ class LambdaBackend(BaseBackend):
         self._lambdas.put_function(fn)

+        if spec.get('Publish'):
+            ver = self.publish_function(function_name)
+            fn.version = ver.version
         return fn

     def publish_function(self, function_name):

View File

@@ -150,7 +150,7 @@ class LambdaResponse(BaseResponse):
         for fn in self.lambda_backend.list_functions():
             json_data = fn.get_configuration()
+            json_data['Version'] = '$LATEST'
             result['Functions'].append(json_data)

         return 200, {}, json.dumps(result)
@@ -204,7 +204,10 @@ class LambdaResponse(BaseResponse):
         if fn:
             code = fn.get_code()
+            if qualifier is None or qualifier == '$LATEST':
+                code['Configuration']['Version'] = '$LATEST'
+            if qualifier == '$LATEST':
+                code['Configuration']['FunctionArn'] += ':$LATEST'
             return 200, {}, json.dumps(code)
         else:
             return 404, {}, "{}"

View File

@@ -3,8 +3,13 @@ from collections import namedtuple
 ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version'])

-def make_function_arn(region, account, name, version='1'):
-    return 'arn:aws:lambda:{0}:{1}:function:{2}:{3}'.format(region, account, name, version)
+def make_function_arn(region, account, name):
+    return 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account, name)
+
+
+def make_function_ver_arn(region, account, name, version='1'):
+    arn = make_function_arn(region, account, name)
+    return '{0}:{1}'.format(arn, version)

 def split_function_arn(arn):
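
Concretely, the split gives unqualified and version-qualified ARNs, per the helpers above (values shown for a hypothetical function named foo):

```
make_function_arn('us-east-1', '123456789012', 'foo')
# -> 'arn:aws:lambda:us-east-1:123456789012:function:foo'

make_function_ver_arn('us-east-1', '123456789012', 'foo', version='2')
# -> 'arn:aws:lambda:us-east-1:123456789012:function:foo:2'
```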

View File

@@ -32,6 +32,7 @@ from moto.organizations import organizations_backends
 from moto.polly import polly_backends
 from moto.rds2 import rds2_backends
 from moto.redshift import redshift_backends
+from moto.resourcegroups import resourcegroups_backends
 from moto.route53 import route53_backends
 from moto.s3 import s3_backends
 from moto.ses import ses_backends
@@ -81,6 +82,7 @@ BACKENDS = {
     'organizations': organizations_backends,
     'polly': polly_backends,
     'redshift': redshift_backends,
+    'resource-groups': resourcegroups_backends,
     'rds': rds2_backends,
     's3': s3_backends,
     's3bucket_path': s3_backends,

View File

@@ -12,7 +12,7 @@ from moto.batch import models as batch_models
 from moto.cloudwatch import models as cloudwatch_models
 from moto.cognitoidentity import models as cognitoidentity_models
 from moto.datapipeline import models as datapipeline_models
-from moto.dynamodb import models as dynamodb_models
+from moto.dynamodb2 import models as dynamodb2_models
 from moto.ec2 import models as ec2_models
 from moto.ecs import models as ecs_models
 from moto.elb import models as elb_models
@@ -37,7 +37,7 @@ MODEL_MAP = {
     "AWS::Batch::JobDefinition": batch_models.JobDefinition,
     "AWS::Batch::JobQueue": batch_models.JobQueue,
     "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment,
-    "AWS::DynamoDB::Table": dynamodb_models.Table,
+    "AWS::DynamoDB::Table": dynamodb2_models.Table,
     "AWS::Kinesis::Stream": kinesis_models.Stream,
     "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping,
     "AWS::Lambda::Function": lambda_models.LambdaFunction,
@@ -425,11 +425,18 @@ class ResourceMap(collections.Mapping):
                 self.resolved_parameters[parameter_name] = parameter.get('Default')

         # Set any input parameters that were passed
+        self.no_echo_parameter_keys = []
         for key, value in self.input_parameters.items():
             if key in self.resolved_parameters:
-                value_type = parameter_slots[key].get('Type', 'String')
+                parameter_slot = parameter_slots[key]
+
+                value_type = parameter_slot.get('Type', 'String')
                 if value_type == 'CommaDelimitedList' or value_type.startswith("List"):
                     value = value.split(',')
+
+                if parameter_slot.get('NoEcho'):
+                    self.no_echo_parameter_keys.append(key)
+
                 self.resolved_parameters[key] = value

         # Check if there are any non-default params that were not passed input

View File

@@ -654,7 +654,11 @@ DESCRIBE_STACKS_TEMPLATE = """<DescribeStacksResponse>
           {% for param_name, param_value in stack.stack_parameters.items() %}
             <member>
               <ParameterKey>{{ param_name }}</ParameterKey>
-              <ParameterValue>{{ param_value }}</ParameterValue>
+              {% if param_name in stack.resource_map.no_echo_parameter_keys %}
+              <ParameterValue>****</ParameterValue>
+              {% else %}
+              <ParameterValue>{{ param_value }}</ParameterValue>
+              {% endif %}
             </member>
           {% endfor %}
           </Parameters>
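
A hedged sketch of the resulting behaviour (illustrative template and names; assumes moto's mock_cloudformation): parameters flagged NoEcho should come back masked from DescribeStacks.

```
import json
import boto3
from moto import mock_cloudformation

@mock_cloudformation
def test_no_echo_parameter_is_masked():
    template = {
        "Parameters": {"DbPass": {"Type": "String", "NoEcho": True}},
        "Resources": {
            "Queue": {"Type": "AWS::SQS::Queue",
                      "Properties": {"QueueName": "q"}},
        },
    }
    cf = boto3.client('cloudformation', region_name='us-east-1')
    cf.create_stack(
        StackName='s', TemplateBody=json.dumps(template),
        Parameters=[{'ParameterKey': 'DbPass', 'ParameterValue': 'hunter2'}])
    params = cf.describe_stacks(StackName='s')['Stacks'][0]['Parameters']
    # NoEcho values are expected to come back as '****', not the real value.
    assert params[0]['ParameterValue'] == '****'
```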

View File

@@ -287,6 +287,18 @@ class CognitoIdpUser(BaseModel):

         return user_json

+    def update_attributes(self, new_attributes):
+
+        def flatten_attrs(attrs):
+            return {attr['Name']: attr['Value'] for attr in attrs}
+
+        def expand_attrs(attrs):
+            return [{'Name': k, 'Value': v} for k, v in attrs.items()]
+
+        flat_attributes = flatten_attrs(self.attributes)
+        flat_attributes.update(flatten_attrs(new_attributes))
+        self.attributes = expand_attrs(flat_attributes)
+

 class CognitoIdpBackend(BaseBackend):
@@ -673,6 +685,17 @@ class CognitoIdpBackend(BaseBackend):
         else:
             raise NotAuthorizedError(access_token)

+    def admin_update_user_attributes(self, user_pool_id, username, attributes):
+        user_pool = self.user_pools.get(user_pool_id)
+        if not user_pool:
+            raise ResourceNotFoundError(user_pool_id)
+
+        if username not in user_pool.users:
+            raise UserNotFoundError(username)
+
+        user = user_pool.users[username]
+        user.update_attributes(attributes)
+

 cognitoidp_backends = {}
 for region in boto.cognito.identity.regions():

View File

@@ -352,6 +352,13 @@ class CognitoIdpResponse(BaseResponse):
         cognitoidp_backends[region].change_password(access_token, previous_password, proposed_password)
         return ""

+    def admin_update_user_attributes(self):
+        user_pool_id = self._get_param("UserPoolId")
+        username = self._get_param("Username")
+        attributes = self._get_param("UserAttributes")
+        cognitoidp_backends[self.region].admin_update_user_attributes(user_pool_id, username, attributes)
+        return ""
+

 class CognitoIdpJsonWebKeyResponse(BaseResponse):
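
A short usage sketch against the mocked endpoint (illustrative names; per the model code above, admin_update_user_attributes merges new attributes into the existing ones rather than replacing them wholesale):

```
import boto3
from moto import mock_cognitoidp

@mock_cognitoidp
def test_admin_update_user_attributes():
    client = boto3.client('cognito-idp', region_name='us-west-2')
    pool_id = client.create_user_pool(PoolName='pool')['UserPool']['Id']
    client.admin_create_user(
        UserPoolId=pool_id, Username='steve',
        UserAttributes=[{'Name': 'family_name', 'Value': 'Pulec'}])
    # Adds given_name while keeping the previously set family_name.
    client.admin_update_user_attributes(
        UserPoolId=pool_id, Username='steve',
        UserAttributes=[{'Name': 'given_name', 'Value': 'Steve'}])
```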

View File

@@ -152,11 +152,18 @@ class BaseResponse(_TemplateEnvironmentMixin):
             for key, value in flat.items():
                 querystring[key] = [value]
         elif self.body:
-            querystring.update(parse_qs(raw_body, keep_blank_values=True))
+            try:
+                querystring.update(parse_qs(raw_body, keep_blank_values=True))
+            except UnicodeEncodeError:
+                pass  # ignore encoding errors, as the body may not contain a legitimate querystring
         if not querystring:
             querystring.update(headers)

-        querystring = _decode_dict(querystring)
+        try:
+            querystring = _decode_dict(querystring)
+        except UnicodeDecodeError:
+            pass  # ignore decoding errors, as the body may not contain a legitimate querystring

         self.uri = full_url
         self.path = urlparse(full_url).path
         self.querystring = querystring

View File

@@ -280,7 +280,7 @@ def amzn_request_id(f):

         # Update request ID in XML
         try:
-            body = body.replace('{{ requestid }}', request_id)
+            body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body)
         except Exception:  # Will just ignore if it cant work on bytes (which are str's on python2)
             pass
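
The template and decorator hunks work as a pair: the autoscaling templates now ship an empty <RequestId></RequestId>, and the decorator back-fills it. A standalone sketch of what the regex substitution does (made-up body string):

```
import re
import uuid

body = "<ResponseMetadata><RequestId></RequestId></ResponseMetadata>"
request_id = str(uuid.uuid4())
# The lookbehind/lookahead pair rewrites whatever sits between the tags, so
# templates no longer need a '{{ requestid }}' placeholder rendered by Jinja.
body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body)
print(body)
```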

View File

@@ -0,0 +1,2 @@
+class InvalidIndexNameError(ValueError):
+    pass

View File

@@ -13,6 +13,7 @@ from moto.core import BaseBackend, BaseModel
 from moto.core.utils import unix_time
 from moto.core.exceptions import JsonRESTError
 from .comparisons import get_comparison_func, get_filter_expression, Op
+from .exceptions import InvalidIndexNameError

 class DynamoJsonEncoder(json.JSONEncoder):
@@ -293,6 +294,19 @@ class Item(BaseModel):
                     # TODO: implement other data types
                     raise NotImplementedError(
                         'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
+            elif action == 'DELETE':
+                if set(update_action['Value'].keys()) == set(['SS']):
+                    existing = self.attrs.get(attribute_name, DynamoType({"SS": {}}))
+                    new_set = set(existing.value).difference(set(new_value))
+                    self.attrs[attribute_name] = DynamoType({
+                        "SS": list(new_set)
+                    })
+                else:
+                    raise NotImplementedError(
+                        'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
+            else:
+                raise NotImplementedError(
+                    '%s action not support for update_with_attribute_updates' % action)

 class StreamRecord(BaseModel):
@@ -403,6 +417,25 @@ class Table(BaseModel):
             }
         self.set_stream_specification(streams)

+    @classmethod
+    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
+        properties = cloudformation_json['Properties']
+        params = {}
+
+        if 'KeySchema' in properties:
+            params['schema'] = properties['KeySchema']
+        if 'AttributeDefinitions' in properties:
+            params['attr'] = properties['AttributeDefinitions']
+        if 'GlobalSecondaryIndexes' in properties:
+            params['global_indexes'] = properties['GlobalSecondaryIndexes']
+        if 'ProvisionedThroughput' in properties:
+            params['throughput'] = properties['ProvisionedThroughput']
+        if 'LocalSecondaryIndexes' in properties:
+            params['indexes'] = properties['LocalSecondaryIndexes']
+
+        table = dynamodb_backends[region_name].create_table(name=properties['TableName'], **params)
+        return table
+
     def _generate_arn(self, name):
         return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name
@@ -572,7 +605,7 @@ class Table(BaseModel):
         results = []

         if index_name:
-            all_indexes = (self.global_indexes or []) + (self.indexes or [])
+            all_indexes = self.all_indexes()
             indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
             if index_name not in indexes_by_name:
                 raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
@@ -672,11 +705,39 @@ class Table(BaseModel):
             else:
                 yield hash_set

-    def scan(self, filters, limit, exclusive_start_key, filter_expression=None):
+    def all_indexes(self):
+        return (self.global_indexes or []) + (self.indexes or [])
+
+    def has_idx_items(self, index_name):
+        all_indexes = self.all_indexes()
+        indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
+        idx = indexes_by_name[index_name]
+        idx_col_set = set([i['AttributeName'] for i in idx['KeySchema']])
+
+        for hash_set in self.items.values():
+            if self.range_key_attr:
+                for item in hash_set.values():
+                    if idx_col_set.issubset(set(item.attrs)):
+                        yield item
+            else:
+                if idx_col_set.issubset(set(hash_set.attrs)):
+                    yield hash_set
+
+    def scan(self, filters, limit, exclusive_start_key, filter_expression=None, index_name=None):
         results = []
         scanned_count = 0
+        all_indexes = self.all_indexes()
+        indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)

-        for item in self.all_items():
+        if index_name:
+            if index_name not in indexes_by_name:
+                raise InvalidIndexNameError('The table does not have the specified index: %s' % index_name)
+            items = self.has_idx_items(index_name)
+        else:
+            items = self.all_items()
+
+        for item in items:
             scanned_count += 1
             passes_all_conditions = True
             for attribute_name, (comparison_operator, comparison_objs) in filters.items():
@@ -703,10 +764,10 @@ class Table(BaseModel):
                 results.append(item)

         results, last_evaluated_key = self._trim_results(results, limit,
-                                                         exclusive_start_key)
+                                                         exclusive_start_key, index_name)
         return results, scanned_count, last_evaluated_key

-    def _trim_results(self, results, limit, exclusive_start_key):
+    def _trim_results(self, results, limit, exclusive_start_key, scaned_index=None):
         if exclusive_start_key is not None:
             hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr))
             range_key = exclusive_start_key.get(self.range_key_attr)
@@ -726,6 +787,14 @@ class Table(BaseModel):
             if results[-1].range_key is not None:
                 last_evaluated_key[self.range_key_attr] = results[-1].range_key

+            if scaned_index:
+                all_indexes = self.all_indexes()
+                indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
+                idx = indexes_by_name[scaned_index]
+                idx_col_list = [i['AttributeName'] for i in idx['KeySchema']]
+                for col in idx_col_list:
+                    last_evaluated_key[col] = results[-1].attrs[col]
+
         return results, last_evaluated_key

     def lookup(self, *args, **kwargs):
@@ -893,7 +962,7 @@ class DynamoDBBackend(BaseBackend):
         return table.query(hash_key, range_comparison, range_values, limit,
                            exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs)

-    def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values):
+    def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values, index_name):
         table = self.tables.get(table_name)
         if not table:
             return None, None, None
@@ -908,7 +977,7 @@ class DynamoDBBackend(BaseBackend):
         else:
             filter_expression = Op(None, None)  # Will always eval to true

-        return table.scan(scan_filters, limit, exclusive_start_key, filter_expression)
+        return table.scan(scan_filters, limit, exclusive_start_key, filter_expression, index_name)

     def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names,
                     expression_attribute_values, expected=None):
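
A hedged sketch of the new index-aware scan through boto3 (illustrative table and index names; per has_idx_items above, only items carrying the index's key attributes are expected to be scanned):

```
import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def test_scan_by_index():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='messages',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[
            {'AttributeName': 'id', 'AttributeType': 'S'},
            {'AttributeName': 'gsi_pk', 'AttributeType': 'S'},
        ],
        GlobalSecondaryIndexes=[{
            'IndexName': 'test_gsi',
            'KeySchema': [{'AttributeName': 'gsi_pk', 'KeyType': 'HASH'}],
            'Projection': {'ProjectionType': 'ALL'},
            'ProvisionedThroughput': {'ReadCapacityUnits': 1,
                                      'WriteCapacityUnits': 1},
        }],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
    )
    client.put_item(TableName='messages', Item={'id': {'S': '1'}})
    client.put_item(TableName='messages',
                    Item={'id': {'S': '2'}, 'gsi_pk': {'S': 'a'}})
    # Only the item that has the index key attribute shows up.
    resp = client.scan(TableName='messages', IndexName='test_gsi')
    assert resp['Count'] == 1
```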

View File

@@ -5,6 +5,7 @@ import re

 from moto.core.responses import BaseResponse
 from moto.core.utils import camelcase_to_underscores, amzn_request_id
+from .exceptions import InvalidIndexNameError
 from .models import dynamodb_backends, dynamo_json_dump
@@ -156,8 +157,16 @@ class DynamoHandler(BaseResponse):
         body = self.body
         # get the table name
         table_name = body['TableName']
-        # get the throughput
-        throughput = body["ProvisionedThroughput"]
+        # check billing mode and get the throughput
+        if "BillingMode" in body.keys() and body["BillingMode"] == "PAY_PER_REQUEST":
+            if "ProvisionedThroughput" in body.keys():
+                er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
+                return self.error(er,
+                                  'ProvisionedThroughput cannot be specified \
+                                   when BillingMode is PAY_PER_REQUEST')
+            throughput = None
+        else:         # Provisioned (default billing mode)
+            throughput = body["ProvisionedThroughput"]
         # getting the schema
         key_schema = body['KeySchema']
         # getting attribute definition
@@ -552,6 +561,7 @@ class DynamoHandler(BaseResponse):
         exclusive_start_key = self.body.get('ExclusiveStartKey')
         limit = self.body.get("Limit")
+        index_name = self.body.get('IndexName')

         try:
             items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters,
@@ -559,7 +569,11 @@ class DynamoHandler(BaseResponse):
                                                                                   exclusive_start_key,
                                                                                   filter_expression,
                                                                                   expression_attribute_names,
-                                                                                  expression_attribute_values)
+                                                                                  expression_attribute_values,
+                                                                                  index_name)
+        except InvalidIndexNameError as err:
+            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
+            return self.error(er, str(err))
         except ValueError as err:
             er = 'com.amazonaws.dynamodb.v20111205#ValidationError'
             return self.error(er, 'Bad Filter Expression: {0}'.format(err))
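
And a similar sketch for the new billing-mode handling: with PAY_PER_REQUEST, ProvisionedThroughput must be omitted, otherwise a ValidationException comes back (illustrative table name):

```
import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def test_create_on_demand_table():
    client = boto3.client('dynamodb', region_name='us-east-1')
    # With PAY_PER_REQUEST there is no throughput block at all; supplying
    # both now returns a ValidationException instead of being accepted.
    client.create_table(
        TableName='messages',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        BillingMode='PAY_PER_REQUEST',
    )
```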

View File

@@ -58,6 +58,14 @@ class InvalidKeyPairDuplicateError(EC2ClientError):
                 .format(key))

+class InvalidKeyPairFormatError(EC2ClientError):
+    def __init__(self):
+        super(InvalidKeyPairFormatError, self).__init__(
+            "InvalidKeyPair.Format",
+            "Key is not in valid OpenSSH public key format")
+

 class InvalidVPCIdError(EC2ClientError):
     def __init__(self, vpc_id):
@@ -420,3 +428,79 @@ class OperationNotPermitted(EC2ClientError):
             "The vpc CIDR block with association ID {} may not be disassociated. "
             "It is the primary IPv4 CIDR block of the VPC".format(association_id)
         )
+
+
+class NetworkAclEntryAlreadyExistsError(EC2ClientError):
+    def __init__(self, rule_number):
+        super(NetworkAclEntryAlreadyExistsError, self).__init__(
+            "NetworkAclEntryAlreadyExists",
+            "The network acl entry identified by {} already exists.".format(rule_number)
+        )
+
+
+class InvalidSubnetRangeError(EC2ClientError):
+    def __init__(self, cidr_block):
+        super(InvalidSubnetRangeError, self).__init__(
+            "InvalidSubnet.Range",
+            "The CIDR '{}' is invalid.".format(cidr_block)
+        )
+
+
+class InvalidCIDRBlockParameterError(EC2ClientError):
+    def __init__(self, cidr_block):
+        super(InvalidCIDRBlockParameterError, self).__init__(
+            "InvalidParameterValue",
+            "Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block)
+        )
+
+
+class InvalidDestinationCIDRBlockParameterError(EC2ClientError):
+    def __init__(self, cidr_block):
+        super(InvalidDestinationCIDRBlockParameterError, self).__init__(
+            "InvalidParameterValue",
+            "Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block)
+        )
+
+
+class InvalidSubnetConflictError(EC2ClientError):
+    def __init__(self, cidr_block):
+        super(InvalidSubnetConflictError, self).__init__(
+            "InvalidSubnet.Conflict",
+            "The CIDR '{}' conflicts with another subnet".format(cidr_block)
+        )
+
+
+class InvalidVPCRangeError(EC2ClientError):
+    def __init__(self, cidr_block):
+        super(InvalidVPCRangeError, self).__init__(
+            "InvalidVpc.Range",
+            "The CIDR '{}' is invalid.".format(cidr_block)
+        )
+
+
+# accept exception
+class OperationNotPermitted2(EC2ClientError):
+    def __init__(self, client_region, pcx_id, acceptor_region):
+        super(OperationNotPermitted2, self).__init__(
+            "OperationNotPermitted",
+            "Incorrect region ({0}) specified for this request."
+            "VPC peering connection {1} must be accepted in region {2}".format(client_region, pcx_id, acceptor_region)
+        )
+
+
+# reject exception
+class OperationNotPermitted3(EC2ClientError):
+    def __init__(self, client_region, pcx_id, acceptor_region):
+        super(OperationNotPermitted3, self).__init__(
+            "OperationNotPermitted",
+            "Incorrect region ({0}) specified for this request."
+            "VPC peering connection {1} must be accepted or rejected in region {2}".format(client_region,
+                                                                                           pcx_id,
+                                                                                           acceptor_region)
+        )

View File

@@ -20,6 +20,7 @@ from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
 from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest
 from boto.ec2.launchspecification import LaunchSpecification
 from moto.compat import OrderedDict
 from moto.core import BaseBackend
 from moto.core.models import Model, BaseModel
@@ -35,14 +36,17 @@ from .exceptions import (
     InvalidAMIIdError,
     InvalidAMIAttributeItemValueError,
     InvalidAssociationIdError,
+    InvalidCIDRBlockParameterError,
     InvalidCIDRSubnetError,
     InvalidCustomerGatewayIdError,
+    InvalidDestinationCIDRBlockParameterError,
     InvalidDHCPOptionsIdError,
     InvalidDomainError,
     InvalidID,
     InvalidInstanceIdError,
     InvalidInternetGatewayIdError,
     InvalidKeyPairDuplicateError,
+    InvalidKeyPairFormatError,
     InvalidKeyPairNameError,
     InvalidNetworkAclIdError,
     InvalidNetworkAttachmentIdError,
@@ -56,20 +60,26 @@ from .exceptions import (
     InvalidSecurityGroupDuplicateError,
     InvalidSecurityGroupNotFoundError,
     InvalidSnapshotIdError,
+    InvalidSubnetConflictError,
     InvalidSubnetIdError,
+    InvalidSubnetRangeError,
     InvalidVolumeIdError,
     InvalidVolumeAttachmentError,
     InvalidVpcCidrBlockAssociationIdError,
     InvalidVPCPeeringConnectionIdError,
     InvalidVPCPeeringConnectionStateTransitionError,
     InvalidVPCIdError,
+    InvalidVPCRangeError,
     InvalidVpnGatewayIdError,
     InvalidVpnConnectionIdError,
     MalformedAMIIdError,
     MalformedDHCPOptionsIdError,
     MissingParameterError,
     MotoNotImplementedError,
+    NetworkAclEntryAlreadyExistsError,
     OperationNotPermitted,
+    OperationNotPermitted2,
+    OperationNotPermitted3,
     ResourceAlreadyAssociatedError,
     RulesPerSecurityGroupLimitExceededError,
     TagLimitExceeded)
@@ -118,6 +128,8 @@ from .utils import (
     random_customer_gateway_id,
     is_tag_filter,
     tag_filter_matches,
+    rsa_public_key_parse,
+    rsa_public_key_fingerprint
 )

 INSTANCE_TYPES = json.load(
@@ -404,7 +416,7 @@ class Instance(TaggedEC2Resource, BotoInstance):
                 warnings.warn('Could not find AMI with image-id:{0}, '
                               'in the near future this will '
                               'cause an error.\n'
-                              'Use ec2_backend.describe_images() to'
+                              'Use ec2_backend.describe_images() to '
                               'find suitable image for your test'.format(image_id),
                               PendingDeprecationWarning)
@@ -908,7 +920,14 @@ class KeyPairBackend(object):
     def import_key_pair(self, key_name, public_key_material):
         if key_name in self.keypairs:
             raise InvalidKeyPairDuplicateError(key_name)
-        keypair = KeyPair(key_name, **random_key_pair())
+
+        try:
+            rsa_public_key = rsa_public_key_parse(public_key_material)
+        except ValueError:
+            raise InvalidKeyPairFormatError()
+
+        fingerprint = rsa_public_key_fingerprint(rsa_public_key)
+        keypair = KeyPair(key_name, material=public_key_material, fingerprint=fingerprint)
         self.keypairs[key_name] = keypair
         return keypair
@@ -1879,6 +1898,8 @@ class Snapshot(TaggedEC2Resource):
             return str(self.encrypted).lower()
         elif filter_name == 'status':
             return self.status
+        elif filter_name == 'owner-id':
+            return self.owner_id
         else:
             return super(Snapshot, self).get_filter_value(
                 filter_name, 'DescribeSnapshots')
@@ -2120,22 +2141,28 @@ class VPC(TaggedEC2Resource):

 class VPCBackend(object):
-    __refs__ = defaultdict(list)
+    vpc_refs = defaultdict(set)

     def __init__(self):
         self.vpcs = {}
-        self.__refs__[self.__class__].append(weakref.ref(self))
+        self.vpc_refs[self.__class__].add(weakref.ref(self))
         super(VPCBackend, self).__init__()

     @classmethod
-    def get_instances(cls):
-        for inst_ref in cls.__refs__[cls]:
+    def get_vpc_refs(cls):
+        for inst_ref in cls.vpc_refs[cls]:
             inst = inst_ref()
             if inst is not None:
                 yield inst

     def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False):
         vpc_id = random_vpc_id()
+        try:
+            vpc_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False)
+        except ValueError:
+            raise InvalidCIDRBlockParameterError(cidr_block)
+        if vpc_cidr_block.prefixlen < 16 or vpc_cidr_block.prefixlen > 28:
+            raise InvalidVPCRangeError(cidr_block)
         vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block)
         self.vpcs[vpc_id] = vpc
@@ -2159,7 +2186,7 @@ class VPCBackend(object):
     # get vpc by vpc id and aws region
     def get_cross_vpc(self, vpc_id, peer_region):
-        for vpcs in self.get_instances():
+        for vpcs in self.get_vpc_refs():
             if vpcs.region_name == peer_region:
                 match_vpc = vpcs.get_vpc(vpc_id)
         return match_vpc
@@ -2280,15 +2307,31 @@ class VPCPeeringConnection(TaggedEC2Resource):

 class VPCPeeringConnectionBackend(object):
+    # for cross region vpc reference
+    vpc_pcx_refs = defaultdict(set)
+
     def __init__(self):
         self.vpc_pcxs = {}
+        self.vpc_pcx_refs[self.__class__].add(weakref.ref(self))
         super(VPCPeeringConnectionBackend, self).__init__()

+    @classmethod
+    def get_vpc_pcx_refs(cls):
+        for inst_ref in cls.vpc_pcx_refs[cls]:
+            inst = inst_ref()
+            if inst is not None:
+                yield inst
+
     def create_vpc_peering_connection(self, vpc, peer_vpc):
         vpc_pcx_id = random_vpc_peering_connection_id()
         vpc_pcx = VPCPeeringConnection(vpc_pcx_id, vpc, peer_vpc)
         vpc_pcx._status.pending()
         self.vpc_pcxs[vpc_pcx_id] = vpc_pcx
+        # insert cross region peering info
+        if vpc.ec2_backend.region_name != peer_vpc.ec2_backend.region_name:
+            for vpc_pcx_cx in peer_vpc.ec2_backend.get_vpc_pcx_refs():
+                if vpc_pcx_cx.region_name == peer_vpc.ec2_backend.region_name:
+                    vpc_pcx_cx.vpc_pcxs[vpc_pcx_id] = vpc_pcx
         return vpc_pcx

     def get_all_vpc_peering_connections(self):
@@ -2306,6 +2349,11 @@ class VPCPeeringConnectionBackend(object):
     def accept_vpc_peering_connection(self, vpc_pcx_id):
         vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
+        # if cross region need accepter from another region
+        pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name
+        pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name
+        if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region:
+            raise OperationNotPermitted2(self.region_name, vpc_pcx.id, pcx_acp_region)
         if vpc_pcx._status.code != 'pending-acceptance':
             raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
         vpc_pcx._status.accept()
@@ -2313,6 +2361,11 @@ class VPCPeeringConnectionBackend(object):
     def reject_vpc_peering_connection(self, vpc_pcx_id):
         vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
+        # if cross region need accepter from another region
+        pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name
+        pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name
+        if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region:
+            raise OperationNotPermitted3(self.region_name, vpc_pcx.id, pcx_acp_region)
         if vpc_pcx._status.code != 'pending-acceptance':
             raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
         vpc_pcx._status.reject()
@@ -2326,7 +2379,7 @@ class Subnet(TaggedEC2Resource):
         self.id = subnet_id
         self.vpc_id = vpc_id
         self.cidr_block = cidr_block
-        self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block))
+        self.cidr = ipaddress.IPv4Network(six.text_type(self.cidr_block), strict=False)
         self._availability_zone = availability_zone
         self.default_for_az = default_for_az
         self.map_public_ip_on_launch = map_public_ip_on_launch
@@ -2458,7 +2511,19 @@ class SubnetBackend(object):
     def create_subnet(self, vpc_id, cidr_block, availability_zone):
         subnet_id = random_subnet_id()
-        self.get_vpc(vpc_id)  # Validate VPC exists
+        vpc = self.get_vpc(vpc_id)  # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's
+        vpc_cidr_block = ipaddress.IPv4Network(six.text_type(vpc.cidr_block), strict=False)
+        try:
+            subnet_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False)
+        except ValueError:
+            raise InvalidCIDRBlockParameterError(cidr_block)
+        if not (vpc_cidr_block.network_address <= subnet_cidr_block.network_address and
+                vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address):
+            raise InvalidSubnetRangeError(cidr_block)
+
+        for subnet in self.get_all_subnets(filters={'vpc-id': vpc_id}):
+            if subnet.cidr.overlaps(subnet_cidr_block):
+                raise InvalidSubnetConflictError(cidr_block)

         # if this is the first subnet for an availability zone,
         # consider it the default
@@ -2718,6 +2783,11 @@ class RouteBackend(object):
             elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id:
                 gateway = self.get_internet_gateway(gateway_id)

+        try:
+            ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False)
+        except ValueError:
+            raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block)
+
         route = Route(route_table, destination_cidr_block, local=local,
                       gateway=gateway,
                       instance=self.get_instance(
@@ -3595,10 +3665,10 @@ class NetworkAclBackend(object):
     def add_default_entries(self, network_acl_id):
         default_acl_entries = [
-            {'rule_number': 100, 'rule_action': 'allow', 'egress': 'true'},
-            {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'true'},
-            {'rule_number': 100, 'rule_action': 'allow', 'egress': 'false'},
-            {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'false'}
+            {'rule_number': "100", 'rule_action': 'allow', 'egress': 'true'},
+            {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'true'},
+            {'rule_number': "100", 'rule_action': 'allow', 'egress': 'false'},
+            {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'false'}
         ]
         for entry in default_acl_entries:
             self.create_network_acl_entry(network_acl_id=network_acl_id, rule_number=entry['rule_number'], protocol='-1',
@@ -3629,12 +3699,14 @@ class NetworkAclBackend(object):
                                  icmp_code, icmp_type, port_range_from,
                                  port_range_to):

+        network_acl = self.get_network_acl(network_acl_id)
+        if any(entry.egress == egress and entry.rule_number == rule_number for entry in network_acl.network_acl_entries):
+            raise NetworkAclEntryAlreadyExistsError(rule_number)
         network_acl_entry = NetworkAclEntry(self, network_acl_id, rule_number,
                                             protocol, rule_action, egress,
                                             cidr_block, icmp_code, icmp_type,
                                             port_range_from, port_range_to)

-        network_acl = self.get_network_acl(network_acl_id)
         network_acl.network_acl_entries.append(network_acl_entry)
         return network_acl_entry
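
Among other things, these hunks add CIDR validation when creating subnets. A hedged boto3 sketch (illustrative ranges): a subnet outside its VPC's block should now be rejected with InvalidSubnet.Range.

```
import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2

@mock_ec2
def test_subnet_must_fit_inside_vpc():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
    # Inside the VPC's range: accepted.
    ec2.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')
    # Outside the VPC's range: rejected by the new validation.
    try:
        ec2.create_subnet(VpcId=vpc['VpcId'], CidrBlock='192.168.0.0/24')
    except ClientError as e:
        assert e.response['Error']['Code'] == 'InvalidSubnet.Range'
```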

View File

@@ -74,30 +74,35 @@ CREATE_VPC_PEERING_CONNECTION_RESPONSE = """
 """

 DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """
-<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
+<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
 <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
 <vpcPeeringConnectionSet>
 {% for vpc_pcx in vpc_pcxs %}
 <item>
 <vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
 <requesterVpcInfo>
 <ownerId>777788889999</ownerId>
 <vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
 <cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
 </requesterVpcInfo>
 <accepterVpcInfo>
 <ownerId>123456789012</ownerId>
 <vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
-</accepterVpcInfo>
-<status>
-<code>{{ vpc_pcx._status.code }}</code>
-<message>{{ vpc_pcx._status.message }}</message>
-</status>
-<expirationTime>2014-02-17T16:00:50.000Z</expirationTime>
-<tagSet/>
-</item>
-{% endfor %}
-</vpcPeeringConnectionSet>
+<cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
+<peeringOptions>
+<allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
+<allowEgressFromLocalVpcToRemoteClassicLink>true</allowEgressFromLocalVpcToRemoteClassicLink>
+<allowDnsResolutionFromRemoteVpc>false</allowDnsResolutionFromRemoteVpc>
+</peeringOptions>
+</accepterVpcInfo>
+<status>
+<code>{{ vpc_pcx._status.code }}</code>
+<message>{{ vpc_pcx._status.message }}</message>
+</status>
+<tagSet/>
+</item>
+{% endfor %}
+</vpcPeeringConnectionSet>
 </DescribeVpcPeeringConnectionsResponse>
 """
@@ -109,19 +114,24 @@ DELETE_VPC_PEERING_CONNECTION_RESPONSE = """
 """

 ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = """
-<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
+<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
 <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
 <vpcPeeringConnection>
 <vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
 <requesterVpcInfo>
-<ownerId>123456789012</ownerId>
+<ownerId>777788889999</ownerId>
 <vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
 <cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
 </requesterVpcInfo>
 <accepterVpcInfo>
-<ownerId>777788889999</ownerId>
+<ownerId>123456789012</ownerId>
 <vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
 <cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
+<peeringOptions>
+<allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
+<allowEgressFromLocalVpcToRemoteClassicLink>false</allowEgressFromLocalVpcToRemoteClassicLink>
+<allowDnsResolutionFromRemoteVpc>false</allowDnsResolutionFromRemoteVpc>
+</peeringOptions>
 </accepterVpcInfo>
 <status>
 <code>{{ vpc_pcx._status.code }}</code>

View File

@@ -1,10 +1,19 @@
from __future__ import unicode_literals
+import base64
+import hashlib
import fnmatch
import random
import re

import six
+
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import rsa
+
+import sshpubkeys.exceptions
+from sshpubkeys.keys import SSHKey

EC2_RESOURCE_TO_PREFIX = {
    'customer-gateway': 'cgw',
    'dhcp-options': 'dopt',
@@ -453,23 +462,19 @@ def simple_aws_filter_to_re(filter_string):

def random_key_pair():
-    def random_hex():
-        return chr(random.choice(list(range(48, 58)) + list(range(97, 102))))
-
-    def random_fingerprint():
-        return ':'.join([random_hex() + random_hex() for i in range(20)])
-
-    def random_material():
-        return ''.join([
-            chr(random.choice(list(range(65, 91)) + list(range(48, 58)) +
-                              list(range(97, 102))))
-            for i in range(1000)
-        ])
-    material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \
-        "-----END RSA PRIVATE KEY-----"
+    private_key = rsa.generate_private_key(
+        public_exponent=65537,
+        key_size=2048,
+        backend=default_backend())
+
+    private_key_material = private_key.private_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PrivateFormat.TraditionalOpenSSL,
+        encryption_algorithm=serialization.NoEncryption())
+    public_key_fingerprint = rsa_public_key_fingerprint(private_key.public_key())

    return {
-        'fingerprint': random_fingerprint(),
-        'material': material
+        'fingerprint': public_key_fingerprint,
+        'material': private_key_material.decode('ascii')
    }
@@ -535,3 +540,28 @@ def generate_instance_identity_document(instance):
    }

    return document
+
+
+def rsa_public_key_parse(key_material):
+    try:
+        if not isinstance(key_material, six.binary_type):
+            key_material = key_material.encode("ascii")
+
+        decoded_key = base64.b64decode(key_material).decode("ascii")
+        public_key = SSHKey(decoded_key)
+    except (sshpubkeys.exceptions.InvalidKeyException, UnicodeDecodeError):
+        raise ValueError('bad key')
+
+    if not public_key.rsa:
+        raise ValueError('bad key')
+
+    return public_key.rsa
+
+
+def rsa_public_key_fingerprint(rsa_public_key):
+    key_data = rsa_public_key.public_bytes(
+        encoding=serialization.Encoding.DER,
+        format=serialization.PublicFormat.SubjectPublicKeyInfo)
+    fingerprint_hex = hashlib.md5(key_data).hexdigest()
+    fingerprint = re.sub(r'([a-f0-9]{2})(?!$)', r'\1:', fingerprint_hex)
+    return fingerprint
View File
@@ -699,12 +699,15 @@ class EC2ContainerServiceBackend(BaseBackend):

        return service

-    def list_services(self, cluster_str):
+    def list_services(self, cluster_str, scheduling_strategy=None):
        cluster_name = cluster_str.split('/')[-1]
        service_arns = []
        for key, value in self.services.items():
            if cluster_name + ':' in key:
-                service_arns.append(self.services[key].arn)
+                service = self.services[key]
+                if scheduling_strategy is None or service.scheduling_strategy == scheduling_strategy:
+                    service_arns.append(service.arn)
        return sorted(service_arns)

    def describe_services(self, cluster_str, service_names_or_arns):
View File
@@ -163,7 +163,8 @@ class EC2ContainerServiceResponse(BaseResponse):

    def list_services(self):
        cluster_str = self._get_param('cluster')
-        service_arns = self.ecs_backend.list_services(cluster_str)
+        scheduling_strategy = self._get_param('schedulingStrategy')
+        service_arns = self.ecs_backend.list_services(cluster_str, scheduling_strategy)
        return json.dumps({
            'serviceArns': service_arns
            # ,
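With this change the mocked `ListServices` call honours the optional `schedulingStrategy` filter. A minimal usage sketch against the mock (cluster name is illustrative; with no services created, the filtered list is empty):

```python
import boto3
from moto import mock_ecs

@mock_ecs
def test_list_services_filtered_by_strategy():
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test-cluster')
    # ... register a task definition and create services here ...
    resp = client.list_services(cluster='test-cluster',
                                schedulingStrategy='REPLICA')
    assert resp['serviceArns'] == []  # nothing created yet
```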
View File
@@ -131,7 +131,7 @@ class InvalidActionTypeError(ELBClientError):
    def __init__(self, invalid_name, index):
        super(InvalidActionTypeError, self).__init__(
            "ValidationError",
-            "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward]" % (invalid_name, index)
+            "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward, redirect]" % (invalid_name, index)
        )
View File
@@ -204,8 +204,20 @@ class FakeListener(BaseModel):

        # transform default actions to confirm with the rest of the code and XML templates
        if "DefaultActions" in properties:
            default_actions = []
-            for action in properties['DefaultActions']:
-                default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']})
+            for i, action in enumerate(properties['DefaultActions']):
+                action_type = action['Type']
+                if action_type == 'forward':
+                    default_actions.append({'type': action_type, 'target_group_arn': action['TargetGroupArn']})
+                elif action_type == 'redirect':
+                    redirect_action = {'type': action_type, }
+                    for redirect_config_key, redirect_config_value in action['RedirectConfig'].items():
+                        # need to match the output of _get_list_prefix
+                        if redirect_config_key == 'StatusCode':
+                            redirect_config_key = 'status_code'
+                        redirect_action['redirect_config._' + redirect_config_key.lower()] = redirect_config_value
+                    default_actions.append(redirect_action)
+                else:
+                    raise InvalidActionTypeError(action_type, i + 1)
        else:
            default_actions = None
@@ -417,11 +429,15 @@ class ELBv2Backend(BaseBackend):
        for i, action in enumerate(actions):
            index = i + 1
            action_type = action['type']
-            if action_type not in ['forward']:
+            if action_type == 'forward':
+                action_target_group_arn = action['target_group_arn']
+                if action_target_group_arn not in target_group_arns:
+                    raise ActionTargetGroupNotFoundError(action_target_group_arn)
+            elif action_type == 'redirect':
+                # nothing to do
+                pass
+            else:
                raise InvalidActionTypeError(action_type, index)
-            action_target_group_arn = action['target_group_arn']
-            if action_target_group_arn not in target_group_arns:
-                raise ActionTargetGroupNotFoundError(action_target_group_arn)

        # TODO: check for error 'TooManyRegistrationsForTargetId'
        # TODO: check for error 'TooManyRules'
@@ -483,10 +499,18 @@ class ELBv2Backend(BaseBackend):
        arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self))
        listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions)
        balancer.listeners[listener.arn] = listener
-        for action in default_actions:
-            if action['target_group_arn'] in self.target_groups.keys():
-                target_group = self.target_groups[action['target_group_arn']]
-                target_group.load_balancer_arns.append(load_balancer_arn)
+        for i, action in enumerate(default_actions):
+            action_type = action['type']
+            if action_type == 'forward':
+                if action['target_group_arn'] in self.target_groups.keys():
+                    target_group = self.target_groups[action['target_group_arn']]
+                    target_group.load_balancer_arns.append(load_balancer_arn)
+            elif action_type == 'redirect':
+                # nothing to do
+                pass
+            else:
+                raise InvalidActionTypeError(action_type, i + 1)

        return listener

    def describe_load_balancers(self, arns, names):
@@ -649,11 +673,15 @@ class ELBv2Backend(BaseBackend):
        for i, action in enumerate(actions):
            index = i + 1
            action_type = action['type']
-            if action_type not in ['forward']:
+            if action_type == 'forward':
+                action_target_group_arn = action['target_group_arn']
+                if action_target_group_arn not in target_group_arns:
+                    raise ActionTargetGroupNotFoundError(action_target_group_arn)
+            elif action_type == 'redirect':
+                # nothing to do
+                pass
+            else:
                raise InvalidActionTypeError(action_type, index)
-            action_target_group_arn = action['target_group_arn']
-            if action_target_group_arn not in target_group_arns:
-                raise ActionTargetGroupNotFoundError(action_target_group_arn)

        # TODO: check for error 'TooManyRegistrationsForTargetId'
        # TODO: check for error 'TooManyRules'
@@ -873,7 +901,7 @@ class ELBv2Backend(BaseBackend):
            # Its already validated in responses.py
            listener.ssl_policy = ssl_policy

-        if default_actions is not None:
+        if default_actions is not None and default_actions != []:
            # Is currently not validated
            listener.default_actions = default_actions
View File
@@ -704,7 +704,11 @@ CREATE_RULE_TEMPLATE = """<CreateRuleResponse xmlns="http://elasticloadbalancing
          {% for action in rule.actions %}
            <member>
              <Type>{{ action["type"] }}</Type>
+             {% if action["type"] == "forward" %}
              <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
+             {% elif action["type"] == "redirect" %}
+             <RedirectConfig>{{ action["redirect_config"] }}</RedirectConfig>
+             {% endif %}
            </member>
          {% endfor %}
        </Actions>
@@ -772,7 +776,15 @@ CREATE_LISTENER_TEMPLATE = """<CreateListenerResponse xmlns="http://elasticloadb
        {% for action in listener.default_actions %}
          <member>
            <Type>{{ action.type }}</Type>
-           <TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
+           {% if action["type"] == "forward" %}
+           <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
+           {% elif action["type"] == "redirect" %}
+           <RedirectConfig>
+             <Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
+             <Port>{{ action["redirect_config._port"] }}</Port>
+             <StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
+           </RedirectConfig>
+           {% endif %}
          </member>
        {% endfor %}
      </DefaultActions>
@@ -877,7 +889,15 @@ DESCRIBE_RULES_TEMPLATE = """<DescribeRulesResponse xmlns="http://elasticloadbal
          {% for action in rule.actions %}
            <member>
              <Type>{{ action["type"] }}</Type>
+             {% if action["type"] == "forward" %}
              <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
+             {% elif action["type"] == "redirect" %}
+             <RedirectConfig>
+               <Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
+               <Port>{{ action["redirect_config._port"] }}</Port>
+               <StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
+             </RedirectConfig>
+             {% endif %}
            </member>
          {% endfor %}
        </Actions>
@@ -970,7 +990,15 @@ DESCRIBE_LISTENERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://el
        {% for action in listener.default_actions %}
          <member>
            <Type>{{ action.type }}</Type>
-           <TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
+           {% if action["type"] == "forward" %}
+           <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
+           {% elif action["type"] == "redirect" %}
+           <RedirectConfig>
+             <Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
+             <Port>{{ action["redirect_config._port"] }}</Port>
+             <StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
+           </RedirectConfig>
+           {% endif %}
          </member>
        {% endfor %}
      </DefaultActions>
@@ -1399,7 +1427,15 @@ MODIFY_LISTENER_TEMPLATE = """<ModifyListenerResponse xmlns="http://elasticloadb
        {% for action in listener.default_actions %}
          <member>
            <Type>{{ action.type }}</Type>
-           <TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
+           {% if action["type"] == "forward" %}
+           <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
+           {% elif action["type"] == "redirect" %}
+           <RedirectConfig>
+             <Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
+             <Port>{{ action["redirect_config._port"] }}</Port>
+             <StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
+           </RedirectConfig>
+           {% endif %}
          </member>
        {% endfor %}
      </DefaultActions>
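The redirect action added above can be exercised end-to-end through boto3; a sketch against the mock (VPC, subnet, and load balancer names are illustrative):

```python
import boto3
from moto import mock_ec2, mock_elbv2

@mock_ec2
@mock_elbv2
def test_redirect_listener():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    elbv2 = boto3.client('elbv2', region_name='us-east-1')

    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.1.0/24', AvailabilityZone='us-east-1b')

    lb = elbv2.create_load_balancer(Name='my-lb', Subnets=[subnet1.id, subnet2.id])
    lb_arn = lb['LoadBalancers'][0]['LoadBalancerArn']

    # An HTTP listener whose default action redirects to HTTPS.
    elbv2.create_listener(
        LoadBalancerArn=lb_arn, Protocol='HTTP', Port=80,
        DefaultActions=[{
            'Type': 'redirect',
            'RedirectConfig': {'Protocol': 'HTTPS', 'Port': '443', 'StatusCode': 'HTTP_301'},
        }])
```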
View File
@@ -97,7 +97,8 @@ class FakeCluster(BaseModel):
                 visible_to_all_users='false',
                 release_label=None,
                 requested_ami_version=None,
-                running_ami_version=None):
+                running_ami_version=None,
+                custom_ami_id=None):
        self.id = cluster_id or random_cluster_id()
        emr_backend.clusters[self.id] = self
        self.emr_backend = emr_backend
@@ -162,6 +163,7 @@ class FakeCluster(BaseModel):
        self.release_label = release_label
        self.requested_ami_version = requested_ami_version
        self.running_ami_version = running_ami_version
+        self.custom_ami_id = custom_ami_id

        self.role = job_flow_role or 'EMRJobflowDefault'
        self.service_role = service_role
View File
@@ -267,6 +267,18 @@ class ElasticMapReduceResponse(BaseResponse):
            else:
                kwargs['running_ami_version'] = '1.0.0'

+        custom_ami_id = self._get_param('CustomAmiId')
+        if custom_ami_id:
+            kwargs['custom_ami_id'] = custom_ami_id
+            if release_label and release_label < 'emr-5.7.0':
+                message = 'Custom AMI is not allowed'
+                raise EmrError(error_type='ValidationException',
+                               message=message, template='error_json')
+            elif ami_version:
+                message = 'Custom AMI is not supported in this version of EMR'
+                raise EmrError(error_type='ValidationException',
+                               message=message, template='error_json')
+
        cluster = self.backend.run_job_flow(**kwargs)

        applications = self._get_list_prefix('Applications.member')
@@ -375,6 +387,9 @@ DESCRIBE_CLUSTER_TEMPLATE = """<DescribeClusterResponse xmlns="http://elasticmap
          </member>
          {% endfor %}
        </Configurations>
+       {% if cluster.custom_ami_id is not none %}
+       <CustomAmiId>{{ cluster.custom_ami_id }}</CustomAmiId>
+       {% endif %}
        <Ec2InstanceAttributes>
          <AdditionalMasterSecurityGroups>
          {% for each in cluster.additional_master_security_groups %}
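A usage sketch for the new `CustomAmiId` support; the AMI id and role names are illustrative, and custom AMIs are only accepted with `ReleaseLabel` `emr-5.7.0` or later:

```python
import boto3
from moto import mock_emr

@mock_emr
def test_run_job_flow_with_custom_ami():
    client = boto3.client('emr', region_name='us-east-1')
    resp = client.run_job_flow(
        Name='cluster',
        ReleaseLabel='emr-5.7.0',      # custom AMIs require emr-5.7.0+
        CustomAmiId='ami-12345678',    # illustrative AMI id
        Instances={'MasterInstanceType': 'm4.large',
                   'SlaveInstanceType': 'm4.large',
                   'InstanceCount': 1,
                   'KeepJobFlowAliveWhenNoSteps': True},
        JobFlowRole='EMR_EC2_DefaultRole',
        ServiceRole='EMR_DefaultRole')
    cluster = client.describe_cluster(ClusterId=resp['JobFlowId'])['Cluster']
    assert cluster['CustomAmiId'] == 'ami-12345678'
```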
View File
@@ -56,6 +56,14 @@ class GlueBackend(BaseBackend):
        database = self.get_database(database_name)
        return [table for table_name, table in database.tables.items()]

+    def delete_table(self, database_name, table_name):
+        database = self.get_database(database_name)
+        try:
+            del database.tables[table_name]
+        except KeyError:
+            raise TableNotFoundException(table_name)
+        return {}
+

class FakeDatabase(BaseModel):
View File
@@ -84,6 +84,12 @@ class GlueResponse(BaseResponse):
            ]
        })

+    def delete_table(self):
+        database_name = self.parameters.get('DatabaseName')
+        table_name = self.parameters.get('Name')
+        resp = self.glue_backend.delete_table(database_name, table_name)
+        return json.dumps(resp)
+
    def get_partitions(self):
        database_name = self.parameters.get('DatabaseName')
        table_name = self.parameters.get('TableName')
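A minimal sketch of the new `DeleteTable` support through boto3 (database and table names are illustrative):

```python
import boto3
from moto import mock_glue

@mock_glue
def test_delete_table():
    client = boto3.client('glue', region_name='us-east-1')
    client.create_database(DatabaseInput={'Name': 'mydb'})
    client.create_table(DatabaseName='mydb', TableInput={'Name': 'mytable'})
    client.delete_table(DatabaseName='mydb', Name='mytable')
    assert client.get_tables(DatabaseName='mydb')['TableList'] == []
```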
View File
@@ -9,6 +9,7 @@ from cryptography import x509
from cryptography.hazmat.backends import default_backend
import pytz

+from moto.core.exceptions import RESTError
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_without_milliseconds
@@ -131,7 +132,7 @@ class InlinePolicy(Policy):

class Role(BaseModel):

-    def __init__(self, role_id, name, assume_role_policy_document, path):
+    def __init__(self, role_id, name, assume_role_policy_document, path, permissions_boundary):
        self.id = role_id
        self.name = name
        self.assume_role_policy_document = assume_role_policy_document
@@ -141,6 +142,7 @@ class Role(BaseModel):
        self.create_date = datetime.now(pytz.utc)
        self.tags = {}
        self.description = ""
+        self.permissions_boundary = permissions_boundary

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@@ -150,6 +152,7 @@ class Role(BaseModel):
            role_name=resource_name,
            assume_role_policy_document=properties['AssumeRolePolicyDocument'],
            path=properties.get('Path', '/'),
+            permissions_boundary=properties.get('PermissionsBoundary', '')
        )

        policies = properties.get('Policies', [])
@@ -470,6 +473,8 @@ class IAMBackend(BaseBackend):
        self.managed_policies = self._init_managed_policies()
        self.account_aliases = []
        self.saml_providers = {}
+        self.policy_arn_regex = re.compile(
+            r'^arn:aws:iam::[0-9]*:policy/.*$')
        super(IAMBackend, self).__init__()

    def _init_managed_policies(self):
@@ -587,9 +592,12 @@ class IAMBackend(BaseBackend):

        return policies, marker

-    def create_role(self, role_name, assume_role_policy_document, path):
+    def create_role(self, role_name, assume_role_policy_document, path, permissions_boundary):
        role_id = random_resource_id()
-        role = Role(role_id, role_name, assume_role_policy_document, path)
+        if permissions_boundary and not self.policy_arn_regex.match(permissions_boundary):
+            raise RESTError('InvalidParameterValue', 'Value ({}) for parameter PermissionsBoundary is invalid.'.format(permissions_boundary))
+
+        role = Role(role_id, role_name, assume_role_policy_document, path, permissions_boundary)
        self.roles[role_id] = role
        return role
View File
@@ -175,9 +175,11 @@ class IamResponse(BaseResponse):
        path = self._get_param('Path')
        assume_role_policy_document = self._get_param(
            'AssumeRolePolicyDocument')
+        permissions_boundary = self._get_param(
+            'PermissionsBoundary')

        role = iam_backend.create_role(
-            role_name, assume_role_policy_document, path)
+            role_name, assume_role_policy_document, path, permissions_boundary)
        template = self.response_template(CREATE_ROLE_TEMPLATE)
        return template.render(role=role)
@@ -1000,6 +1002,12 @@ CREATE_ROLE_TEMPLATE = """<CreateRoleResponse xmlns="https://iam.amazonaws.com/d
      <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
      <CreateDate>{{ role.create_date }}</CreateDate>
      <RoleId>{{ role.id }}</RoleId>
+     {% if role.permissions_boundary %}
+     <PermissionsBoundary>
+       <PermissionsBoundaryType>PermissionsBoundaryPolicy</PermissionsBoundaryType>
+       <PermissionsBoundaryArn>{{ role.permissions_boundary }}</PermissionsBoundaryArn>
+     </PermissionsBoundary>
+     {% endif %}
    </Role>
  </CreateRoleResult>
  <ResponseMetadata>
@@ -1102,6 +1110,12 @@ LIST_ROLES_TEMPLATE = """<ListRolesResponse xmlns="https://iam.amazonaws.com/doc
        <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
        <CreateDate>{{ role.create_date }}</CreateDate>
        <RoleId>{{ role.id }}</RoleId>
+       {% if role.permissions_boundary %}
+       <PermissionsBoundary>
+         <PermissionsBoundaryType>PermissionsBoundaryPolicy</PermissionsBoundaryType>
+         <PermissionsBoundaryArn>{{ role.permissions_boundary }}</PermissionsBoundaryArn>
+       </PermissionsBoundary>
+       {% endif %}
      </member>
    {% endfor %}
  </Roles>
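A usage sketch for the new `PermissionsBoundary` parameter; the boundary policy ARN is illustrative, but it must match the `policy_arn_regex` added to the backend:

```python
import boto3
from moto import mock_iam

@mock_iam
def test_create_role_with_permissions_boundary():
    client = boto3.client('iam', region_name='us-east-1')
    boundary = 'arn:aws:iam::123456789012:policy/boundary'  # illustrative ARN
    client.create_role(RoleName='my-role',
                       AssumeRolePolicyDocument='{}',
                       PermissionsBoundary=boundary)
    role = client.list_roles()['Roles'][0]
    assert role['PermissionsBoundary']['PermissionsBoundaryArn'] == boundary
```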
View File
@@ -97,7 +97,7 @@ class FakeThingGroup(BaseModel):

class FakeCertificate(BaseModel):
-    def __init__(self, certificate_pem, status, region_name):
+    def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None):
        m = hashlib.sha256()
        m.update(str(uuid.uuid4()).encode('utf-8'))
        self.certificate_id = m.hexdigest()
@@ -110,12 +110,18 @@ class FakeCertificate(BaseModel):
        self.transfer_data = {}
        self.creation_date = time.time()
        self.last_modified_date = self.creation_date
        self.ca_certificate_id = None
+        self.ca_certificate_pem = ca_certificate_pem
+        if ca_certificate_pem:
+            m.update(str(uuid.uuid4()).encode('utf-8'))
+            self.ca_certificate_id = m.hexdigest()

    def to_dict(self):
        return {
            'certificateArn': self.arn,
            'certificateId': self.certificate_id,
+            'caCertificateId': self.ca_certificate_id,
            'status': self.status,
            'creationDate': self.creation_date
        }
@@ -509,6 +515,12 @@ class IoTBackend(BaseBackend):
    def list_certificates(self):
        return self.certificates.values()

+    def register_certificate(self, certificate_pem, ca_certificate_pem, set_as_active, status):
+        certificate = FakeCertificate(certificate_pem, 'ACTIVE' if set_as_active else status,
+                                      self.region_name, ca_certificate_pem)
+        self.certificates[certificate.certificate_id] = certificate
+        return certificate
+
    def update_certificate(self, certificate_id, new_status):
        cert = self.describe_certificate(certificate_id)
        # TODO: validate new_status
View File
@@ -296,6 +296,20 @@ class IoTResponse(BaseResponse):
        # TODO: implement pagination in the future
        return json.dumps(dict(certificates=[_.to_dict() for _ in certificates]))

+    def register_certificate(self):
+        certificate_pem = self._get_param("certificatePem")
+        ca_certificate_pem = self._get_param("caCertificatePem")
+        set_as_active = self._get_bool_param("setAsActive")
+        status = self._get_param("status")
+        cert = self.iot_backend.register_certificate(
+            certificate_pem=certificate_pem,
+            ca_certificate_pem=ca_certificate_pem,
+            set_as_active=set_as_active,
+            status=status
+        )
+        return json.dumps(dict(certificateId=cert.certificate_id, certificateArn=cert.arn))
+
    def update_certificate(self):
        certificate_id = self._get_param("certificateId")
        new_status = self._get_param("newStatus")
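A hedged sketch of the new `RegisterCertificate` endpoint; the PEM bodies are placeholders, since the mock does not parse them, and the asserted response shape assumes the existing `DescribeCertificate` behaviour:

```python
import boto3
from moto import mock_iot

@mock_iot
def test_register_certificate():
    client = boto3.client('iot', region_name='us-east-1')
    resp = client.register_certificate(
        certificatePem='dummy-cert-pem',   # placeholder PEM bodies
        caCertificatePem='dummy-ca-pem',
        setAsActive=True)
    described = client.describe_certificate(certificateId=resp['certificateId'])
    assert described['certificateDescription']['status'] == 'ACTIVE'
```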
View File
@@ -116,10 +116,12 @@ class Stream(BaseModel):
    def __init__(self, stream_name, shard_count, region):
        self.stream_name = stream_name
        self.shard_count = shard_count
+        self.creation_datetime = datetime.datetime.now()
        self.region = region
        self.account_number = "123456789012"
        self.shards = {}
        self.tags = {}
+        self.status = "ACTIVE"

        if six.PY3:
            izip_longest = itertools.zip_longest
@@ -183,12 +185,23 @@ class Stream(BaseModel):
            "StreamDescription": {
                "StreamARN": self.arn,
                "StreamName": self.stream_name,
-                "StreamStatus": "ACTIVE",
+                "StreamStatus": self.status,
                "HasMoreShards": False,
                "Shards": [shard.to_json() for shard in self.shards.values()],
            }
        }

+    def to_json_summary(self):
+        return {
+            "StreamDescriptionSummary": {
+                "StreamARN": self.arn,
+                "StreamName": self.stream_name,
+                "StreamStatus": self.status,
+                "StreamCreationTimestamp": six.text_type(self.creation_datetime),
+                "OpenShardCount": self.shard_count,
+            }
+        }
+
    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
@@ -309,6 +322,9 @@ class KinesisBackend(BaseBackend):
        else:
            raise StreamNotFoundError(stream_name)

+    def describe_stream_summary(self, stream_name):
+        return self.describe_stream(stream_name)
+
    def list_streams(self):
        return self.streams.values()
View File
@@ -33,6 +33,11 @@ class KinesisResponse(BaseResponse):
        stream = self.kinesis_backend.describe_stream(stream_name)
        return json.dumps(stream.to_json())

+    def describe_stream_summary(self):
+        stream_name = self.parameters.get('StreamName')
+        stream = self.kinesis_backend.describe_stream_summary(stream_name)
+        return json.dumps(stream.to_json_summary())
+
    def list_streams(self):
        streams = self.kinesis_backend.list_streams()
        stream_names = [stream.stream_name for stream in streams]
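A minimal usage sketch of the new `DescribeStreamSummary` support (stream name is illustrative):

```python
import boto3
from moto import mock_kinesis

@mock_kinesis
def test_describe_stream_summary():
    client = boto3.client('kinesis', region_name='us-east-1')
    client.create_stream(StreamName='my-stream', ShardCount=2)
    summary = client.describe_stream_summary(StreamName='my-stream')['StreamDescriptionSummary']
    assert summary['StreamStatus'] == 'ACTIVE'
    assert summary['OpenShardCount'] == 2
```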
View File
@@ -1,8 +1,19 @@
+import sys
import base64

from .exceptions import InvalidArgumentError

+if sys.version_info[0] == 2:
+    encode_method = base64.encodestring
+    decode_method = base64.decodestring
+elif sys.version_info[0] == 3:
+    encode_method = base64.encodebytes
+    decode_method = base64.decodebytes
+else:
+    raise Exception("Python version is not supported")
+

def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting_sequence_number,
                               at_timestamp):
    if shard_iterator_type == "AT_SEQUENCE_NUMBER":
@@ -22,7 +33,7 @@ def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting

def compose_shard_iterator(stream_name, shard, last_sequence_id):
-    return base64.encodestring(
+    return encode_method(
        "{0}:{1}:{2}".format(
            stream_name,
            shard.shard_id,
@@ -32,4 +43,4 @@ def compose_shard_iterator(stream_name, shard, last_sequence_id):

def decompose_shard_iterator(shard_iterator):
-    return base64.decodestring(shard_iterator.encode("utf-8")).decode("utf-8").split(":")
+    return decode_method(shard_iterator.encode("utf-8")).decode("utf-8").split(":")
View File
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import os
import boto.kms
from moto.core import BaseBackend, BaseModel
-from moto.core.utils import iso_8601_datetime_without_milliseconds
+from moto.core.utils import iso_8601_datetime_without_milliseconds, unix_time
from .utils import generate_key_id
from collections import defaultdict
from datetime import datetime, timedelta
@@ -37,7 +37,7 @@ class Key(BaseModel):
            "KeyMetadata": {
                "AWSAccountId": self.account_id,
                "Arn": self.arn,
-                "CreationDate": datetime.strftime(datetime.utcnow(), "%s"),
+                "CreationDate": "%d" % unix_time(),
                "Description": self.description,
                "Enabled": self.enabled,
                "KeyId": self.id,
View File
@@ -137,6 +137,7 @@ class LogGroup:
        self.creationTime = unix_time_millis()
        self.tags = tags
        self.streams = dict()  # {name: LogStream}
+        self.retentionInDays = None  # AWS defaults to Never Expire for log group retention

    def create_log_stream(self, log_stream_name):
        if log_stream_name in self.streams:
@@ -201,14 +202,20 @@ class LogGroup:
        return events_page, next_token, searched_streams

    def to_describe_dict(self):
-        return {
+        log_group = {
            "arn": self.arn,
            "creationTime": self.creationTime,
            "logGroupName": self.name,
            "metricFilterCount": 0,
-            "retentionInDays": 30,
            "storedBytes": sum(s.storedBytes for s in self.streams.values()),
        }
+        # AWS only returns retentionInDays if a value is set for the log group (ie. not Never Expire)
+        if self.retentionInDays:
+            log_group["retentionInDays"] = self.retentionInDays
+        return log_group
+
+    def set_retention_policy(self, retention_in_days):
+        self.retentionInDays = retention_in_days


class LogsBackend(BaseBackend):
@@ -289,5 +296,17 @@ class LogsBackend(BaseBackend):
        log_group = self.groups[log_group_name]
        return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)

+    def put_retention_policy(self, log_group_name, retention_in_days):
+        if log_group_name not in self.groups:
+            raise ResourceNotFoundException()
+        log_group = self.groups[log_group_name]
+        return log_group.set_retention_policy(retention_in_days)
+
+    def delete_retention_policy(self, log_group_name):
+        if log_group_name not in self.groups:
+            raise ResourceNotFoundException()
+        log_group = self.groups[log_group_name]
+        return log_group.set_retention_policy(None)
+

logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()}
View File
@@ -123,3 +123,14 @@ class LogsResponse(BaseResponse):
            "nextToken": next_token,
            "searchedLogStreams": searched_streams
        })
+
+    def put_retention_policy(self):
+        log_group_name = self._get_param('logGroupName')
+        retention_in_days = self._get_param('retentionInDays')
+        self.logs_backend.put_retention_policy(log_group_name, retention_in_days)
+        return ''
+
+    def delete_retention_policy(self):
+        log_group_name = self._get_param('logGroupName')
+        self.logs_backend.delete_retention_policy(log_group_name)
+        return ''
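A usage sketch of the retention-policy endpoints; note that after `delete_retention_policy` the group reverts to "Never Expire", so `retentionInDays` disappears from the describe output:

```python
import boto3
from moto import mock_logs

@mock_logs
def test_retention_policy():
    client = boto3.client('logs', region_name='us-east-1')
    client.create_log_group(logGroupName='my-group')
    client.put_retention_policy(logGroupName='my-group', retentionInDays=30)
    group = client.describe_log_groups()['logGroups'][0]
    assert group['retentionInDays'] == 30

    client.delete_retention_policy(logGroupName='my-group')
    group = client.describe_log_groups()['logGroups'][0]
    assert 'retentionInDays' not in group
```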
View File
@@ -47,6 +47,7 @@ class FakeOrganization(BaseModel):

class FakeAccount(BaseModel):

    def __init__(self, organization, **kwargs):
+        self.type = 'ACCOUNT'
        self.organization_id = organization.id
        self.master_account_id = organization.master_account_id
        self.create_account_status_id = utils.make_random_create_account_status_id()
@@ -57,6 +58,7 @@ class FakeAccount(BaseModel):
        self.status = 'ACTIVE'
        self.joined_method = 'CREATED'
        self.parent_id = organization.root_id
+        self.attached_policies = []

    @property
    def arn(self):
@@ -103,6 +105,7 @@ class FakeOrganizationalUnit(BaseModel):
        self.name = kwargs.get('Name')
        self.parent_id = kwargs.get('ParentId')
        self._arn_format = utils.OU_ARN_FORMAT
+        self.attached_policies = []

    @property
    def arn(self):
@@ -134,6 +137,7 @@ class FakeRoot(FakeOrganizationalUnit):
            'Status': 'ENABLED'
        }]
        self._arn_format = utils.ROOT_ARN_FORMAT
+        self.attached_policies = []

    def describe(self):
        return {
@@ -144,12 +148,52 @@ class FakeRoot(FakeOrganizationalUnit):
        }
+
+class FakeServiceControlPolicy(BaseModel):
+
+    def __init__(self, organization, **kwargs):
+        self.type = 'POLICY'
+        self.content = kwargs.get('Content')
+        self.description = kwargs.get('Description')
+        self.name = kwargs.get('Name')
+        self.type = kwargs.get('Type')
+        self.id = utils.make_random_service_control_policy_id()
+        self.aws_managed = False
+        self.organization_id = organization.id
+        self.master_account_id = organization.master_account_id
+        self._arn_format = utils.SCP_ARN_FORMAT
+        self.attachments = []
+
+    @property
+    def arn(self):
+        return self._arn_format.format(
+            self.master_account_id,
+            self.organization_id,
+            self.id
+        )
+
+    def describe(self):
+        return {
+            'Policy': {
+                'PolicySummary': {
+                    'Id': self.id,
+                    'Arn': self.arn,
+                    'Name': self.name,
+                    'Description': self.description,
+                    'Type': self.type,
+                    'AwsManaged': self.aws_managed,
+                },
+                'Content': self.content
+            }
+        }

class OrganizationsBackend(BaseBackend):

    def __init__(self):
        self.org = None
        self.accounts = []
        self.ou = []
+        self.policies = []

    def create_organization(self, **kwargs):
        self.org = FakeOrganization(kwargs['FeatureSet'])
@@ -292,5 +336,108 @@ class OrganizationsBackend(BaseBackend):
            ]
        )
+
+    def create_policy(self, **kwargs):
+        new_policy = FakeServiceControlPolicy(self.org, **kwargs)
+        self.policies.append(new_policy)
+        return new_policy.describe()
+
+    def describe_policy(self, **kwargs):
+        if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']):
+            policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None)
+            if policy is None:
+                raise RESTError(
+                    'PolicyNotFoundException',
+                    "You specified a policy that doesn't exist."
+                )
+        else:
+            raise RESTError(
+                'InvalidInputException',
+                'You specified an invalid value.'
+            )
+        return policy.describe()
+
+    def attach_policy(self, **kwargs):
+        policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None)
+        if (re.compile(utils.ROOT_ID_REGEX).match(kwargs['TargetId']) or
+                re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId'])):
+            ou = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None)
+            if ou is not None:
+                if ou not in ou.attached_policies:
+                    ou.attached_policies.append(policy)
+                    policy.attachments.append(ou)
+            else:
+                raise RESTError(
+                    'OrganizationalUnitNotFoundException',
+                    "You specified an organizational unit that doesn't exist."
+                )
+        elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']):
+            account = next((a for a in self.accounts if a.id == kwargs['TargetId']), None)
+            if account is not None:
+                if account not in account.attached_policies:
+                    account.attached_policies.append(policy)
+                    policy.attachments.append(account)
+            else:
+                raise RESTError(
+                    'AccountNotFoundException',
+                    "You specified an account that doesn't exist."
+                )
+        else:
+            raise RESTError(
+                'InvalidInputException',
+                'You specified an invalid value.'
+            )
+
+    def list_policies(self, **kwargs):
+        return dict(Policies=[
+            p.describe()['Policy']['PolicySummary'] for p in self.policies
+        ])
+
+    def list_policies_for_target(self, **kwargs):
+        if re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId']):
+            obj = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None)
+            if obj is None:
+                raise RESTError(
+                    'OrganizationalUnitNotFoundException',
+                    "You specified an organizational unit that doesn't exist."
+                )
+        elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']):
+            obj = next((a for a in self.accounts if a.id == kwargs['TargetId']), None)
+            if obj is None:
+                raise RESTError(
+                    'AccountNotFoundException',
+                    "You specified an account that doesn't exist."
+                )
+        else:
+            raise RESTError(
+                'InvalidInputException',
+                'You specified an invalid value.'
+            )
+        return dict(Policies=[
+            p.describe()['Policy']['PolicySummary'] for p in obj.attached_policies
+        ])
+
+    def list_targets_for_policy(self, **kwargs):
+        if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']):
+            policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None)
+            if policy is None:
+                raise RESTError(
+                    'PolicyNotFoundException',
+                    "You specified a policy that doesn't exist."
+                )
+        else:
+            raise RESTError(
+                'InvalidInputException',
+                'You specified an invalid value.'
+            )
+        objects = [
+            {
+                'TargetId': obj.id,
+                'Arn': obj.arn,
+                'Name': obj.name,
+                'Type': obj.type,
+            } for obj in policy.attachments
+        ]
+        return dict(Targets=objects)


organizations_backend = OrganizationsBackend()
View File
@@ -85,3 +85,33 @@ class OrganizationsResponse(BaseResponse):
        return json.dumps(
            self.organizations_backend.list_children(**self.request_params)
        )
+
+    def create_policy(self):
+        return json.dumps(
+            self.organizations_backend.create_policy(**self.request_params)
+        )
+
+    def describe_policy(self):
+        return json.dumps(
+            self.organizations_backend.describe_policy(**self.request_params)
+        )
+
+    def attach_policy(self):
+        return json.dumps(
+            self.organizations_backend.attach_policy(**self.request_params)
+        )
+
+    def list_policies(self):
+        return json.dumps(
+            self.organizations_backend.list_policies(**self.request_params)
+        )
+
+    def list_policies_for_target(self):
+        return json.dumps(
+            self.organizations_backend.list_policies_for_target(**self.request_params)
+        )
+
+    def list_targets_for_policy(self):
+        return json.dumps(
+            self.organizations_backend.list_targets_for_policy(**self.request_params)
+        )
View File
@@ -10,6 +10,7 @@ MASTER_ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{0}'
ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{2}'
ROOT_ARN_FORMAT = 'arn:aws:organizations::{0}:root/{1}/{2}'
OU_ARN_FORMAT = 'arn:aws:organizations::{0}:ou/{1}/{2}'
+SCP_ARN_FORMAT = 'arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}'

CHARSET = string.ascii_lowercase + string.digits
ORG_ID_SIZE = 10
@@ -17,6 +18,15 @@ ROOT_ID_SIZE = 4
ACCOUNT_ID_SIZE = 12
OU_ID_SUFFIX_SIZE = 8
CREATE_ACCOUNT_STATUS_ID_SIZE = 8
+SCP_ID_SIZE = 8
+
+EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$"
+ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % ORG_ID_SIZE
+ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % ROOT_ID_SIZE
+OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE)
+ACCOUNT_ID_REGEX = r'[0-9]{%s}' % ACCOUNT_ID_SIZE
+CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % CREATE_ACCOUNT_STATUS_ID_SIZE
+SCP_ID_REGEX = r'p-[a-z0-9]{%s}' % SCP_ID_SIZE


def make_random_org_id():
@@ -57,3 +67,10 @@ def make_random_create_account_status_id():
    # "car-" followed by from 8 to 32 lower-case letters or digits.
    # e.g. 'car-35gxzwrp'
    return 'car-' + ''.join(random.choice(CHARSET) for x in range(CREATE_ACCOUNT_STATUS_ID_SIZE))
+
+
+def make_random_service_control_policy_id():
+    # The regex pattern for a policy ID string requires "p-" followed by
+    # from 8 to 128 lower-case letters or digits.
+    # e.g. 'p-k2av4a8a'
+    return 'p-' + ''.join(random.choice(CHARSET) for x in range(SCP_ID_SIZE))
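A sketch tying the new service control policy plumbing together; the policy document and names are illustrative:

```python
import boto3
from moto import mock_organizations

@mock_organizations
def test_attach_service_control_policy():
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')
    root_id = client.list_roots()['Roots'][0]['Id']

    policy = client.create_policy(
        Content='{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"*","Resource":"*"}]}',
        Description='allow everything',
        Name='my-scp',
        Type='SERVICE_CONTROL_POLICY')
    policy_id = policy['Policy']['PolicySummary']['Id']

    client.attach_policy(PolicyId=policy_id, TargetId=root_id)
    targets = client.list_targets_for_policy(PolicyId=policy_id)['Targets']
    assert targets[0]['TargetId'] == root_id
```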
View File
@@ -531,14 +531,37 @@ class RedshiftBackend(BaseBackend):
                setattr(cluster, key, value)

        if new_cluster_identifier:
-            self.delete_cluster(cluster_identifier)
+            dic = {
+                "cluster_identifier": cluster_identifier,
+                "skip_final_snapshot": True,
+                "final_cluster_snapshot_identifier": None
+            }
+            self.delete_cluster(**dic)
            cluster.cluster_identifier = new_cluster_identifier
            self.clusters[new_cluster_identifier] = cluster

        return cluster

-    def delete_cluster(self, cluster_identifier):
+    def delete_cluster(self, **cluster_kwargs):
+        cluster_identifier = cluster_kwargs.pop("cluster_identifier")
+        cluster_skip_final_snapshot = cluster_kwargs.pop("skip_final_snapshot")
+        cluster_snapshot_identifer = cluster_kwargs.pop("final_cluster_snapshot_identifier")
+
        if cluster_identifier in self.clusters:
+            if cluster_skip_final_snapshot is False and cluster_snapshot_identifer is None:
+                raise ClientError(
+                    "InvalidParameterValue",
+                    'FinalSnapshotIdentifier is required for Snapshot copy '
+                    'when SkipFinalSnapshot is False'
+                )
+            elif cluster_skip_final_snapshot is False and cluster_snapshot_identifer is not None:  # create snapshot
+                cluster = self.describe_clusters(cluster_identifier)[0]
+                self.create_cluster_snapshot(
+                    cluster_identifier,
+                    cluster_snapshot_identifer,
+                    cluster.region,
+                    cluster.tags)
+
            return self.clusters.pop(cluster_identifier)
        raise ClusterNotFoundError(cluster_identifier)

@@ -617,9 +640,12 @@ class RedshiftBackend(BaseBackend):
    def describe_cluster_snapshots(self, cluster_identifier=None, snapshot_identifier=None):
        if cluster_identifier:
+            cluster_snapshots = []
            for snapshot in self.snapshots.values():
                if snapshot.cluster.cluster_identifier == cluster_identifier:
-                    return [snapshot]
+                    cluster_snapshots.append(snapshot)
+            if cluster_snapshots:
+                return cluster_snapshots
            raise ClusterNotFoundError(cluster_identifier)

        if snapshot_identifier:
View File
@@ -240,8 +240,13 @@ class RedshiftResponse(BaseResponse):
        })

    def delete_cluster(self):
-        cluster_identifier = self._get_param("ClusterIdentifier")
-        cluster = self.redshift_backend.delete_cluster(cluster_identifier)
+        request_kwargs = {
+            "cluster_identifier": self._get_param("ClusterIdentifier"),
+            "final_cluster_snapshot_identifier": self._get_param("FinalClusterSnapshotIdentifier"),
+            "skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot")
+        }
+
+        cluster = self.redshift_backend.delete_cluster(**request_kwargs)

        return self.get_response({
            "DeleteClusterResponse": {
View File
@@ -0,0 +1,6 @@
from __future__ import unicode_literals
from .models import resourcegroups_backends
from ..core.models import base_decorator
resourcegroups_backend = resourcegroups_backends['us-east-1']
mock_resourcegroups = base_decorator(resourcegroups_backends)
View File
@@ -0,0 +1,13 @@
from __future__ import unicode_literals
import json

from werkzeug.exceptions import HTTPException


class BadRequestException(HTTPException):
    code = 400

    def __init__(self, message, **kwargs):
        super(BadRequestException, self).__init__(
            description=json.dumps({"Message": message, "Code": "BadRequestException"}), **kwargs
        )
View File
@@ -0,0 +1,338 @@
from __future__ import unicode_literals

from builtins import str

import boto3
import json
import re

from moto.core import BaseBackend, BaseModel
from .exceptions import BadRequestException


class FakeResourceGroup(BaseModel):
    def __init__(self, name, resource_query, description=None, tags=None):
        self.errors = []
        description = description or ""
        tags = tags or {}
        if self._validate_description(value=description):
            self._description = description
        if self._validate_name(value=name):
            self._name = name
        if self._validate_resource_query(value=resource_query):
            self._resource_query = resource_query
        if self._validate_tags(value=tags):
            self._tags = tags
        self._raise_errors()
        self.arn = "arn:aws:resource-groups:us-west-1:123456789012:{name}".format(name=name)

    @staticmethod
    def _format_error(key, value, constraint):
        return "Value '{value}' at '{key}' failed to satisfy constraint: {constraint}".format(
            constraint=constraint,
            key=key,
            value=value,
        )

    def _raise_errors(self):
        if self.errors:
            errors_len = len(self.errors)
            plural = "s" if len(self.errors) > 1 else ""
            errors = "; ".join(self.errors)
            raise BadRequestException("{errors_len} validation error{plural} detected: {errors}".format(
                errors_len=errors_len, plural=plural, errors=errors,
            ))
    def _validate_description(self, value):
        errors = []
        if len(value) > 511:
            errors.append(self._format_error(
                key="description",
                value=value,
                constraint="Member must have length less than or equal to 512",
            ))
        if not re.match(r"^[\sa-zA-Z0-9_.-]*$", value):
            errors.append(self._format_error(
                key="name",
                value=value,
                constraint=r"Member must satisfy regular expression pattern: [\sa-zA-Z0-9_\.-]*",
            ))
        if errors:
            self.errors += errors
            return False
        return True

    def _validate_name(self, value):
        errors = []
        if len(value) > 128:
            errors.append(self._format_error(
                key="name",
                value=value,
                constraint="Member must have length less than or equal to 128",
            ))
        # Note \ is a character to match not an escape.
        if not re.match(r"^[a-zA-Z0-9_\\.-]+$", value):
            errors.append(self._format_error(
                key="name",
                value=value,
                constraint=r"Member must satisfy regular expression pattern: [a-zA-Z0-9_\.-]+",
            ))
        if errors:
            self.errors += errors
            return False
        return True

    def _validate_resource_query(self, value):
        errors = []
        if value["Type"] not in {"CLOUDFORMATION_STACK_1_0", "TAG_FILTERS_1_0"}:
            errors.append(self._format_error(
                key="resourceQuery.type",
                value=value,
                constraint="Member must satisfy enum value set: [CLOUDFORMATION_STACK_1_0, TAG_FILTERS_1_0]",
            ))
        if len(value["Query"]) > 2048:
            errors.append(self._format_error(
                key="resourceQuery.query",
                value=value,
                constraint="Member must have length less than or equal to 2048",
            ))
        if errors:
            self.errors += errors
            return False
        return True
    def _validate_tags(self, value):
        errors = []
        # AWS only outputs one error for all keys and one for all values.
        error_keys = None
        error_values = None
        regex = re.compile(r"^([\\p{L}\\p{Z}\\p{N}_.:/=+\-@]*)$")
        for tag_key, tag_value in value.items():
            # Validation for len(tag_key) >= 1 is done by botocore.
            if len(tag_key) > 128 or re.match(regex, tag_key):
                error_keys = self._format_error(
                    key="tags",
                    value=value,
                    constraint=(
                        "Map value must satisfy constraint: ["
                        "Member must have length less than or equal to 128, "
                        "Member must have length greater than or equal to 1, "
                        r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$"
                        "]"
                    ),
                )
            # Validation for len(tag_value) >= 0 is nonsensical.
            if len(tag_value) > 256 or re.match(regex, tag_key):
                error_values = self._format_error(
                    key="tags",
                    value=value,
                    constraint=(
                        "Map value must satisfy constraint: ["
                        "Member must have length less than or equal to 256, "
                        "Member must have length greater than or equal to 0, "
                        r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$"
                        "]"
                    ),
                )
        if error_keys:
            errors.append(error_keys)
        if error_values:
            errors.append(error_values)
        if errors:
            self.errors += errors
            return False
        return True
    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, value):
        if not self._validate_description(value=value):
            self._raise_errors()
        self._description = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        if not self._validate_name(value=value):
            self._raise_errors()
        self._name = value

    @property
    def resource_query(self):
        return self._resource_query

    @resource_query.setter
    def resource_query(self, value):
        if not self._validate_resource_query(value=value):
            self._raise_errors()
        self._resource_query = value

    @property
    def tags(self):
        return self._tags

    @tags.setter
    def tags(self, value):
        if not self._validate_tags(value=value):
            self._raise_errors()
        self._tags = value

class ResourceGroups():
    def __init__(self):
        self.by_name = {}
        self.by_arn = {}

    def __contains__(self, item):
        return item in self.by_name

    def append(self, resource_group):
        self.by_name[resource_group.name] = resource_group
        self.by_arn[resource_group.arn] = resource_group

    def delete(self, name):
        group = self.by_name[name]
        del self.by_name[name]
        del self.by_arn[group.arn]
        return group

class ResourceGroupsBackend(BaseBackend):
    def __init__(self, region_name=None):
        super(ResourceGroupsBackend, self).__init__()
        self.region_name = region_name
        self.groups = ResourceGroups()

    @staticmethod
    def _validate_resource_query(resource_query):
        type = resource_query["Type"]
        query = json.loads(resource_query["Query"])
        query_keys = set(query.keys())
        invalid_json_exception = BadRequestException("Invalid query: Invalid query format: check JSON syntax")
        if not isinstance(query["ResourceTypeFilters"], list):
            raise invalid_json_exception
        if type == "CLOUDFORMATION_STACK_1_0":
            if query_keys != {"ResourceTypeFilters", "StackIdentifier"}:
                raise invalid_json_exception
            stack_identifier = query["StackIdentifier"]
            if not isinstance(stack_identifier, str):
                raise invalid_json_exception
            if not re.match(
                r"^arn:aws:cloudformation:[a-z]{2}-[a-z]+-[0-9]+:[0-9]+:stack/[-0-9A-z]+/[-0-9a-f]+$",
                stack_identifier,
            ):
                raise BadRequestException(
                    "Invalid query: Verify that the specified ARN is formatted correctly."
                )
            # Once checking other resources is implemented.
            # if stack_identifier not in self.cloudformation_backend.stacks:
            #     raise BadRequestException("Invalid query: The specified CloudFormation stack doesn't exist.")
        if type == "TAG_FILTERS_1_0":
            if query_keys != {"ResourceTypeFilters", "TagFilters"}:
                raise invalid_json_exception
            tag_filters = query["TagFilters"]
            if not isinstance(tag_filters, list):
                raise invalid_json_exception
            if not tag_filters or len(tag_filters) > 50:
                raise BadRequestException(
                    "Invalid query: The TagFilters list must contain between 1 and 50 elements"
                )
            for tag_filter in tag_filters:
                if not isinstance(tag_filter, dict):
                    raise invalid_json_exception
                if set(tag_filter.keys()) != {"Key", "Values"}:
                    raise invalid_json_exception
                key = tag_filter["Key"]
                if not isinstance(key, str):
                    raise invalid_json_exception
                if not key:
                    raise BadRequestException(
                        "Invalid query: The TagFilter element cannot have empty or null Key field"
                    )
                if len(key) > 128:
                    raise BadRequestException("Invalid query: The maximum length for a tag Key is 128")
                values = tag_filter["Values"]
                if not isinstance(values, list):
                    raise invalid_json_exception
                if len(values) > 20:
                    raise BadRequestException(
                        "Invalid query: The TagFilter Values list must contain between 0 and 20 elements"
                    )
                for value in values:
                    if not isinstance(value, str):
                        raise invalid_json_exception
                    if len(value) > 256:
                        raise BadRequestException(
                            "Invalid query: The maximum length for a tag Value is 256"
                        )
    @staticmethod
    def _validate_tags(tags):
        for tag in tags:
            if tag.lower().startswith('aws:'):
                raise BadRequestException("Tag keys must not start with 'aws:'")

    def create_group(self, name, resource_query, description=None, tags=None):
        tags = tags or {}
        group = FakeResourceGroup(
            name=name,
            resource_query=resource_query,
            description=description,
            tags=tags,
        )
        if name in self.groups:
            raise BadRequestException("Cannot create group: group already exists")
        if name.upper().startswith("AWS"):
            raise BadRequestException("Group name must not start with 'AWS'")
        self._validate_tags(tags)
        self._validate_resource_query(resource_query)
        self.groups.append(group)
        return group

    def delete_group(self, group_name):
        return self.groups.delete(name=group_name)

    def get_group(self, group_name):
        return self.groups.by_name[group_name]

    def get_tags(self, arn):
        return self.groups.by_arn[arn].tags

    # def list_group_resources(self):
    #     ...

    def list_groups(self, filters=None, max_results=None, next_token=None):
        return self.groups.by_name

    # def search_resources(self):
    #     ...

    def tag(self, arn, tags):
        all_tags = self.groups.by_arn[arn].tags
        all_tags.update(tags)
        self._validate_tags(all_tags)
        self.groups.by_arn[arn].tags = all_tags

    def untag(self, arn, keys):
        group = self.groups.by_arn[arn]
        for key in keys:
            del group.tags[key]

    def update_group(self, group_name, description=None):
        if description:
            self.groups.by_name[group_name].description = description
        return self.groups.by_name[group_name]

    def update_group_query(self, group_name, resource_query):
        self._validate_resource_query(resource_query)
        self.groups.by_name[group_name].resource_query = resource_query
        return self.groups.by_name[group_name]


available_regions = boto3.session.Session().get_available_regions("resource-groups")
resourcegroups_backends = {region: ResourceGroupsBackend(region_name=region) for region in available_regions}
View File
@@ -0,0 +1,162 @@
from __future__ import unicode_literals
import json

try:
    from urllib import unquote
except ImportError:
    from urllib.parse import unquote

from moto.core.responses import BaseResponse
from .models import resourcegroups_backends


class ResourceGroupsResponse(BaseResponse):
    SERVICE_NAME = 'resource-groups'

    @property
    def resourcegroups_backend(self):
        return resourcegroups_backends[self.region]

    def create_group(self):
        name = self._get_param("Name")
        description = self._get_param("Description")
        resource_query = self._get_param("ResourceQuery")
        tags = self._get_param("Tags")
        group = self.resourcegroups_backend.create_group(
            name=name,
            description=description,
            resource_query=resource_query,
            tags=tags,
        )
        return json.dumps({
            "Group": {
                "GroupArn": group.arn,
                "Name": group.name,
                "Description": group.description
            },
            "ResourceQuery": group.resource_query,
            "Tags": group.tags
        })
    def delete_group(self):
        group_name = self._get_param("GroupName")
        group = self.resourcegroups_backend.delete_group(group_name=group_name)
        return json.dumps({
            "Group": {
                "GroupArn": group.arn,
                "Name": group.name,
                "Description": group.description
            },
        })

    def get_group(self):
        group_name = self._get_param("GroupName")
        group = self.resourcegroups_backend.get_group(group_name=group_name)
        return json.dumps({
            "Group": {
                "GroupArn": group.arn,
                "Name": group.name,
                "Description": group.description,
            }
        })

    def get_group_query(self):
        group_name = self._get_param("GroupName")
        group = self.resourcegroups_backend.get_group(group_name=group_name)
        return json.dumps({
            "GroupQuery": {
                "GroupName": group.name,
                "ResourceQuery": group.resource_query,
            }
        })

    def get_tags(self):
        arn = unquote(self._get_param("Arn"))
        return json.dumps({
            "Arn": arn,
            "Tags": self.resourcegroups_backend.get_tags(arn=arn)
        })

    def list_group_resources(self):
        raise NotImplementedError('ResourceGroups.list_group_resources is not yet implemented')

    def list_groups(self):
        filters = self._get_param("Filters")
        if filters:
            raise NotImplementedError(
                'ResourceGroups.list_groups with filter parameter is not yet implemented'
            )
        max_results = self._get_int_param("MaxResults", 50)
        next_token = self._get_param("NextToken")
        groups = self.resourcegroups_backend.list_groups(
            filters=filters,
            max_results=max_results,
            next_token=next_token
        )
        return json.dumps({
            "GroupIdentifiers": [{
                "GroupName": group.name,
                "GroupArn": group.arn,
            } for group in groups.values()],
            "Groups": [{
                "GroupArn": group.arn,
                "Name": group.name,
                "Description": group.description,
            } for group in groups.values()],
            "NextToken": next_token,
        })
def search_resources(self):
raise NotImplementedError('ResourceGroups.search_resources is not yet implemented')
def tag(self):
arn = unquote(self._get_param("Arn"))
tags = self._get_param("Tags")
if arn not in self.resourcegroups_backend.groups.by_arn:
raise NotImplementedError(
'ResourceGroups.tag with non-resource-group Arn parameter is not yet implemented'
)
self.resourcegroups_backend.tag(arn=arn, tags=tags)
return json.dumps({
"Arn": arn,
"Tags": tags
})
def untag(self):
arn = unquote(self._get_param("Arn"))
keys = self._get_param("Keys")
if arn not in self.resourcegroups_backend.groups.by_arn:
raise NotImplementedError(
'ResourceGroups.untag with non-resource-group Arn parameter is not yet implemented'
)
self.resourcegroups_backend.untag(arn=arn, keys=keys)
return json.dumps({
"Arn": arn,
"Keys": keys
})
def update_group(self):
group_name = self._get_param("GroupName")
description = self._get_param("Description", "")
group = self.resourcegroups_backend.update_group(group_name=group_name, description=description)
return json.dumps({
"Group": {
"GroupArn": group.arn,
"Name": group.name,
"Description": group.description
},
})
def update_group_query(self):
group_name = self._get_param("GroupName")
resource_query = self._get_param("ResourceQuery")
group = self.resourcegroups_backend.update_group_query(
group_name=group_name,
resource_query=resource_query
)
return json.dumps({
"GroupQuery": {
"GroupName": group.name,
"ResourceQuery": resource_query
}
})

View File

@@ -0,0 +1,14 @@
from __future__ import unicode_literals
from .responses import ResourceGroupsResponse

url_bases = [
    "https?://resource-groups(-fips)?.(.+).amazonaws.com",
]

url_paths = {
    '{0}/groups$': ResourceGroupsResponse.dispatch,
    '{0}/groups/(?P<resource_group_name>[^/]+)$': ResourceGroupsResponse.dispatch,
    '{0}/groups/(?P<resource_group_name>[^/]+)/query$': ResourceGroupsResponse.dispatch,
    '{0}/groups-list$': ResourceGroupsResponse.dispatch,
    '{0}/resources/(?P<resource_arn>[^/]+)/tags$': ResourceGroupsResponse.dispatch,
}

View File

@@ -17,8 +17,11 @@ import six

 from bisect import insort
 from moto.core import BaseBackend, BaseModel
 from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
-from .exceptions import BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, \
-    EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, DuplicateTagKeys
+from .exceptions import (
+    BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, InvalidRequest,
+    EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass,
+    InvalidTargetBucketForLogging, DuplicateTagKeys, CrossLocationLoggingProhibitted
+)
 from .utils import clean_key_name, _VersionedKeyStore

 MAX_BUCKET_NAME_LENGTH = 63

@@ -463,6 +466,7 @@ class FakeBucket(BaseModel):
         self.cors = []
         self.logging = {}
         self.notification_configuration = None
+        self.accelerate_configuration = None

     @property
     def location(self):

@@ -557,7 +561,6 @@
         self.rules = []

     def set_cors(self, rules):
-        from moto.s3.exceptions import InvalidRequest, MalformedXML
         self.cors = []

         if len(rules) > 100:

@@ -607,7 +610,6 @@
             self.logging = {}
             return

-        from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted
         # Target bucket must exist in the same account (assuming all moto buckets are in the same account):
         if not bucket_backend.buckets.get(logging_config["TargetBucket"]):
             raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.")

@@ -655,6 +657,13 @@
             if region != self.region_name:
                 raise InvalidNotificationDestination()

+    def set_accelerate_configuration(self, accelerate_config):
+        if self.accelerate_configuration is None and accelerate_config == 'Suspended':
+            # Cannot "suspend" a not active acceleration. Leaves it undefined
+            return
+
+        self.accelerate_configuration = accelerate_config
+
     def set_website_configuration(self, website_configuration):
         self.website_configuration = website_configuration

@@ -857,6 +866,15 @@ class S3Backend(BaseBackend):
         bucket = self.get_bucket(bucket_name)
         bucket.set_notification_configuration(notification_config)

+    def put_bucket_accelerate_configuration(self, bucket_name, accelerate_configuration):
+        if accelerate_configuration not in ['Enabled', 'Suspended']:
+            raise MalformedXML()
+
+        bucket = self.get_bucket(bucket_name)
+        if bucket.name.find('.') != -1:
+            raise InvalidRequest('PutBucketAccelerateConfiguration')
+        bucket.set_accelerate_configuration(accelerate_configuration)
+
     def initiate_multipart(self, bucket_name, key_name, metadata):
         bucket = self.get_bucket(bucket_name)
         new_multipart = FakeMultipart(key_name, metadata)

@@ -894,12 +912,11 @@
         return multipart.set_part(part_id, value)

     def copy_part(self, dest_bucket_name, multipart_id, part_id,
-                  src_bucket_name, src_key_name, start_byte, end_byte):
-        src_key_name = clean_key_name(src_key_name)
-        src_bucket = self.get_bucket(src_bucket_name)
+                  src_bucket_name, src_key_name, src_version_id, start_byte, end_byte):
         dest_bucket = self.get_bucket(dest_bucket_name)
         multipart = dest_bucket.multiparts[multipart_id]
-        src_value = src_bucket.keys[src_key_name].value
+
+        src_value = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id).value
         if start_byte is not None:
             src_value = src_value[start_byte:end_byte + 1]
         return multipart.set_part(part_id, src_value)
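The reworked copy_part now resolves the source key through get_key with an optional version id instead of reading the source bucket's key store directly, so copies can target a specific version of the source object. A minimal sketch of the behaviour this enables together with the response changes below; bucket and key names are illustrative:

import boto3
from moto import mock_s3


@mock_s3
def test_copy_from_older_object_version():
    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="src-bucket")
    client.put_bucket_versioning(
        Bucket="src-bucket",
        VersioningConfiguration={"Status": "Enabled"},
    )
    first = client.put_object(Bucket="src-bucket", Key="key", Body=b"old")
    client.put_object(Bucket="src-bucket", Key="key", Body=b"new")

    client.create_bucket(Bucket="dest-bucket")
    # boto3 serializes VersionId into the ?versionId= suffix that the
    # response handler parses out of the x-amz-copy-source header.
    client.copy_object(
        Bucket="dest-bucket",
        Key="copy",
        CopySource={"Bucket": "src-bucket", "Key": "key", "VersionId": first["VersionId"]},
    )
    body = client.get_object(Bucket="dest-bucket", Key="copy")["Body"].read()
    assert body == b"old"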

moto/s3/responses.py Executable file → Normal file
View File

@@ -257,6 +257,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
                 return 200, {}, ""
             template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
             return template.render(bucket=bucket)
+        elif "accelerate" in querystring:
+            bucket = self.backend.get_bucket(bucket_name)
+            if bucket.accelerate_configuration is None:
+                template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
+                return 200, {}, template.render()
+            template = self.response_template(S3_BUCKET_ACCELERATE)
+            return template.render(bucket=bucket)

         elif 'versions' in querystring:
             delimiter = querystring.get('delimiter', [None])[0]

@@ -442,6 +449,15 @@ class ResponseObject(_TemplateEnvironmentMixin):
                 raise MalformedXML()
             except Exception as e:
                 raise e
+        elif "accelerate" in querystring:
+            try:
+                accelerate_status = self._accelerate_config_from_xml(body)
+                self.backend.put_bucket_accelerate_configuration(bucket_name, accelerate_status)
+                return ""
+            except KeyError:
+                raise MalformedXML()
+            except Exception as e:
+                raise e

         else:
             if body:

@@ -691,6 +707,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
         if 'x-amz-copy-source' in request.headers:
             src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
             src_bucket, src_key = src.split("/", 1)
+            src_key, src_version_id = src_key.split("?versionId=") if "?versionId=" in src_key else (src_key, None)

             src_range = request.headers.get(
                 'x-amz-copy-source-range', '').split("bytes=")[-1]

@@ -700,9 +718,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
             except ValueError:
                 start_byte, end_byte = None, None

-            key = self.backend.copy_part(
-                bucket_name, upload_id, part_number, src_bucket,
-                src_key, start_byte, end_byte)
+            if self.backend.get_key(src_bucket, src_key, version_id=src_version_id):
+                key = self.backend.copy_part(
+                    bucket_name, upload_id, part_number, src_bucket,
+                    src_key, src_version_id, start_byte, end_byte)
+            else:
+                return 404, response_headers, ""

             template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
             response = template.render(part=key)
         else:

@@ -741,8 +763,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
                 lstrip("/").split("/", 1)
             src_version_id = parse_qs(src_key_parsed.query).get(
                 'versionId', [None])[0]
-            self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
-                                  storage=storage_class, acl=acl, src_version_id=src_version_id)
+
+            if self.backend.get_key(src_bucket, src_key, version_id=src_version_id):
+                self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
+                                      storage=storage_class, acl=acl, src_version_id=src_version_id)
+            else:
+                return 404, response_headers, ""
+
             new_key = self.backend.get_key(bucket_name, key_name)
             mdirective = request.headers.get('x-amz-metadata-directive')
             if mdirective is not None and mdirective == 'REPLACE':

@@ -1034,6 +1061,11 @@ class ResponseObject(_TemplateEnvironmentMixin):
         return parsed_xml["NotificationConfiguration"]

+    def _accelerate_config_from_xml(self, xml):
+        parsed_xml = xmltodict.parse(xml)
+        config = parsed_xml['AccelerateConfiguration']
+        return config['Status']
+
     def _key_response_delete(self, bucket_name, query, key_name, headers):
         if query.get('uploadId'):
             upload_id = query['uploadId'][0]

@@ -1686,3 +1718,13 @@ S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
 {% endfor %}
 </NotificationConfiguration>
 """

+S3_BUCKET_ACCELERATE = """
+<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+<Status>{{ bucket.accelerate_configuration }}</Status>
+</AccelerateConfiguration>
+"""
+
+S3_BUCKET_ACCELERATE_NOT_SET = """
+<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
+"""

View File

@@ -29,6 +29,14 @@ class InvalidParameterException(SecretsManagerClientError):
             message)


+class ResourceExistsException(SecretsManagerClientError):
+    def __init__(self, message):
+        super(ResourceExistsException, self).__init__(
+            'ResourceExistsException',
+            message
+        )
+
+
 class InvalidRequestException(SecretsManagerClientError):
     def __init__(self, message):
         super(InvalidRequestException, self).__init__(

View File

@@ -11,6 +11,7 @@ from moto.core import BaseBackend, BaseModel
 from .exceptions import (
     ResourceNotFoundException,
     InvalidParameterException,
+    ResourceExistsException,
     InvalidRequestException,
     ClientError
 )

@@ -47,6 +48,17 @@ class SecretsManagerBackend(BaseBackend):
         if not self._is_valid_identifier(secret_id):
             raise ResourceNotFoundException()

+        if not version_id and version_stage:
+            # set version_id to match version_stage
+            versions_dict = self.secrets[secret_id]['versions']
+            for ver_id, ver_val in versions_dict.items():
+                if version_stage in ver_val['version_stages']:
+                    version_id = ver_id
+                    break
+            if not version_id:
+                raise ResourceNotFoundException()
+
+        # TODO check this part
         if 'deleted_date' in self.secrets[secret_id]:
             raise InvalidRequestException(
                 "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \

@@ -54,42 +66,91 @@
             )

         secret = self.secrets[secret_id]
+        version_id = version_id or secret['default_version_id']
+
+        secret_version = secret['versions'][version_id]

         response = json.dumps({
             "ARN": secret_arn(self.region, secret['secret_id']),
             "Name": secret['name'],
-            "VersionId": secret['version_id'],
-            "SecretString": secret['secret_string'],
-            "VersionStages": [
-                "AWSCURRENT",
-            ],
-            "CreatedDate": secret['createdate']
+            "VersionId": secret_version['version_id'],
+            "SecretString": secret_version['secret_string'],
+            "VersionStages": secret_version['version_stages'],
+            "CreatedDate": secret_version['createdate'],
         })

         return response

     def create_secret(self, name, secret_string, tags, **kwargs):
-        generated_version_id = str(uuid.uuid4())
-
-        secret = {
-            'secret_string': secret_string,
-            'secret_id': name,
-            'name': name,
-            'createdate': int(time.time()),
-            'rotation_enabled': False,
-            'rotation_lambda_arn': '',
-            'auto_rotate_after_days': 0,
-            'version_id': generated_version_id,
-            'tags': tags
-        }
-
-        self.secrets[name] = secret
+        # error if secret exists
+        if name in self.secrets.keys():
+            raise ResourceExistsException('A resource with the ID you requested already exists.')
+
+        version_id = self._add_secret(name, secret_string, tags=tags)

         response = json.dumps({
             "ARN": secret_arn(self.region, name),
             "Name": name,
-            "VersionId": generated_version_id,
+            "VersionId": version_id,
         })

         return response

+    def _add_secret(self, secret_id, secret_string, tags=[], version_id=None, version_stages=None):
+        if version_stages is None:
+            version_stages = ['AWSCURRENT']
+
+        if not version_id:
+            version_id = str(uuid.uuid4())
+
+        secret_version = {
+            'secret_string': secret_string,
+            'createdate': int(time.time()),
+            'version_id': version_id,
+            'version_stages': version_stages,
+        }
+
+        if secret_id in self.secrets:
+            # remove all old AWSPREVIOUS stages
+            for secret_version_to_look_at in self.secrets[secret_id]['versions'].values():
+                if 'AWSPREVIOUS' in secret_version_to_look_at['version_stages']:
+                    secret_version_to_look_at['version_stages'].remove('AWSPREVIOUS')
+
+            # set old AWSCURRENT secret to AWSPREVIOUS
+            previous_current_version_id = self.secrets[secret_id]['default_version_id']
+            self.secrets[secret_id]['versions'][previous_current_version_id]['version_stages'] = ['AWSPREVIOUS']
+
+            self.secrets[secret_id]['versions'][version_id] = secret_version
+            self.secrets[secret_id]['default_version_id'] = version_id
+        else:
+            self.secrets[secret_id] = {
+                'versions': {
+                    version_id: secret_version
+                },
+                'default_version_id': version_id,
+            }
+
+        secret = self.secrets[secret_id]
+        secret['secret_id'] = secret_id
+        secret['name'] = secret_id
+        secret['rotation_enabled'] = False
+        secret['rotation_lambda_arn'] = ''
+        secret['auto_rotate_after_days'] = 0
+        secret['tags'] = tags
+
+        return version_id
+
+    def put_secret_value(self, secret_id, secret_string, version_stages):
+        version_id = self._add_secret(secret_id, secret_string, version_stages=version_stages)
+
+        response = json.dumps({
+            'ARN': secret_arn(self.region, secret_id),
+            'Name': secret_id,
+            'VersionId': version_id,
+            'VersionStages': version_stages
+        })
+
+        return response

@@ -162,17 +223,24 @@
         secret = self.secrets[secret_id]

-        secret['version_id'] = client_request_token or ''
+        old_secret_version = secret['versions'][secret['default_version_id']]
+        new_version_id = client_request_token or str(uuid.uuid4())
+
+        self._add_secret(secret_id, old_secret_version['secret_string'], secret['tags'], version_id=new_version_id, version_stages=['AWSCURRENT'])
+
         secret['rotation_lambda_arn'] = rotation_lambda_arn or ''
         if rotation_rules:
             secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0)
         if secret['auto_rotate_after_days'] > 0:
             secret['rotation_enabled'] = True

+        if 'AWSCURRENT' in old_secret_version['version_stages']:
+            old_secret_version['version_stages'].remove('AWSCURRENT')
+
         response = json.dumps({
             "ARN": secret_arn(self.region, secret['secret_id']),
             "Name": secret['name'],
-            "VersionId": secret['version_id']
+            "VersionId": new_version_id
         })

         return response

@@ -206,28 +274,54 @@
         return response

+    def list_secret_version_ids(self, secret_id):
+        secret = self.secrets[secret_id]
+
+        version_list = []
+        for version_id, version in secret['versions'].items():
+            version_list.append({
+                'CreatedDate': int(time.time()),
+                'LastAccessedDate': int(time.time()),
+                'VersionId': version_id,
+                'VersionStages': version['version_stages'],
+            })
+
+        response = json.dumps({
+            'ARN': secret['secret_id'],
+            'Name': secret['name'],
+            'NextToken': '',
+            'Versions': version_list,
+        })
+
+        return response
+
     def list_secrets(self, max_results, next_token):
         # TODO implement pagination and limits
-        secret_list = [{
-            "ARN": secret_arn(self.region, secret['secret_id']),
-            "DeletedDate": secret.get('deleted_date', None),
-            "Description": "",
-            "KmsKeyId": "",
-            "LastAccessedDate": None,
-            "LastChangedDate": None,
-            "LastRotatedDate": None,
-            "Name": secret['name'],
-            "RotationEnabled": secret['rotation_enabled'],
-            "RotationLambdaARN": secret['rotation_lambda_arn'],
-            "RotationRules": {
-                "AutomaticallyAfterDays": secret['auto_rotate_after_days']
-            },
-            "SecretVersionsToStages": {
-                secret['version_id']: ["AWSCURRENT"]
-            },
-            "Tags": secret['tags']
-        } for secret in self.secrets.values()]
+        secret_list = []
+        for secret in self.secrets.values():
+
+            versions_to_stages = {}
+            for version_id, version in secret['versions'].items():
+                versions_to_stages[version_id] = version['version_stages']
+
+            secret_list.append({
+                "ARN": secret_arn(self.region, secret['secret_id']),
+                "DeletedDate": secret.get('deleted_date', None),
+                "Description": "",
+                "KmsKeyId": "",
+                "LastAccessedDate": None,
+                "LastChangedDate": None,
+                "LastRotatedDate": None,
+                "Name": secret['name'],
+                "RotationEnabled": secret['rotation_enabled'],
+                "RotationLambdaARN": secret['rotation_lambda_arn'],
+                "RotationRules": {
+                    "AutomaticallyAfterDays": secret['auto_rotate_after_days']
+                },
+                "SecretVersionsToStages": versions_to_stages,
+                "Tags": secret['tags']
+            })

         return secret_list, None

View File

@@ -67,6 +67,22 @@ class SecretsManagerResponse(BaseResponse):
             rotation_rules=rotation_rules
         )

+    def put_secret_value(self):
+        secret_id = self._get_param('SecretId', if_none='')
+        secret_string = self._get_param('SecretString', if_none='')
+        version_stages = self._get_param('VersionStages', if_none=['AWSCURRENT'])
+        return secretsmanager_backends[self.region].put_secret_value(
+            secret_id=secret_id,
+            secret_string=secret_string,
+            version_stages=version_stages,
+        )
+
+    def list_secret_version_ids(self):
+        secret_id = self._get_param('SecretId', if_none='')
+        return secretsmanager_backends[self.region].list_secret_version_ids(
+            secret_id=secret_id
+        )
+
     def list_secrets(self):
         max_results = self._get_int_param("MaxResults")
         next_token = self._get_param("NextToken")
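A minimal sketch of the version lifecycle these endpoints implement; the secret name and values are illustrative. put_secret_value stores a new AWSCURRENT version via _add_secret, which also demotes the previous default version to AWSPREVIOUS, so get_secret_value can then select either version by stage:

import boto3
from moto import mock_secretsmanager


@mock_secretsmanager
def test_put_secret_value_rotates_stages():
    client = boto3.client("secretsmanager", region_name="us-west-2")
    client.create_secret(Name="my-secret", SecretString="first")
    client.put_secret_value(
        SecretId="my-secret",
        SecretString="second",
        VersionStages=["AWSCURRENT"],
    )
    current = client.get_secret_value(SecretId="my-secret", VersionStage="AWSCURRENT")
    previous = client.get_secret_value(SecretId="my-secret", VersionStage="AWSPREVIOUS")
    assert current["SecretString"] == "second"
    assert previous["SecretString"] == "first"
    # list_secret_version_ids reports both versions with their stages.
    versions = client.list_secret_version_ids(SecretId="my-secret")["Versions"]
    assert len(versions) == 2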

View File

@@ -255,7 +255,7 @@ class SNSBackend(BaseBackend):
         return candidate_topic

     def _get_values_nexttoken(self, values_map, next_token=None):
-        if next_token is None:
+        if next_token is None or not next_token:
             next_token = 0
         next_token = int(next_token)
         values = list(values_map.values())[
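The extra `or not next_token` guard treats an empty-string NextToken like a missing one, where `int('')` would previously have raised a ValueError. A minimal sketch of the call this tolerates, under the assumption that the client passes the token through verbatim; topic name and region are illustrative:

import boto3
from moto import mock_sns


@mock_sns
def test_list_topics_with_empty_next_token():
    client = boto3.client("sns", region_name="us-east-1")
    client.create_topic(Name="my-topic")
    # An explicit empty NextToken now starts from the first page
    # instead of blowing up inside _get_values_nexttoken.
    response = client.list_topics(NextToken="")
    assert len(response["Topics"]) == 1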

View File

@@ -420,7 +420,7 @@ CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
         <VisibilityTimeout>{{ queue.visibility_timeout }}</VisibilityTimeout>
     </CreateQueueResult>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </CreateQueueResponse>"""

@@ -429,7 +429,7 @@ GET_QUEUE_URL_RESPONSE = """<GetQueueUrlResponse>
         <QueueUrl>{{ queue.url(request_url) }}</QueueUrl>
     </GetQueueUrlResult>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </GetQueueUrlResponse>"""

@@ -440,13 +440,13 @@ LIST_QUEUES_RESPONSE = """<ListQueuesResponse>
         {% endfor %}
     </ListQueuesResult>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </ListQueuesResponse>"""

 DELETE_QUEUE_RESPONSE = """<DeleteQueueResponse>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </DeleteQueueResponse>"""

@@ -460,13 +460,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """<GetQueueAttributesResponse>
         {% endfor %}
     </GetQueueAttributesResult>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </GetQueueAttributesResponse>"""

 SET_QUEUE_ATTRIBUTE_RESPONSE = """<SetQueueAttributesResponse>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </SetQueueAttributesResponse>"""

@@ -483,7 +483,7 @@ SEND_MESSAGE_RESPONSE = """<SendMessageResponse>
         </MessageId>
     </SendMessageResult>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </SendMessageResponse>"""

@@ -543,7 +543,7 @@ RECEIVE_MESSAGE_RESPONSE = """<ReceiveMessageResponse>
         {% endfor %}
     </ReceiveMessageResult>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </ReceiveMessageResponse>"""

@@ -561,13 +561,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """<SendMessageBatchResponse>
         {% endfor %}
     </SendMessageBatchResult>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </SendMessageBatchResponse>"""

 DELETE_MESSAGE_RESPONSE = """<DeleteMessageResponse>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </DeleteMessageResponse>"""

@@ -580,13 +580,13 @@ DELETE_MESSAGE_BATCH_RESPONSE = """<DeleteMessageBatchResponse>
         {% endfor %}
     </DeleteMessageBatchResult>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </DeleteMessageBatchResponse>"""

 CHANGE_MESSAGE_VISIBILITY_RESPONSE = """<ChangeMessageVisibilityResponse>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </ChangeMessageVisibilityResponse>"""

@@ -613,7 +613,7 @@ CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """<ChangeMessageVisibilityBatchRespo
 PURGE_QUEUE_RESPONSE = """<PurgeQueueResponse>
     <ResponseMetadata>
-        <RequestId>{{ requestid }}</RequestId>
+        <RequestId></RequestId>
     </ResponseMetadata>
 </PurgeQueueResponse>"""

View File

@@ -28,7 +28,7 @@ install_requires = [
     "xmltodict",
     "six>1.9",
     "werkzeug",
-    "PyYAML",
+    "PyYAML==3.13",
     "pytz",
     "python-dateutil<3.0.0,>=2.1",
     "python-jose<4.0.0",
@@ -39,6 +39,7 @@ install_requires = [
     "responses>=0.9.0",
     "idna<2.9,>=2.5",
     "cfn-lint",
+    "sshpubkeys>=3.1.0,<4.0"
 ]

 extras_require = {

View File

@@ -32,7 +32,7 @@ def test_create_autoscaling_group():
     group = AutoScalingGroup(
         name='tester_group',
-        availability_zones=['us-east-1c', 'us-east-1b'],
+        availability_zones=['us-east-1a', 'us-east-1b'],
         default_cooldown=60,
         desired_capacity=2,
         health_check_period=100,

@@ -42,7 +42,10 @@ def test_create_autoscaling_group():
         launch_config=config,
         load_balancers=["test_lb"],
         placement_group="test_placement",
-        vpc_zone_identifier=mocked_networking['subnet1'],
+        vpc_zone_identifier="{subnet1},{subnet2}".format(
+            subnet1=mocked_networking['subnet1'],
+            subnet2=mocked_networking['subnet2'],
+        ),
         termination_policies=["OldestInstance", "NewestInstance"],
         tags=[Tag(
             resource_id='tester_group',

@@ -57,12 +60,15 @@ def test_create_autoscaling_group():
     group = conn.get_all_groups()[0]
     group.name.should.equal('tester_group')
     set(group.availability_zones).should.equal(
-        set(['us-east-1c', 'us-east-1b']))
+        set(['us-east-1a', 'us-east-1b']))
     group.desired_capacity.should.equal(2)
     group.max_size.should.equal(2)
     group.min_size.should.equal(2)
     group.instances.should.have.length_of(2)
-    group.vpc_zone_identifier.should.equal(mocked_networking['subnet1'])
+    group.vpc_zone_identifier.should.equal("{subnet1},{subnet2}".format(
+        subnet1=mocked_networking['subnet1'],
+        subnet2=mocked_networking['subnet2'],
+    ))
     group.launch_config_name.should.equal('tester')
     group.default_cooldown.should.equal(60)
     group.health_check_period.should.equal(100)

@@ -109,7 +115,7 @@ def test_create_autoscaling_groups_defaults():
     group.launch_config_name.should.equal('tester')

     # Defaults
-    list(group.availability_zones).should.equal([])
+    list(group.availability_zones).should.equal(['us-east-1a'])  # subnet1
     group.desired_capacity.should.equal(2)
     group.vpc_zone_identifier.should.equal(mocked_networking['subnet1'])
     group.default_cooldown.should.equal(300)

@@ -217,7 +223,6 @@ def test_autoscaling_update():
     group = AutoScalingGroup(
         name='tester_group',
-        availability_zones=['us-east-1c', 'us-east-1b'],
         desired_capacity=2,
         max_size=2,
         min_size=2,

@@ -227,13 +232,16 @@ def test_autoscaling_update():
     conn.create_auto_scaling_group(group)

     group = conn.get_all_groups()[0]
+    group.availability_zones.should.equal(['us-east-1a'])
     group.vpc_zone_identifier.should.equal(mocked_networking['subnet1'])

-    group.vpc_zone_identifier = 'subnet-5678efgh'
+    group.availability_zones = ['us-east-1b']
+    group.vpc_zone_identifier = mocked_networking['subnet2']
     group.update()

     group = conn.get_all_groups()[0]
-    group.vpc_zone_identifier.should.equal('subnet-5678efgh')
+    group.availability_zones.should.equal(['us-east-1b'])
+    group.vpc_zone_identifier.should.equal(mocked_networking['subnet2'])


 @mock_autoscaling_deprecated
@@ -249,7 +257,7 @@ def test_autoscaling_tags_update():
     group = AutoScalingGroup(
         name='tester_group',
-        availability_zones=['us-east-1c', 'us-east-1b'],
+        availability_zones=['us-east-1a'],
         desired_capacity=2,
         max_size=2,
         min_size=2,

@@ -309,7 +317,7 @@ def test_autoscaling_group_delete():
 @mock_autoscaling_deprecated
 def test_autoscaling_group_describe_instances():
     mocked_networking = setup_networking_deprecated()
-    conn = boto.connect_autoscale()
+    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
     config = LaunchConfiguration(
         name='tester',
         image_id='ami-abcd1234',

@@ -332,7 +340,7 @@ def test_autoscaling_group_describe_instances():
     instances[0].health_status.should.equal('Healthy')
     autoscale_instance_ids = [instance.instance_id for instance in instances]

-    ec2_conn = boto.connect_ec2()
+    ec2_conn = boto.ec2.connect_to_region('us-east-1')
     reservations = ec2_conn.get_all_instances()
     instances = reservations[0].instances
     instances.should.have.length_of(2)

@@ -355,7 +363,7 @@ def test_set_desired_capacity_up():
     group = AutoScalingGroup(
         name='tester_group',
-        availability_zones=['us-east-1c', 'us-east-1b'],
+        availability_zones=['us-east-1a'],
         desired_capacity=2,
         max_size=2,
         min_size=2,

@@ -391,7 +399,7 @@ def test_set_desired_capacity_down():
     group = AutoScalingGroup(
         name='tester_group',
-        availability_zones=['us-east-1c', 'us-east-1b'],
+        availability_zones=['us-east-1a'],
         desired_capacity=2,
         max_size=2,
         min_size=2,

@@ -427,7 +435,7 @@ def test_set_desired_capacity_the_same():
     group = AutoScalingGroup(
         name='tester_group',
-        availability_zones=['us-east-1c', 'us-east-1b'],
+        availability_zones=['us-east-1a'],
         desired_capacity=2,
         max_size=2,
         min_size=2,

@@ -543,6 +551,7 @@ def test_describe_load_balancers():
     )

     response = client.describe_load_balancers(AutoScalingGroupName='test_asg')
+    assert response['ResponseMetadata']['RequestId']
     list(response['LoadBalancers']).should.have.length_of(1)
     response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb')

@@ -738,8 +747,12 @@ def test_describe_autoscaling_groups_boto3():
     response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
     group = response['AutoScalingGroups'][0]
     group['AutoScalingGroupName'].should.equal('test_asg')
+    group['AvailabilityZones'].should.equal(['us-east-1a'])
+    group['VPCZoneIdentifier'].should.equal(mocked_networking['subnet1'])
     group['NewInstancesProtectedFromScaleIn'].should.equal(True)
-    group['Instances'][0]['ProtectedFromScaleIn'].should.equal(True)
+    for instance in group['Instances']:
+        instance['AvailabilityZone'].should.equal('us-east-1a')
+        instance['ProtectedFromScaleIn'].should.equal(True)


 @mock_autoscaling
@@ -770,6 +783,7 @@ def test_describe_autoscaling_instances_boto3():
     response = client.describe_auto_scaling_instances(InstanceIds=instance_ids)
     for instance in response['AutoScalingInstances']:
         instance['AutoScalingGroupName'].should.equal('test_asg')
+        instance['AvailabilityZone'].should.equal('us-east-1a')
         instance['ProtectedFromScaleIn'].should.equal(True)

@@ -793,6 +807,10 @@ def test_update_autoscaling_group_boto3():
     _ = client.update_auto_scaling_group(
         AutoScalingGroupName='test_asg',
         MinSize=1,
+        VPCZoneIdentifier="{subnet1},{subnet2}".format(
+            subnet1=mocked_networking['subnet1'],
+            subnet2=mocked_networking['subnet2'],
+        ),
         NewInstancesProtectedFromScaleIn=False,
     )

@@ -801,6 +819,7 @@ def test_update_autoscaling_group_boto3():
     )
     group = response['AutoScalingGroups'][0]
     group['MinSize'].should.equal(1)
+    set(group['AvailabilityZones']).should.equal({'us-east-1a', 'us-east-1b'})
     group['NewInstancesProtectedFromScaleIn'].should.equal(False)

View File

@@ -106,7 +106,7 @@ def test_detach_all_target_groups():
         MaxSize=INSTANCE_COUNT,
         DesiredCapacity=INSTANCE_COUNT,
         TargetGroupARNs=[target_group_arn],
-        VPCZoneIdentifier=mocked_networking['vpc'])
+        VPCZoneIdentifier=mocked_networking['subnet1'])

     response = client.describe_load_balancer_target_groups(
         AutoScalingGroupName='test_asg')

View File

@@ -1,5 +1,6 @@
 import boto
 import boto3
+from boto import vpc as boto_vpc

 from moto import mock_ec2, mock_ec2_deprecated

@@ -19,9 +20,14 @@ def setup_networking():

 @mock_ec2_deprecated
 def setup_networking_deprecated():
-    conn = boto.connect_vpc()
+    conn = boto_vpc.connect_to_region('us-east-1')
     vpc = conn.create_vpc("10.11.0.0/16")
-    subnet1 = conn.create_subnet(vpc.id, "10.11.1.0/24")
-    subnet2 = conn.create_subnet(vpc.id, "10.11.2.0/24")
+    subnet1 = conn.create_subnet(
+        vpc.id,
+        "10.11.1.0/24",
+        availability_zone='us-east-1a')
+    subnet2 = conn.create_subnet(
+        vpc.id,
+        "10.11.2.0/24",
+        availability_zone='us-east-1b')
     return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id}

View File

@@ -282,7 +282,7 @@ def test_create_function_from_aws_bucket():
     result.pop('LastModified')
     result.should.equal({
         'FunctionName': 'testFunction',
-        'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
+        'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
         'Runtime': 'python2.7',
         'Role': 'test-iam-role',
         'Handler': 'lambda_function.lambda_handler',

@@ -291,7 +291,7 @@ def test_create_function_from_aws_bucket():
         'Description': 'test lambda function',
         'Timeout': 3,
         'MemorySize': 128,
-        'Version': '$LATEST',
+        'Version': '1',
         'VpcConfig': {
             "SecurityGroupIds": ["sg-123abc"],
             "SubnetIds": ["subnet-123abc"],

@@ -327,7 +327,7 @@ def test_create_function_from_zipfile():
     result.should.equal({
         'FunctionName': 'testFunction',
-        'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
+        'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
         'Runtime': 'python2.7',
         'Role': 'test-iam-role',
         'Handler': 'lambda_function.lambda_handler',

@@ -336,7 +336,7 @@ def test_create_function_from_zipfile():
         'Timeout': 3,
         'MemorySize': 128,
         'CodeSha256': hashlib.sha256(zip_content).hexdigest(),
-        'Version': '$LATEST',
+        'Version': '1',
         'VpcConfig': {
             "SecurityGroupIds": [],
             "SubnetIds": [],

@@ -398,6 +398,8 @@ def test_get_function():
     # Test get function with
     result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST')
     result['Configuration']['Version'].should.equal('$LATEST')
+    result['Configuration']['FunctionArn'].should.equal('arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST')
+
     # Test get function when can't find function name
     with assert_raises(ClientError):

@@ -464,7 +466,7 @@ def test_publish():
         Description='test lambda function',
         Timeout=3,
         MemorySize=128,
-        Publish=True,
+        Publish=False,
     )

     function_list = conn.list_functions()

@@ -485,7 +487,7 @@ def test_publish():
     function_list = conn.list_functions()
     function_list['Functions'].should.have.length_of(1)
-    function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST')
+    function_list['Functions'][0]['FunctionArn'].should.contain('testFunction')


 @mock_lambda
@@ -528,7 +530,7 @@ def test_list_create_list_get_delete_list():
         "CodeSha256": hashlib.sha256(zip_content).hexdigest(),
         "CodeSize": len(zip_content),
         "Description": "test lambda function",
-        "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
+        "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
         "FunctionName": "testFunction",
         "Handler": "lambda_function.lambda_handler",
         "MemorySize": 128,

@@ -741,7 +743,7 @@ def test_get_function_created_with_zipfile():
         "CodeSha256": hashlib.sha256(zip_content).hexdigest(),
         "CodeSize": len(zip_content),
         "Description": "test lambda function",
-        "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
+        "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
         "FunctionName": "testFunction",
         "Handler": "lambda_function.handler",
         "MemorySize": 128,

@@ -842,7 +844,7 @@ def test_list_versions_by_function():
     conn.create_function(
         FunctionName='testFunction',
         Runtime='python2.7',
-        Role='test-iam-role',
+        Role='arn:aws:iam::123456789012:role/test-iam-role',
         Handler='lambda_function.lambda_handler',
         Code={
             'S3Bucket': 'test-bucket',

@@ -857,8 +859,28 @@ def test_list_versions_by_function():
     res = conn.publish_version(FunctionName='testFunction')
     assert res['ResponseMetadata']['HTTPStatusCode'] == 201
     versions = conn.list_versions_by_function(FunctionName='testFunction')
+    assert len(versions['Versions']) == 3
     assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST'
+    assert versions['Versions'][1]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:1'
+    assert versions['Versions'][2]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:2'
+
+    conn.create_function(
+        FunctionName='testFunction_2',
+        Runtime='python2.7',
+        Role='arn:aws:iam::123456789012:role/test-iam-role',
+        Handler='lambda_function.lambda_handler',
+        Code={
+            'S3Bucket': 'test-bucket',
+            'S3Key': 'test.zip',
+        },
+        Description='test lambda function',
+        Timeout=3,
+        MemorySize=128,
+        Publish=False,
+    )
+    versions = conn.list_versions_by_function(FunctionName='testFunction_2')
+    assert len(versions['Versions']) == 1
+    assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction_2:$LATEST'


 @mock_lambda

View File

@@ -2,6 +2,8 @@ from __future__ import unicode_literals
 import json
 import base64
+from decimal import Decimal
+
 import boto
 import boto.cloudformation
 import boto.datapipeline

@@ -22,6 +24,7 @@ from moto import (
     mock_cloudformation,
     mock_cloudformation_deprecated,
     mock_datapipeline_deprecated,
+    mock_dynamodb2,
     mock_ec2,
     mock_ec2_deprecated,
     mock_elb,

@@ -39,6 +42,7 @@ from moto import (
     mock_sqs,
     mock_sqs_deprecated,
     mock_elbv2)
+from moto.dynamodb2.models import Table

 from .fixtures import (
     ec2_classic_eip,

@@ -2085,7 +2089,7 @@ def test_stack_kms():
 def test_stack_spot_fleet():
     conn = boto3.client('ec2', 'us-east-1')

-    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
+    vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
     subnet = conn.create_subnet(
         VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
     subnet_id = subnet['SubnetId']

@@ -2169,7 +2173,7 @@ def test_stack_spot_fleet():
 def test_stack_spot_fleet_should_figure_out_default_price():
     conn = boto3.client('ec2', 'us-east-1')

-    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
+    vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
     subnet = conn.create_subnet(
         VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
     subnet_id = subnet['SubnetId']

@@ -2433,3 +2437,131 @@ def test_stack_elbv2_resources_integration():
     dns['OutputValue'].should.equal(load_balancers[0]['DNSName'])
     name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName'])

@mock_dynamodb2
@mock_cloudformation
def test_stack_dynamodb_resources_integration():
    dynamodb_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "myDynamoDBTable": {
                "Type": "AWS::DynamoDB::Table",
                "Properties": {
                    "AttributeDefinitions": [
                        {
                            "AttributeName": "Album",
                            "AttributeType": "S"
                        },
                        {
                            "AttributeName": "Artist",
                            "AttributeType": "S"
                        },
                        {
                            "AttributeName": "Sales",
                            "AttributeType": "N"
                        },
                        {
                            "AttributeName": "NumberOfSongs",
                            "AttributeType": "N"
                        }
                    ],
                    "KeySchema": [
                        {
                            "AttributeName": "Album",
                            "KeyType": "HASH"
                        },
                        {
                            "AttributeName": "Artist",
                            "KeyType": "RANGE"
                        }
                    ],
                    "ProvisionedThroughput": {
                        "ReadCapacityUnits": "5",
                        "WriteCapacityUnits": "5"
                    },
                    "TableName": "myTableName",
                    "GlobalSecondaryIndexes": [{
                        "IndexName": "myGSI",
                        "KeySchema": [
                            {
                                "AttributeName": "Sales",
                                "KeyType": "HASH"
                            },
                            {
                                "AttributeName": "Artist",
                                "KeyType": "RANGE"
                            }
                        ],
                        "Projection": {
                            "NonKeyAttributes": ["Album", "NumberOfSongs"],
                            "ProjectionType": "INCLUDE"
                        },
                        "ProvisionedThroughput": {
                            "ReadCapacityUnits": "5",
                            "WriteCapacityUnits": "5"
                        }
                    },
                    {
                        "IndexName": "myGSI2",
                        "KeySchema": [
                            {
                                "AttributeName": "NumberOfSongs",
                                "KeyType": "HASH"
                            },
                            {
                                "AttributeName": "Sales",
                                "KeyType": "RANGE"
                            }
                        ],
                        "Projection": {
                            "NonKeyAttributes": ["Album", "Artist"],
                            "ProjectionType": "INCLUDE"
                        },
                        "ProvisionedThroughput": {
                            "ReadCapacityUnits": "5",
                            "WriteCapacityUnits": "5"
                        }
                    }],
                    "LocalSecondaryIndexes": [{
                        "IndexName": "myLSI",
                        "KeySchema": [
                            {
                                "AttributeName": "Album",
                                "KeyType": "HASH"
                            },
                            {
                                "AttributeName": "Sales",
                                "KeyType": "RANGE"
                            }
                        ],
                        "Projection": {
                            "NonKeyAttributes": ["Artist", "NumberOfSongs"],
                            "ProjectionType": "INCLUDE"
                        }
                    }]
                }
            }
        }
    }

    dynamodb_template_json = json.dumps(dynamodb_template)

    cfn_conn = boto3.client('cloudformation', 'us-east-1')
    cfn_conn.create_stack(
        StackName='dynamodb_stack',
        TemplateBody=dynamodb_template_json,
    )

    dynamodb_conn = boto3.resource('dynamodb', region_name='us-east-1')
    table = dynamodb_conn.Table('myTableName')
    table.name.should.equal('myTableName')

    table.put_item(Item={"Album": "myAlbum", "Artist": "myArtist", "Sales": 10, "NumberOfSongs": 5})

    response = table.get_item(Key={"Album": "myAlbum", "Artist": "myArtist"})

    response['Item']['Album'].should.equal('myAlbum')
    response['Item']['Sales'].should.equal(Decimal('10'))
    response['Item']['NumberOfSongs'].should.equal(Decimal('5'))
    response['Item']['Album'].should.equal('myAlbum')

View File

@@ -83,6 +83,18 @@ get_availability_zones_output = {
     }
 }

+parameters = {
+    "Parameters": {
+        "Param": {
+            "Type": "String",
+        },
+        "NoEchoParam": {
+            "Type": "String",
+            "NoEcho": True
+        }
+    }
+}
+
 split_select_template = {
     "AWSTemplateFormatVersion": "2010-09-09",
     "Resources": {

@@ -157,6 +169,9 @@
 get_availability_zones_template = dict(
     list(dummy_template.items()) + list(get_availability_zones_output.items()))

+parameters_template = dict(
+    list(dummy_template.items()) + list(parameters.items()))
+
 dummy_template_json = json.dumps(dummy_template)
 name_type_template_json = json.dumps(name_type_template)
 output_type_template_json = json.dumps(outputs_template)

@@ -165,6 +180,7 @@
 get_attribute_outputs_template_json = json.dumps(
     get_attribute_outputs_template)
 get_availability_zones_template_json = json.dumps(
     get_availability_zones_template)
+parameters_template_json = json.dumps(parameters_template)
 split_select_template_json = json.dumps(split_select_template)
 sub_template_json = json.dumps(sub_template)
 export_value_template_json = json.dumps(export_value_template)

@@ -290,6 +306,18 @@ def test_parse_stack_with_bad_get_attribute_outputs():
         "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError)


+def test_parse_stack_with_parameters():
+    stack = FakeStack(
+        stack_id="test_id",
+        name="test_stack",
+        template=parameters_template_json,
+        parameters={"Param": "visible value", "NoEchoParam": "hidden value"},
+        region_name='us-west-1')
+
+    stack.resource_map.no_echo_parameter_keys.should.have("NoEchoParam")
+    stack.resource_map.no_echo_parameter_keys.should_not.have("Param")
+
+
 def test_parse_equals_condition():
     parse_condition(
         condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},

View File

@@ -1162,3 +1162,53 @@ def test_confirm_forgot_password():
         ConfirmationCode=str(uuid.uuid4()),
         Password=str(uuid.uuid4()),
     )


@mock_cognitoidp
def test_admin_update_user_attributes():
    conn = boto3.client("cognito-idp", "us-west-2")

    username = str(uuid.uuid4())
    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]

    conn.admin_create_user(
        UserPoolId=user_pool_id,
        Username=username,
        UserAttributes=[
            {
                'Name': 'family_name',
                'Value': 'Doe',
            },
            {
                'Name': 'given_name',
                'Value': 'John',
            }
        ]
    )

    conn.admin_update_user_attributes(
        UserPoolId=user_pool_id,
        Username=username,
        UserAttributes=[
            {
                'Name': 'family_name',
                'Value': 'Doe',
            },
            {
                'Name': 'given_name',
                'Value': 'Jane',
            }
        ]
    )

    user = conn.admin_get_user(
        UserPoolId=user_pool_id,
        Username=username
    )
    attributes = user['UserAttributes']
    attributes.should.be.a(list)
    for attr in attributes:
        val = attr['Value']
        if attr['Name'] == 'family_name':
            val.should.equal('Doe')
        elif attr['Name'] == 'given_name':
            val.should.equal('Jane')

View File

@@ -2,7 +2,9 @@ from __future__ import unicode_literals

 import sure  # noqa

-from moto.core.responses import AWSServiceSpec
+from botocore.awsrequest import AWSPreparedRequest
+
+from moto.core.responses import AWSServiceSpec, BaseResponse
 from moto.core.responses import flatten_json_request_body

@@ -79,3 +81,9 @@ def test_flatten_json_request_body():
             i += 1
             key = keyfmt.format(idx + 1, i)
         props.should.equal(body['Configurations'][idx]['Properties'])


def test_parse_qs_unicode_decode_error():
    body = b'{"key": "%D0"}, "C": "#0 = :0"}'
    request = AWSPreparedRequest('GET', 'http://request', {'foo': 'bar'}, body, False)
    BaseResponse().setup_class(request, request.url, request.headers)

View File

@@ -949,6 +949,33 @@ def test_bad_scan_filter():
         raise RuntimeError('Should of raised ResourceInUseException')


@mock_dynamodb2
def test_create_table_pay_per_request():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='test1',
        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
        BillingMode="PAY_PER_REQUEST"
    )


@mock_dynamodb2
def test_create_table_error_pay_per_request_with_provisioned_param():
    client = boto3.client('dynamodb', region_name='us-east-1')

    try:
        client.create_table(
            TableName='test1',
            AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
            KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
            ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123},
            BillingMode="PAY_PER_REQUEST"
        )
    except ClientError as err:
        err.response['Error']['Code'].should.equal('ValidationException')


@@ -1505,6 +1532,7 @@ def test_dynamodb_streams_2():
     assert 'LatestStreamLabel' in resp['TableDescription']
     assert 'LatestStreamArn' in resp['TableDescription']

+
 @mock_dynamodb2
 def test_condition_expressions():
     client = boto3.client('dynamodb', region_name='us-east-1')

@@ -1669,8 +1697,8 @@ def test_query_gsi_with_range_key():
     res = dynamodb.query(TableName='test', IndexName='test_gsi',
                          KeyConditionExpression='gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key',
                          ExpressionAttributeValues={
-                             ':gsi_hash_key': {'S': 'key1'},
-                             ':gsi_range_key': {'S': 'range1'}
+                            ':gsi_hash_key': {'S': 'key1'},
+                            ':gsi_range_key': {'S': 'range1'}
                          })
     res.should.have.key("Count").equal(1)
     res.should.have.key("Items")

@@ -1679,3 +1707,45 @@ def test_query_gsi_with_range_key():
         'gsi_hash_key': {'S': 'key1'},
         'gsi_range_key': {'S': 'range1'},
     })


@mock_dynamodb2
def test_scan_by_non_exists_index():
    dynamodb = boto3.client('dynamodb', region_name='us-east-1')

    dynamodb.create_table(
        TableName='test',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[
            {'AttributeName': 'id', 'AttributeType': 'S'},
            {'AttributeName': 'gsi_col', 'AttributeType': 'S'}
        ],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
        GlobalSecondaryIndexes=[
            {
                'IndexName': 'test_gsi',
                'KeySchema': [
                    {
                        'AttributeName': 'gsi_col',
                        'KeyType': 'HASH'
                    },
                ],
                'Projection': {
                    'ProjectionType': 'ALL',
                },
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 1,
                    'WriteCapacityUnits': 1
                }
            },
        ]
    )

    with assert_raises(ClientError) as ex:
        dynamodb.scan(TableName='test', IndexName='non_exists_index')

    ex.exception.response['Error']['Code'].should.equal('ValidationException')
    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400)
    ex.exception.response['Error']['Message'].should.equal(
        'The table does not have the specified index: non_exists_index'
    )


@@ -1344,6 +1344,34 @@ def test_update_item_add_value_string_set():
        'subject': '123',
    })
@mock_dynamodb2
def test_update_item_delete_value_string_set():
table = _create_table_with_range_key()
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123',
'string_set': set(['str1', 'str2']),
})
item_key = {'forum_name': 'the-key', 'subject': '123'}
table.update_item(
Key=item_key,
AttributeUpdates={
'string_set': {
'Action': u'DELETE',
'Value': set(['str2']),
},
},
)
returned_item = dict((k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)['Item'].items())
dict(returned_item).should.equal({
'string_set': set(['str1']),
'forum_name': 'the-key',
'subject': '123',
})
@mock_dynamodb2
def test_update_item_add_value_does_not_exist_is_created():
@@ -1961,3 +1989,113 @@ def test_query_pagination():
    results = page1['Items'] + page2['Items']
    subjects = set([int(r['subject']) for r in results])
    subjects.should.equal(set(range(10)))
@mock_dynamodb2
def test_scan_by_index():
dynamodb = boto3.client('dynamodb', region_name='us-east-1')
dynamodb.create_table(
TableName='test',
KeySchema=[
{'AttributeName': 'id', 'KeyType': 'HASH'},
{'AttributeName': 'range_key', 'KeyType': 'RANGE'},
],
AttributeDefinitions=[
{'AttributeName': 'id', 'AttributeType': 'S'},
{'AttributeName': 'range_key', 'AttributeType': 'S'},
{'AttributeName': 'gsi_col', 'AttributeType': 'S'},
{'AttributeName': 'gsi_range_key', 'AttributeType': 'S'},
{'AttributeName': 'lsi_range_key', 'AttributeType': 'S'},
],
ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
GlobalSecondaryIndexes=[
{
'IndexName': 'test_gsi',
'KeySchema': [
{'AttributeName': 'gsi_col', 'KeyType': 'HASH'},
{'AttributeName': 'gsi_range_key', 'KeyType': 'RANGE'},
],
'Projection': {
'ProjectionType': 'ALL',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1
}
},
],
LocalSecondaryIndexes=[
{
'IndexName': 'test_lsi',
'KeySchema': [
{'AttributeName': 'id', 'KeyType': 'HASH'},
{'AttributeName': 'lsi_range_key', 'KeyType': 'RANGE'},
],
'Projection': {
'ProjectionType': 'ALL',
},
},
]
)
dynamodb.put_item(
TableName='test',
Item={
'id': {'S': '1'},
'range_key': {'S': '1'},
'col1': {'S': 'val1'},
'gsi_col': {'S': '1'},
'gsi_range_key': {'S': '1'},
'lsi_range_key': {'S': '1'},
}
)
dynamodb.put_item(
TableName='test',
Item={
'id': {'S': '1'},
'range_key': {'S': '2'},
'col1': {'S': 'val2'},
'gsi_col': {'S': '1'},
'gsi_range_key': {'S': '2'},
'lsi_range_key': {'S': '2'},
}
)
dynamodb.put_item(
TableName='test',
Item={
'id': {'S': '3'},
'range_key': {'S': '1'},
'col1': {'S': 'val3'},
}
)
res = dynamodb.scan(TableName='test')
assert res['Count'] == 3
assert len(res['Items']) == 3
res = dynamodb.scan(TableName='test', IndexName='test_gsi')
assert res['Count'] == 2
assert len(res['Items']) == 2
res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1)
assert res['Count'] == 1
assert len(res['Items']) == 1
last_eval_key = res['LastEvaluatedKey']
assert last_eval_key['id']['S'] == '1'
assert last_eval_key['gsi_col']['S'] == '1'
assert last_eval_key['gsi_range_key']['S'] == '1'
res = dynamodb.scan(TableName='test', IndexName='test_lsi')
assert res['Count'] == 2
assert len(res['Items']) == 2
res = dynamodb.scan(TableName='test', IndexName='test_lsi', Limit=1)
assert res['Count'] == 1
assert len(res['Items']) == 1
last_eval_key = res['LastEvaluatedKey']
assert last_eval_key['id']['S'] == '1'
assert last_eval_key['range_key']['S'] == '1'
assert last_eval_key['lsi_range_key']['S'] == '1'


@@ -829,3 +829,77 @@ def test_scan_pagination():
    results = page1['Items'] + page2['Items']
    usernames = set([r['username'] for r in results])
    usernames.should.equal(set(expected_usernames))
@mock_dynamodb2
def test_scan_by_index():
dynamodb = boto3.client('dynamodb', region_name='us-east-1')
dynamodb.create_table(
TableName='test',
KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
AttributeDefinitions=[
{'AttributeName': 'id', 'AttributeType': 'S'},
{'AttributeName': 'gsi_col', 'AttributeType': 'S'}
],
ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
GlobalSecondaryIndexes=[
{
'IndexName': 'test_gsi',
'KeySchema': [
{
'AttributeName': 'gsi_col',
'KeyType': 'HASH'
},
],
'Projection': {
'ProjectionType': 'ALL',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1
}
},
]
)
dynamodb.put_item(
TableName='test',
Item={
'id': {'S': '1'},
'col1': {'S': 'val1'},
'gsi_col': {'S': 'gsi_val1'},
}
)
dynamodb.put_item(
TableName='test',
Item={
'id': {'S': '2'},
'col1': {'S': 'val2'},
'gsi_col': {'S': 'gsi_val2'},
}
)
dynamodb.put_item(
TableName='test',
Item={
'id': {'S': '3'},
'col1': {'S': 'val3'},
}
)
res = dynamodb.scan(TableName='test')
assert res['Count'] == 3
assert len(res['Items']) == 3
res = dynamodb.scan(TableName='test', IndexName='test_gsi')
assert res['Count'] == 2
assert len(res['Items']) == 2
res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1)
assert res['Count'] == 1
assert len(res['Items']) == 1
last_eval_key = res['LastEvaluatedKey']
assert last_eval_key['id']['S'] == '1'
assert last_eval_key['gsi_col']['S'] == 'gsi_val1'

tests/test_ec2/helpers.py (new file)

@@ -0,0 +1,15 @@
import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
def rsa_check_private_key(private_key_material):
assert isinstance(private_key_material, six.string_types)
private_key = serialization.load_pem_private_key(
data=private_key_material.encode('ascii'),
backend=default_backend(),
password=None)
assert isinstance(private_key, rsa.RSAPrivateKey)
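Editor's note (not part of the commit): a minimal sketch of how this helper can be exercised against a locally generated key, using the same cryptography package. The 2048-bit size and TraditionalOpenSSL PEM format below are assumptions for illustration.

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

# Generate a throwaway RSA key and serialize it to an ASCII PEM string,
# which is the shape of input rsa_check_private_key expects.
key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend())
pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm=serialization.NoEncryption()).decode('ascii')
rsa_check_private_key(pem)  # passes: valid PEM and an RSA key type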


@@ -16,7 +16,7 @@ from moto import mock_ec2_deprecated, mock_ec2
@mock_ec2_deprecated
def test_create_and_delete_volume():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume = conn.create_volume(80, "us-east-1a")
    all_volumes = conn.get_all_volumes()
@@ -52,7 +52,7 @@ def test_create_and_delete_volume():
@mock_ec2_deprecated
def test_create_encrypted_volume_dryrun():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    with assert_raises(EC2ResponseError) as ex:
        conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
@@ -63,7 +63,7 @@ def test_create_encrypted_volume_dryrun():
@mock_ec2_deprecated
def test_create_encrypted_volume():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume = conn.create_volume(80, "us-east-1a", encrypted=True)
    with assert_raises(EC2ResponseError) as ex:
@@ -79,7 +79,7 @@ def test_create_encrypted_volume():
@mock_ec2_deprecated
def test_filter_volume_by_id():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume1 = conn.create_volume(80, "us-east-1a")
    volume2 = conn.create_volume(36, "us-east-1b")
    volume3 = conn.create_volume(20, "us-east-1c")
@@ -99,7 +99,7 @@ def test_filter_volume_by_id():
@mock_ec2_deprecated
def test_volume_filters():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
@@ -196,7 +196,7 @@ def test_volume_filters():
@mock_ec2_deprecated
def test_volume_attach_and_detach():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
    volume = conn.create_volume(80, "us-east-1a")
@@ -252,7 +252,7 @@ def test_volume_attach_and_detach():
@mock_ec2_deprecated
def test_create_snapshot():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume = conn.create_volume(80, "us-east-1a")
    with assert_raises(EC2ResponseError) as ex:
@@ -291,7 +291,7 @@ def test_create_snapshot():
@mock_ec2_deprecated
def test_create_encrypted_snapshot():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume = conn.create_volume(80, "us-east-1a", encrypted=True)
    snapshot = volume.create_snapshot('a test snapshot')
    snapshot.update()
@@ -306,7 +306,7 @@ def test_create_encrypted_snapshot():
@mock_ec2_deprecated
def test_filter_snapshot_by_id():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume1 = conn.create_volume(36, "us-east-1a")
    snap1 = volume1.create_snapshot('a test snapshot 1')
    volume2 = conn.create_volume(42, 'us-east-1a')
@@ -333,7 +333,7 @@ def test_filter_snapshot_by_id():
@mock_ec2_deprecated
def test_snapshot_filters():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume1 = conn.create_volume(20, "us-east-1a", encrypted=False)
    volume2 = conn.create_volume(25, "us-east-1a", encrypted=True)
@@ -394,12 +394,17 @@ def test_snapshot_filters():
    set([snap.id for snap in snapshots_by_encrypted]
        ).should.equal({snapshot3.id})
snapshots_by_owner_id = conn.get_all_snapshots(
filters={'owner-id': '123456789012'})
set([snap.id for snap in snapshots_by_owner_id]
).should.equal({snapshot1.id, snapshot2.id, snapshot3.id})
@mock_ec2_deprecated
def test_snapshot_attribute():
    import copy
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume = conn.create_volume(80, "us-east-1a")
    snapshot = volume.create_snapshot()
@@ -502,7 +507,7 @@ def test_snapshot_attribute():
@mock_ec2_deprecated
def test_create_volume_from_snapshot():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume = conn.create_volume(80, "us-east-1a")
    snapshot = volume.create_snapshot('a test snapshot')
@@ -524,7 +529,7 @@ def test_create_volume_from_snapshot():
@mock_ec2_deprecated
def test_create_volume_from_encrypted_snapshot():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    volume = conn.create_volume(80, "us-east-1a", encrypted=True)
    snapshot = volume.create_snapshot('a test snapshot')
@@ -569,7 +574,7 @@ def test_modify_attribute_blockDeviceMapping():
@mock_ec2_deprecated
def test_volume_tag_escaping():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    vol = conn.create_volume(10, 'us-east-1a')
    snapshot = conn.create_snapshot(vol.id, 'Desc')


@@ -42,7 +42,7 @@ def test_add_servers():
@freeze_time("2014-01-01 05:00:00")
@mock_ec2_deprecated
def test_instance_launch_and_terminate():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    with assert_raises(EC2ResponseError) as ex:
        reservation = conn.run_instances('ami-1234abcd', dry_run=True)
@@ -820,7 +820,7 @@ def test_run_instance_with_instance_type():
@mock_ec2_deprecated
def test_run_instance_with_default_placement():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]


@@ -4,12 +4,46 @@ import tests.backport_assert_raises
from nose.tools import assert_raises
import boto
import six
import sure  # noqa
from boto.exception import EC2ResponseError
from moto import mock_ec2_deprecated
from .helpers import rsa_check_private_key
RSA_PUBLIC_KEY_OPENSSH = b"""\
ssh-rsa \
AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H\
6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo\8kweyMQrhrt6HaKGgromRiz37LQx\
4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBp\
JzbZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6\
A3t8mL7r91aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2X\
qusUO07jKuSxzPumXBeU+JEtx0J1tqZwJlpGt2R+0qN7nKnPl2+hx \
moto@github.com"""
RSA_PUBLIC_KEY_RFC4716 = b"""\
---- BEGIN SSH2 PUBLIC KEY ----
AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H6cZANO
Q+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37LQx4YIAcBi4Zd023
mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBpJzbZlPN45ZCTk9ck0fS
VHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6A3t8mL7r91aM5q6QOQm219lct
FM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2XqusUO07jKuSxzPumXBeU+JEtx0J1tqZ
wJlpGt2R+0qN7nKnPl2+hx
---- END SSH2 PUBLIC KEY ----
"""
RSA_PUBLIC_KEY_FINGERPRINT = "6a:49:07:1c:7e:bd:d2:bd:96:25:fe:b5:74:83:ae:fd"
DSA_PUBLIC_KEY_OPENSSH = b"""ssh-dss \
AAAAB3NzaC1kc3MAAACBAJ0aXctVwbN6VB81gpo8R7DUk8zXRjZvrkg8Y8vEGt63gklpNJNsLXtEUXkl5D4c0nD2FZO1rJNqFoe\
OQOCoGSfclHvt9w4yPl/lUEtb3Qtj1j80MInETHr19vaSunRk5R+M+8YH+LLcdYdz7MijuGey02mbi0H9K5nUIcuLMArVAAAAFQ\
D0RDvsObRWBlnaW8645obZBM86jwAAAIBNZwf3B4krIzAwVfkMHLDSdAvs7lOWE7o8SJLzr9t4a9HhYp9SLbMzJ815KWfidEYV2\
+s4ZaPCfcZ1GENFRbE8rixz5eMAjEUXEPMJkblDZTHzMsH96z2cOCQZ0vfOmgznsf18Uf725pqo9OqAioEsTJjX8jtI2qNPEBU0\
uhMSZQAAAIBBMGhDu5CWPUlS2QG7vzmzw81XasmHE/s2YPDRbolkriwlunpgwZhCscoQP8HFHY+DLUVvUb+GZwBmFt4l1uHl03b\
ffsm7UIHtCBYERr9Nx0u20ldfhkgB1lhaJb5o0ZJ3pmJ38KChfyHe5EUcqRdEFo89Mp72VI2Z6UHyL175RA== \
moto@github.com"""
@mock_ec2_deprecated
def test_key_pairs_empty():
@@ -33,14 +67,15 @@ def test_key_pairs_create():
    conn = boto.connect_ec2('the_key', 'the_secret')
    with assert_raises(EC2ResponseError) as ex:
-        kp = conn.create_key_pair('foo', dry_run=True)
+        conn.create_key_pair('foo', dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set')
    kp = conn.create_key_pair('foo')
-    assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
+    rsa_check_private_key(kp.material)
    kps = conn.get_all_key_pairs()
    assert len(kps) == 1
    assert kps[0].name == 'foo'
@@ -49,13 +84,19 @@ def test_key_pairs_create_two():
@mock_ec2_deprecated
def test_key_pairs_create_two():
    conn = boto.connect_ec2('the_key', 'the_secret')
-    kp = conn.create_key_pair('foo')
-    kp = conn.create_key_pair('bar')
-    assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
+    kp1 = conn.create_key_pair('foo')
+    rsa_check_private_key(kp1.material)
+    kp2 = conn.create_key_pair('bar')
+    rsa_check_private_key(kp2.material)
+    assert kp1.material != kp2.material
    kps = conn.get_all_key_pairs()
    kps.should.have.length_of(2)
-    [i.name for i in kps].should.contain('foo')
-    [i.name for i in kps].should.contain('bar')
+    assert {i.name for i in kps} == {'foo', 'bar'}
    kps = conn.get_all_key_pairs('foo')
    kps.should.have.length_of(1)
    kps[0].name.should.equal('foo')
@@ -64,8 +105,7 @@ def test_key_pairs_create_exist():
@mock_ec2_deprecated
def test_key_pairs_create_exist():
    conn = boto.connect_ec2('the_key', 'the_secret')
-    kp = conn.create_key_pair('foo')
-    assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
+    conn.create_key_pair('foo')
    assert len(conn.get_all_key_pairs()) == 1
    with assert_raises(EC2ResponseError) as cm:
@@ -105,23 +145,30 @@ def test_key_pairs_import():
    conn = boto.connect_ec2('the_key', 'the_secret')
    with assert_raises(EC2ResponseError) as ex:
-        kp = conn.import_key_pair('foo', b'content', dry_run=True)
+        conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH, dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set')
-    kp = conn.import_key_pair('foo', b'content')
-    assert kp.name == 'foo'
+    kp1 = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH)
+    assert kp1.name == 'foo'
+    assert kp1.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT
+    kp2 = conn.import_key_pair('foo2', RSA_PUBLIC_KEY_RFC4716)
+    assert kp2.name == 'foo2'
+    assert kp2.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT
    kps = conn.get_all_key_pairs()
-    assert len(kps) == 1
-    assert kps[0].name == 'foo'
+    assert len(kps) == 2
+    assert kps[0].name == kp1.name
+    assert kps[1].name == kp2.name
@mock_ec2_deprecated
def test_key_pairs_import_exist():
    conn = boto.connect_ec2('the_key', 'the_secret')
-    kp = conn.import_key_pair('foo', b'content')
+    kp = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH)
    assert kp.name == 'foo'
    assert len(conn.get_all_key_pairs()) == 1
@@ -132,6 +179,32 @@ def test_key_pairs_import_exist():
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pairs_invalid():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as ex:
conn.import_key_pair('foo', b'')
ex.exception.error_code.should.equal('InvalidKeyPair.Format')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'Key is not in valid OpenSSH public key format')
with assert_raises(EC2ResponseError) as ex:
conn.import_key_pair('foo', b'garbage')
ex.exception.error_code.should.equal('InvalidKeyPair.Format')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'Key is not in valid OpenSSH public key format')
with assert_raises(EC2ResponseError) as ex:
conn.import_key_pair('foo', DSA_PUBLIC_KEY_OPENSSH)
ex.exception.error_code.should.equal('InvalidKeyPair.Format')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'Key is not in valid OpenSSH public key format')
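Editor's note (not part of the commit): the three rejections above all reduce to "not an OpenSSH-format RSA public key". A hedged sketch of that kind of check follows; it is not moto's actual implementation, and the import test further up also accepts RFC4716 input, whose conversion to OpenSSH form is omitted here.

from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import load_ssh_public_key

def is_valid_openssh_rsa_public_key(material):
    # Empty input, garbage, and non-RSA keys such as the ssh-dss key above
    # all come out False, matching the InvalidKeyPair.Format assertions.
    try:
        key = load_ssh_public_key(material, backend=default_backend())
    except (ValueError, UnsupportedAlgorithm):
        return False
    return isinstance(key, rsa.RSAPublicKey)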
@mock_ec2_deprecated
def test_key_pair_filters():
    conn = boto.connect_ec2('the_key', 'the_secret')


@@ -2,6 +2,8 @@ from __future__ import unicode_literals
import boto
import boto3
import sure  # noqa
+from nose.tools import assert_raises
+from botocore.exceptions import ClientError
from moto import mock_ec2_deprecated, mock_ec2
@@ -28,7 +30,7 @@ def test_new_subnet_associates_with_default_network_acl():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.get_all_vpcs()[0]
-    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
+    subnet = conn.create_subnet(vpc.id, "172.31.48.0/20")
    all_network_acls = conn.get_all_network_acls()
    all_network_acls.should.have.length_of(1)
@@ -214,3 +216,37 @@ def test_default_network_acl_default_entries():
        unique_entries.append(entry)
    unique_entries.should.have.length_of(4)
@mock_ec2
def test_delete_default_network_acl_default_entry():
ec2 = boto3.resource('ec2', region_name='us-west-1')
default_network_acl = next(iter(ec2.network_acls.all()), None)
default_network_acl.is_default.should.be.ok
default_network_acl.entries.should.have.length_of(4)
first_default_network_acl_entry = default_network_acl.entries[0]
default_network_acl.delete_entry(Egress=first_default_network_acl_entry['Egress'],
RuleNumber=first_default_network_acl_entry['RuleNumber'])
default_network_acl.entries.should.have.length_of(3)
@mock_ec2
def test_duplicate_network_acl_entry():
ec2 = boto3.resource('ec2', region_name='us-west-1')
default_network_acl = next(iter(ec2.network_acls.all()), None)
default_network_acl.is_default.should.be.ok
rule_number = 200
egress = True
default_network_acl.create_entry(CidrBlock="0.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="allow", RuleNumber=rule_number)
with assert_raises(ClientError) as ex:
default_network_acl.create_entry(CidrBlock="10.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="deny", RuleNumber=rule_number)
str(ex.exception).should.equal(
"An error occurred (NetworkAclEntryAlreadyExists) when calling the CreateNetworkAclEntry "
"operation: The network acl entry identified by {} already exists.".format(rule_number))


@@ -68,8 +68,10 @@ def test_create_autoscaling_group():
        image_id='ami-abcd1234',
        instance_type='m1.small',
    )
-    us_conn.create_launch_configuration(config)
+    x = us_conn.create_launch_configuration(config)
+    us_subnet_id = list(ec2_backends['us-east-1'].subnets['us-east-1c'].keys())[0]
+    ap_subnet_id = list(ec2_backends['ap-northeast-1'].subnets['ap-northeast-1a'].keys())[0]
    group = boto.ec2.autoscale.AutoScalingGroup(
        name='us_tester_group',
        availability_zones=['us-east-1c'],
@@ -82,7 +84,7 @@ def test_create_autoscaling_group():
        launch_config=config,
        load_balancers=["us_test_lb"],
        placement_group="us_test_placement",
-        vpc_zone_identifier='subnet-1234abcd',
+        vpc_zone_identifier=us_subnet_id,
        termination_policies=["OldestInstance", "NewestInstance"],
    )
    us_conn.create_auto_scaling_group(group)
@@ -107,7 +109,7 @@ def test_create_autoscaling_group():
        launch_config=config,
        load_balancers=["ap_test_lb"],
        placement_group="ap_test_placement",
-        vpc_zone_identifier='subnet-5678efgh',
+        vpc_zone_identifier=ap_subnet_id,
        termination_policies=["OldestInstance", "NewestInstance"],
    )
    ap_conn.create_auto_scaling_group(group)
@@ -121,7 +123,7 @@ def test_create_autoscaling_group():
    us_group.desired_capacity.should.equal(2)
    us_group.max_size.should.equal(2)
    us_group.min_size.should.equal(2)
-    us_group.vpc_zone_identifier.should.equal('subnet-1234abcd')
+    us_group.vpc_zone_identifier.should.equal(us_subnet_id)
    us_group.launch_config_name.should.equal('us_tester')
    us_group.default_cooldown.should.equal(60)
    us_group.health_check_period.should.equal(100)
@@ -137,7 +139,7 @@ def test_create_autoscaling_group():
    ap_group.desired_capacity.should.equal(2)
    ap_group.max_size.should.equal(2)
    ap_group.min_size.should.equal(2)
-    ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh')
+    ap_group.vpc_zone_identifier.should.equal(ap_subnet_id)
    ap_group.launch_config_name.should.equal('ap_tester')
    ap_group.default_cooldown.should.equal(60)
    ap_group.health_check_period.should.equal(100)


@@ -6,6 +6,7 @@ from nose.tools import assert_raises
import boto
import boto3
from boto.exception import EC2ResponseError
+from botocore.exceptions import ClientError
import sure  # noqa
from moto import mock_ec2, mock_ec2_deprecated
@@ -528,3 +529,26 @@ def test_network_acl_tagging():
                            if na.id == route_table.id)
    test_route_table.tags.should.have.length_of(1)
    test_route_table.tags["a key"].should.equal("some value")
@mock_ec2
def test_create_route_with_invalid_destination_cidr_block_parameter():
ec2 = boto3.resource('ec2', region_name='us-west-1')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
vpc.reload()
vpc.is_default.shouldnt.be.ok
route_table = ec2.create_route_table(VpcId=vpc.id)
route_table.reload()
internet_gateway = ec2.create_internet_gateway()
vpc.attach_internet_gateway(InternetGatewayId=internet_gateway.id)
internet_gateway.reload()
destination_cidr_block = '1000.1.0.0/20'
with assert_raises(ClientError) as ex:
route = route_table.create_route(DestinationCidrBlock=destination_cidr_block, GatewayId=internet_gateway.id)
str(ex.exception).should.equal(
"An error occurred (InvalidParameterValue) when calling the CreateRoute "
"operation: Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(destination_cidr_block))


@@ -501,7 +501,7 @@ def test_sec_group_rule_limit_vpc():
    ec2_conn = boto.connect_ec2()
    vpc_conn = boto.connect_vpc()
-    vpc = vpc_conn.create_vpc('10.0.0.0/8')
+    vpc = vpc_conn.create_vpc('10.0.0.0/16')
    sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id)
    other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id)


@@ -7,7 +7,7 @@ from moto import mock_ec2
def get_subnet_id(conn):
-    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
+    vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
    subnet = conn.create_subnet(
        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
    subnet_id = subnet['SubnetId']


@@ -17,7 +17,7 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds
@mock_ec2
def test_request_spot_instances():
    conn = boto3.client('ec2', 'us-east-1')
-    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
+    vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
    subnet = conn.create_subnet(
        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
    subnet_id = subnet['SubnetId']


@@ -7,7 +7,7 @@ import boto3
import boto
import boto.vpc
from boto.exception import EC2ResponseError
-from botocore.exceptions import ParamValidationError
+from botocore.exceptions import ParamValidationError, ClientError
import json
import sure  # noqa
@@ -84,7 +84,7 @@ def test_default_subnet():
    default_vpc.is_default.should.be.ok
    subnet = ec2.create_subnet(
-        VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a')
+        VpcId=default_vpc.id, CidrBlock='172.31.48.0/20', AvailabilityZone='us-west-1a')
    subnet.reload()
    subnet.map_public_ip_on_launch.shouldnt.be.ok
@@ -126,7 +126,7 @@ def test_modify_subnet_attribute():
    vpc = list(ec2.vpcs.all())[0]
    subnet = ec2.create_subnet(
-        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a')
+        VpcId=vpc.id, CidrBlock="172.31.48.0/20", AvailabilityZone='us-west-1a')
    # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action
    subnet.reload()
@@ -289,3 +289,52 @@ def test_subnet_tags_through_cloudformation():
    subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0]
    subnet.tags["foo"].should.equal("bar")
    subnet.tags["blah"].should.equal("baz")
@mock_ec2
def test_create_subnet_with_invalid_cidr_range():
ec2 = boto3.resource('ec2', region_name='us-west-1')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
vpc.reload()
vpc.is_default.shouldnt.be.ok
subnet_cidr_block = '10.1.0.0/20'
with assert_raises(ClientError) as ex:
subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
str(ex.exception).should.equal(
"An error occurred (InvalidSubnet.Range) when calling the CreateSubnet "
"operation: The CIDR '{}' is invalid.".format(subnet_cidr_block))
@mock_ec2
def test_create_subnet_with_invalid_cidr_block_parameter():
ec2 = boto3.resource('ec2', region_name='us-west-1')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
vpc.reload()
vpc.is_default.shouldnt.be.ok
subnet_cidr_block = '1000.1.0.0/20'
with assert_raises(ClientError) as ex:
subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
str(ex.exception).should.equal(
"An error occurred (InvalidParameterValue) when calling the CreateSubnet "
"operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(subnet_cidr_block))
@mock_ec2
def test_create_subnets_with_overlapping_cidr_blocks():
ec2 = boto3.resource('ec2', region_name='us-west-1')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
vpc.reload()
vpc.is_default.shouldnt.be.ok
subnet_cidr_block = '10.0.0.0/24'
with assert_raises(ClientError) as ex:
subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
str(ex.exception).should.equal(
"An error occurred (InvalidSubnet.Conflict) when calling the CreateSubnet "
"operation: The CIDR '{}' conflicts with another subnet".format(subnet_cidr_block))


@@ -1,8 +1,12 @@
from moto.ec2 import utils
+from .helpers import rsa_check_private_key
def test_random_key_pair():
    key_pair = utils.random_key_pair()
-    assert len(key_pair['fingerprint']) == 59
-    assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----')
-    assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----')
+    rsa_check_private_key(key_pair['material'])
+
+    # AWS uses MD5 fingerprints, which are 47 characters long, *not* SHA1
+    # fingerprints with 59 characters.
+    assert len(key_pair['fingerprint']) == 47
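Editor's note (not part of the commit): a sketch of how a 47-character, colon-separated MD5 fingerprint can be derived from generated key material. Hashing the DER-encoded public half is an assumption for illustration; this diff does not show which bytes moto actually digests.

import hashlib

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization

def md5_fingerprint(private_key_material):
    # Parse the PEM, DER-encode the public half, and take MD5:
    # 16 bytes -> 32 hex digits -> 47 characters once colon-separated.
    key = serialization.load_pem_private_key(
        private_key_material.encode('ascii'),
        password=None, backend=default_backend())
    der = key.public_key().public_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PublicFormat.SubjectPublicKeyInfo)
    digest = hashlib.md5(der).hexdigest()
    return ':'.join(digest[i:i + 2] for i in range(0, 32, 2))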


@@ -107,14 +107,19 @@ def test_vpc_peering_connections_cross_region():
    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
    # create peering
-    vpc_pcx = ec2_usw1.create_vpc_peering_connection(
+    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
        VpcId=vpc_usw1.id,
        PeerVpcId=vpc_apn1.id,
        PeerRegion='ap-northeast-1',
    )
-    vpc_pcx.status['Code'].should.equal('initiating-request')
-    vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id)
-    vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id)
+    vpc_pcx_usw1.status['Code'].should.equal('initiating-request')
+    vpc_pcx_usw1.requester_vpc.id.should.equal(vpc_usw1.id)
+    vpc_pcx_usw1.accepter_vpc.id.should.equal(vpc_apn1.id)
+    # test cross region vpc peering connection exist
+    vpc_pcx_apn1 = ec2_apn1.VpcPeeringConnection(vpc_pcx_usw1.id)
+    vpc_pcx_apn1.id.should.equal(vpc_pcx_usw1.id)
+    vpc_pcx_apn1.requester_vpc.id.should.equal(vpc_usw1.id)
+    vpc_pcx_apn1.accepter_vpc.id.should.equal(vpc_apn1.id)
@mock_ec2
@@ -131,3 +136,148 @@ def test_vpc_peering_connections_cross_region_fail():
        PeerVpcId=vpc_apn1.id,
        PeerRegion='ap-northeast-2')
    cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound')
@mock_ec2
def test_vpc_peering_connections_cross_region_accept():
# create vpc in us-west-1 and ap-northeast-1
ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
# create peering
vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
VpcId=vpc_usw1.id,
PeerVpcId=vpc_apn1.id,
PeerRegion='ap-northeast-1',
)
# accept peering from ap-northeast-1
ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
acp_pcx_apn1 = ec2_apn1.accept_vpc_peering_connection(
VpcPeeringConnectionId=vpc_pcx_usw1.id
)
des_pcx_apn1 = ec2_usw1.describe_vpc_peering_connections(
VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
)
des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
)
acp_pcx_apn1['VpcPeeringConnection']['Status']['Code'].should.equal('active')
des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active')
des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active')
@mock_ec2
def test_vpc_peering_connections_cross_region_reject():
# create vpc in us-west-1 and ap-northeast-1
ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
# create peering
vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
VpcId=vpc_usw1.id,
PeerVpcId=vpc_apn1.id,
PeerRegion='ap-northeast-1',
)
# reject peering from ap-northeast-1
ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
rej_pcx_apn1 = ec2_apn1.reject_vpc_peering_connection(
VpcPeeringConnectionId=vpc_pcx_usw1.id
)
des_pcx_apn1 = ec2_usw1.describe_vpc_peering_connections(
VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
)
des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
)
rej_pcx_apn1['Return'].should.equal(True)
des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected')
des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected')
@mock_ec2
def test_vpc_peering_connections_cross_region_delete():
# create vpc in us-west-1 and ap-northeast-1
ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
# create peering
vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
VpcId=vpc_usw1.id,
PeerVpcId=vpc_apn1.id,
PeerRegion='ap-northeast-1',
)
# reject peering from ap-northeast-1
ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
del_pcx_apn1 = ec2_apn1.delete_vpc_peering_connection(
VpcPeeringConnectionId=vpc_pcx_usw1.id
)
des_pcx_apn1 = ec2_usw1.describe_vpc_peering_connections(
VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
)
des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
)
del_pcx_apn1['Return'].should.equal(True)
des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted')
des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted')
@mock_ec2
def test_vpc_peering_connections_cross_region_accept_wrong_region():
# create vpc in us-west-1 and ap-northeast-1
ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
# create peering
vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
VpcId=vpc_usw1.id,
PeerVpcId=vpc_apn1.id,
PeerRegion='ap-northeast-1',
)
# accept wrong peering from us-west-1 which will raise error
ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
with assert_raises(ClientError) as cm:
ec2_usw1.accept_vpc_peering_connection(
VpcPeeringConnectionId=vpc_pcx_usw1.id
)
cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted')
exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \
'peering connection {1} must be ' \
'accepted in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1')
cm.exception.response['Error']['Message'].should.equal(exp_msg)
@mock_ec2
def test_vpc_peering_connections_cross_region_reject_wrong_region():
# create vpc in us-west-1 and ap-northeast-1
ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
# create peering
vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
VpcId=vpc_usw1.id,
PeerVpcId=vpc_apn1.id,
PeerRegion='ap-northeast-1',
)
# reject wrong peering from us-west-1 which will raise error
ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
with assert_raises(ClientError) as cm:
ec2_usw1.reject_vpc_peering_connection(
VpcPeeringConnectionId=vpc_pcx_usw1.id
)
cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted')
exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \
'peering connection {1} must be accepted or ' \
'rejected in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1')
cm.exception.response['Error']['Message'].should.equal(exp_msg)


@@ -539,3 +539,27 @@ def test_ipv6_cidr_block_association_filters():
    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state',
                                                   'Values': ['associated']}]))
    filtered_vpcs.should.be.length_of(2)  # 2 of 4 VPCs
@mock_ec2
def test_create_vpc_with_invalid_cidr_block_parameter():
ec2 = boto3.resource('ec2', region_name='us-west-1')
vpc_cidr_block = '1000.1.0.0/20'
with assert_raises(ClientError) as ex:
vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block)
str(ex.exception).should.equal(
"An error occurred (InvalidParameterValue) when calling the CreateVpc "
"operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(vpc_cidr_block))
@mock_ec2
def test_create_vpc_with_invalid_cidr_range():
ec2 = boto3.resource('ec2', region_name='us-west-1')
vpc_cidr_block = '10.1.0.0/29'
with assert_raises(ClientError) as ex:
vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block)
str(ex.exception).should.equal(
"An error occurred (InvalidVpc.Range) when calling the CreateVpc "
"operation: The CIDR '{}' is invalid.".format(vpc_cidr_block))


@@ -388,23 +388,32 @@ def test_list_services():
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service1',
        taskDefinition='test_ecs_task',
+        schedulingStrategy='REPLICA',
        desiredCount=2
    )
    _ = client.create_service(
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service2',
        taskDefinition='test_ecs_task',
+        schedulingStrategy='DAEMON',
        desiredCount=2
    )
-    response = client.list_services(
+    unfiltered_response = client.list_services(
        cluster='test_ecs_cluster'
    )
-    len(response['serviceArns']).should.equal(2)
-    response['serviceArns'][0].should.equal(
+    len(unfiltered_response['serviceArns']).should.equal(2)
+    unfiltered_response['serviceArns'][0].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1')
-    response['serviceArns'][1].should.equal(
+    unfiltered_response['serviceArns'][1].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2')
+    filtered_response = client.list_services(
+        cluster='test_ecs_cluster',
+        schedulingStrategy='REPLICA'
+    )
+    len(filtered_response['serviceArns']).should.equal(1)
+    filtered_response['serviceArns'][0].should.equal(
+        'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1')
@mock_ecs
def test_describe_services():


@@ -21,7 +21,7 @@ from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated
@mock_ec2_deprecated
def test_create_load_balancer():
    conn = boto.connect_elb()
-    ec2 = boto.connect_ec2('the_key', 'the_secret')
+    ec2 = boto.ec2.connect_to_region("us-east-1")
    security_group = ec2.create_security_group('sg-abc987', 'description')


@@ -27,7 +27,7 @@ def test_create_load_balancer():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
@@ -69,7 +69,7 @@ def test_describe_load_balancers():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    conn.create_load_balancer(
@@ -112,7 +112,7 @@ def test_add_remove_tags():
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
@@ -234,7 +234,7 @@ def test_create_elb_in_multiple_region():
        InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone=region + 'a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
@@ -275,7 +275,7 @@ def test_create_target_group_and_listeners():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
@@ -434,7 +434,7 @@ def test_create_target_group_without_non_required_parameters():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
@@ -538,7 +538,7 @@ def test_describe_paginated_balancers():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    for i in range(51):
@@ -573,7 +573,7 @@ def test_delete_load_balancer():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
@@ -606,7 +606,7 @@ def test_register_targets():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    conn.create_load_balancer(
@@ -682,7 +682,7 @@ def test_target_group_attributes():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
@@ -773,7 +773,7 @@ def test_handle_listener_rules():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
@@ -1078,7 +1078,7 @@ def test_describe_invalid_target_group():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
@@ -1124,7 +1124,7 @@ def test_describe_target_groups_no_arguments():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
@@ -1188,7 +1188,7 @@ def test_set_ip_address_type():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = client.create_load_balancer(
@@ -1238,7 +1238,7 @@ def test_set_security_groups():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = client.create_load_balancer(
@@ -1275,11 +1275,11 @@ def test_set_subnets():
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.64/26',
        AvailabilityZone='us-east-1b')
    subnet3 = ec2.create_subnet(
        VpcId=vpc.id,
@@ -1332,7 +1332,7 @@ def test_set_subnets():
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = client.create_load_balancer(
@@ -1421,7 +1421,7 @@ def test_modify_listener_http_to_https():
        AvailabilityZone='eu-central-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
        AvailabilityZone='eu-central-1b')
    response = client.create_load_balancer(
@@ -1586,3 +1586,143 @@ def test_create_target_groups_through_cloudformation():
    assert len(
        [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')]
    ) == 2
@mock_elbv2
@mock_ec2
def test_redirect_action_listener_rule():
conn = boto3.client('elbv2', region_name='us-east-1')
ec2 = boto3.resource('ec2', region_name='us-east-1')
security_group = ec2.create_security_group(
GroupName='a-security-group', Description='First One')
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
subnet1 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1a')
subnet2 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.128/26',
AvailabilityZone='us-east-1b')
response = conn.create_load_balancer(
Name='my-lb',
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme='internal',
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
response = conn.create_listener(LoadBalancerArn=load_balancer_arn,
Protocol='HTTP',
Port=80,
DefaultActions=[
{'Type': 'redirect',
'RedirectConfig': {
'Protocol': 'HTTPS',
'Port': '443',
'StatusCode': 'HTTP_301'
}}])
listener = response.get('Listeners')[0]
expected_default_actions = [{
'Type': 'redirect',
'RedirectConfig': {
'Protocol': 'HTTPS',
'Port': '443',
'StatusCode': 'HTTP_301'
}
}]
listener.get('DefaultActions').should.equal(expected_default_actions)
listener_arn = listener.get('ListenerArn')
describe_rules_response = conn.describe_rules(ListenerArn=listener_arn)
describe_rules_response['Rules'][0]['Actions'].should.equal(expected_default_actions)
describe_listener_response = conn.describe_listeners(ListenerArns=[listener_arn, ])
describe_listener_actions = describe_listener_response['Listeners'][0]['DefaultActions']
describe_listener_actions.should.equal(expected_default_actions)
modify_listener_response = conn.modify_listener(ListenerArn=listener_arn, Port=81)
modify_listener_actions = modify_listener_response['Listeners'][0]['DefaultActions']
modify_listener_actions.should.equal(expected_default_actions)
@mock_elbv2
@mock_cloudformation
def test_redirect_action_listener_rule_cloudformation():
    cnf_conn = boto3.client('cloudformation', region_name='us-east-1')
    elbv2_client = boto3.client('elbv2', region_name='us-east-1')

    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "ECS Cluster Test CloudFormation",
        "Resources": {
            "testVPC": {
                "Type": "AWS::EC2::VPC",
                "Properties": {
                    "CidrBlock": "10.0.0.0/16",
                },
            },
            "subnet1": {
                "Type": "AWS::EC2::Subnet",
                "Properties": {
                    "CidrBlock": "10.0.0.0/24",
                    "VpcId": {"Ref": "testVPC"},
                    "AvailabilityZone": "us-east-1a",
                },
            },
            "subnet2": {
                "Type": "AWS::EC2::Subnet",
                "Properties": {
                    "CidrBlock": "10.0.1.0/24",
                    "VpcId": {"Ref": "testVPC"},
                    "AvailabilityZone": "us-east-1b",
                },
            },
            "testLb": {
                "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
                "Properties": {
                    "Name": "my-lb",
                    "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}],
                    "Type": "application",
                    "SecurityGroups": [],
                }
            },
            "testListener": {
                "Type": "AWS::ElasticLoadBalancingV2::Listener",
                "Properties": {
                    "LoadBalancerArn": {"Ref": "testLb"},
                    "Port": 80,
                    "Protocol": "HTTP",
                    "DefaultActions": [{
                        "Type": "redirect",
                        "RedirectConfig": {
                            "Port": "443",
                            "Protocol": "HTTPS",
                            "StatusCode": "HTTP_301",
                        }
                    }]
                }
            }
        }
    }
    template_json = json.dumps(template)
    cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json)

    describe_load_balancers_response = elbv2_client.describe_load_balancers(Names=['my-lb', ])
    describe_load_balancers_response['LoadBalancers'].should.have.length_of(1)
    load_balancer_arn = describe_load_balancers_response['LoadBalancers'][0]['LoadBalancerArn']

    describe_listeners_response = elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn)
    describe_listeners_response['Listeners'].should.have.length_of(1)
    describe_listeners_response['Listeners'][0]['DefaultActions'].should.equal([{
        'Type': 'redirect',
        'RedirectConfig': {
            'Port': '443', 'Protocol': 'HTTPS', 'StatusCode': 'HTTP_301',
        }
    }, ])

View File

@@ -432,6 +432,47 @@ def test_run_job_flow_with_instance_groups():
        x['BidPrice'].should.equal(y['BidPrice'])

@mock_emr
def test_run_job_flow_with_custom_ami():
    client = boto3.client('emr', region_name='us-east-1')

    with assert_raises(ClientError) as ex:
        # CustomAmiId available in Amazon EMR 5.7.0 and later
        args = deepcopy(run_job_flow_args)
        args['CustomAmiId'] = 'MyEmrCustomId'
        args['ReleaseLabel'] = 'emr-5.6.0'
        client.run_job_flow(**args)
    ex.exception.response['Error']['Code'].should.equal('ValidationException')
    ex.exception.response['Error']['Message'].should.equal('Custom AMI is not allowed')

    with assert_raises(ClientError) as ex:
        args = deepcopy(run_job_flow_args)
        args['CustomAmiId'] = 'MyEmrCustomId'
        args['AmiVersion'] = '3.8.1'
        client.run_job_flow(**args)
    ex.exception.response['Error']['Code'].should.equal('ValidationException')
    ex.exception.response['Error']['Message'].should.equal(
        'Custom AMI is not supported in this version of EMR')

    with assert_raises(ClientError) as ex:
        # AMI version and release label exception raises before CustomAmi exception
        args = deepcopy(run_job_flow_args)
        args['CustomAmiId'] = 'MyEmrCustomId'
        args['ReleaseLabel'] = 'emr-5.6.0'
        args['AmiVersion'] = '3.8.1'
        client.run_job_flow(**args)
    ex.exception.response['Error']['Code'].should.equal('ValidationException')
    ex.exception.response['Error']['Message'].should.contain(
        'Only one AMI version and release label may be specified.')

    args = deepcopy(run_job_flow_args)
    args['CustomAmiId'] = 'MyEmrCustomAmi'
    args['ReleaseLabel'] = 'emr-5.7.0'
    cluster_id = client.run_job_flow(**args)['JobFlowId']
    resp = client.describe_cluster(ClusterId=cluster_id)
    resp['Cluster']['CustomAmiId'].should.equal('MyEmrCustomAmi')
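
A condensed sketch of the validation order these assertions encode; the helpers below are hypothetical and are not moto's actual implementation, only the error strings are taken from the assertions above:

# Hypothetical condensation of the CustomAmiId rules exercised above.
def release_tuple(label):
    # 'emr-5.6.0' -> (5, 6, 0); numeric compare avoids lexicographic pitfalls
    return tuple(int(part) for part in label.split('-', 1)[1].split('.'))

def validate_custom_ami(custom_ami_id=None, release_label=None, ami_version=None):
    if release_label and ami_version:
        # checked first, before any CustomAmiId handling
        raise ValueError('Only one AMI version and release label may be specified.')
    if custom_ami_id:
        if ami_version:
            raise ValueError('Custom AMI is not supported in this version of EMR')
        if release_label and release_tuple(release_label) < (5, 7):
            raise ValueError('Custom AMI is not allowed')

validate_custom_ami(custom_ami_id='MyEmrCustomAmi', release_label='emr-5.7.0')  # passes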
@mock_emr
def test_set_termination_protection():
    client = boto3.client('emr', region_name='us-east-1')

View File

@@ -209,6 +209,27 @@ def test_get_table_when_database_not_exits():
    exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found')

@mock_glue
def test_delete_table():
    client = boto3.client('glue', region_name='us-east-1')
    database_name = 'myspecialdatabase'
    helpers.create_database(client, database_name)

    table_name = 'myspecialtable'
    table_input = helpers.create_table_input(database_name, table_name)
    helpers.create_table(client, database_name, table_name, table_input)

    result = client.delete_table(DatabaseName=database_name, Name=table_name)
    result['ResponseMetadata']['HTTPStatusCode'].should.equal(200)

    # confirm table is deleted
    with assert_raises(ClientError) as exc:
        helpers.get_table(client, database_name, table_name)
    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
    exc.exception.response['Error']['Message'].should.match('Table myspecialtable not found')
@mock_glue
def test_get_partitions_empty():
    client = boto3.client('glue', region_name='us-east-1')

View File

@@ -128,7 +128,6 @@ def test_create_role_and_instance_profile():
    profile = conn.create_instance_profile('my-other-profile')
    profile.path.should.equal('/')

@mock_iam_deprecated()
def test_remove_role_from_instance_profile():
    conn = boto.connect_iam()
@@ -1292,4 +1291,22 @@ def test_create_role_no_path():
    conn = boto3.client('iam', region_name='us-east-1')
    resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test')
    resp.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-role')
    resp.get('Role').should_not.have.key('PermissionsBoundary')


@mock_iam()
def test_create_role_with_permissions_boundary():
    conn = boto3.client('iam', region_name='us-east-1')
    boundary = 'arn:aws:iam::123456789012:policy/boundary'
    resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=boundary)
    expected = {
        'PermissionsBoundaryType': 'PermissionsBoundaryPolicy',
        'PermissionsBoundaryArn': boundary
    }
    resp.get('Role').get('PermissionsBoundary').should.equal(expected)

    invalid_boundary_arn = 'arn:aws:iam::123456789:not_a_boundary'
    with assert_raises(ClientError):
        conn.create_role(RoleName='bad-boundary', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=invalid_boundary_arn)

    # Ensure the PermissionsBoundary is included in role listing as well
    conn.list_roles().get('Roles')[0].get('PermissionsBoundary').should.equal(expected)

View File

@@ -350,7 +350,7 @@ def test_list_things_with_attribute_and_thing_type_filter_and_next_token():
@mock_iot
def test_certs():
-   client = boto3.client('iot', region_name='ap-northeast-1')
+   client = boto3.client('iot', region_name='us-east-1')
    cert = client.create_keys_and_certificate(setAsActive=True)
    cert.should.have.key('certificateArn').which.should_not.be.none
    cert.should.have.key('certificateId').which.should_not.be.none
@@ -367,6 +367,29 @@ def test_certs():
    cert_desc.should.have.key('certificateId').which.should_not.be.none
    cert_desc.should.have.key('certificatePem').which.should_not.be.none
    cert_desc.should.have.key('status').which.should.equal('ACTIVE')
    cert_pem = cert_desc['certificatePem']

    res = client.list_certificates()
    for cert in res['certificates']:
        cert.should.have.key('certificateArn').which.should_not.be.none
        cert.should.have.key('certificateId').which.should_not.be.none
        cert.should.have.key('status').which.should_not.be.none
        cert.should.have.key('creationDate').which.should_not.be.none

    client.update_certificate(certificateId=cert_id, newStatus='REVOKED')
    cert = client.describe_certificate(certificateId=cert_id)
    cert_desc = cert['certificateDescription']
    cert_desc.should.have.key('status').which.should.equal('REVOKED')

    client.delete_certificate(certificateId=cert_id)
    res = client.list_certificates()
    res.should.have.key('certificates')

    # Test register_certificate flow
    cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True)
    cert.should.have.key('certificateId').which.should_not.be.none
    cert.should.have.key('certificateArn').which.should_not.be.none
    cert_id = cert['certificateId']
    res = client.list_certificates()
    res.should.have.key('certificates').which.should.have.length_of(1)
@@ -378,11 +401,12 @@ def test_certs():
    client.update_certificate(certificateId=cert_id, newStatus='REVOKED')
    cert = client.describe_certificate(certificateId=cert_id)
-   cert_desc.should.have.key('status').which.should.equal('ACTIVE')
+   cert_desc = cert['certificateDescription']
+   cert_desc.should.have.key('status').which.should.equal('REVOKED')

    client.delete_certificate(certificateId=cert_id)
    res = client.list_certificates()
-   res.should.have.key('certificates').which.should.have.length_of(0)
+   res.should.have.key('certificates')
@mock_iot

View File

@@ -1,12 +1,13 @@
from __future__ import unicode_literals

-import boto.kinesis
-from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException
-import boto3
-import sure  # noqa
import datetime
import time

+import boto.kinesis
+import boto3
+from boto.kinesis.exceptions import ResourceNotFoundException, \
+    InvalidArgumentException

from moto import mock_kinesis, mock_kinesis_deprecated
@@ -73,6 +74,23 @@ def test_list_many_streams():
    has_more_streams.should.equal(False)

@mock_kinesis
def test_describe_stream_summary():
    conn = boto3.client('kinesis', region_name="us-west-2")
    stream_name = 'my_stream_summary'
    shard_count = 5
    conn.create_stream(StreamName=stream_name, ShardCount=shard_count)

    resp = conn.describe_stream_summary(StreamName=stream_name)
    stream = resp["StreamDescriptionSummary"]

    stream["StreamName"].should.equal(stream_name)
    stream["OpenShardCount"].should.equal(shard_count)
    stream["StreamARN"].should.equal(
        "arn:aws:kinesis:us-west-2:123456789012:{}".format(stream_name))
    stream["StreamStatus"].should.equal("ACTIVE")
@mock_kinesis_deprecated
def test_basic_shard_iterator():
    conn = boto.kinesis.connect_to_region("us-west-2")
@@ -100,7 +118,8 @@ def test_get_invalid_shard_iterator():
    conn.create_stream(stream_name, 1)

    conn.get_shard_iterator.when.called_with(
-       stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException)
+       stream_name, "123", 'TRIM_HORIZON').should.throw(
+       ResourceNotFoundException)
@mock_kinesis_deprecated
@@ -354,8 +373,8 @@ def test_get_records_timestamp_filtering():
    timestamp = datetime.datetime.utcnow()

    conn.put_record(StreamName=stream_name,
                    Data='1',
                    PartitionKey='1')

    response = conn.describe_stream(StreamName=stream_name)
    shard_id = response['StreamDescription']['Shards'][0]['ShardId']
@@ -368,7 +387,7 @@ def test_get_records_timestamp_filtering():
    response = conn.get_records(ShardIterator=shard_iterator)
    response['Records'].should.have.length_of(1)
    response['Records'][0]['PartitionKey'].should.equal('1')
-   response['Records'][0]['ApproximateArrivalTimestamp'].should.be.\
+   response['Records'][0]['ApproximateArrivalTimestamp'].should.be. \
        greater_than(timestamp)
    response['MillisBehindLatest'].should.equal(0)
@@ -461,7 +480,8 @@ def test_invalid_shard_iterator_type():
    response = conn.describe_stream(stream_name)
    shard_id = response['StreamDescription']['Shards'][0]['ShardId']
    response = conn.get_shard_iterator.when.called_with(
-       stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException)
+       stream_name, shard_id, 'invalid-type').should.throw(
+       InvalidArgumentException)
@mock_kinesis_deprecated
@@ -549,7 +569,8 @@ def test_split_shard():
    shard_range = shards[0]['HashKeyRange']
    new_starting_hash = (
-       int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2
+       int(shard_range['EndingHashKey']) + int(
+           shard_range['StartingHashKey'])) // 2
    conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash))

    stream_response = conn.describe_stream(stream_name)
@@ -562,7 +583,8 @@ def test_split_shard():
    shard_range = shards[2]['HashKeyRange']
    new_starting_hash = (
-       int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2
+       int(shard_range['EndingHashKey']) + int(
+           shard_range['StartingHashKey'])) // 2
    conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash))

    stream_response = conn.describe_stream(stream_name)
@@ -592,7 +614,8 @@ def test_merge_shards():
    shards.should.have.length_of(4)

    conn.merge_shards.when.called_with(
-       stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException)
+       stream_name, 'shardId-000000000000',
+       'shardId-000000000002').should.throw(InvalidArgumentException)

    stream_response = conn.describe_stream(stream_name)

View File

@@ -18,13 +18,14 @@ from dateutil.tz import tzutc
@mock_kms_deprecated
def test_create_key():
    conn = boto.kms.connect_to_region("us-west-2")
-   key = conn.create_key(policy="my policy",
-                         description="my key", key_usage='ENCRYPT_DECRYPT')
-   key['KeyMetadata']['Description'].should.equal("my key")
-   key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT")
-   key['KeyMetadata']['Enabled'].should.equal(True)
+   with freeze_time("2015-01-01 00:00:00"):
+       key = conn.create_key(policy="my policy",
+                             description="my key", key_usage='ENCRYPT_DECRYPT')
+       key['KeyMetadata']['Description'].should.equal("my key")
+       key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT")
+       key['KeyMetadata']['Enabled'].should.equal(True)
+       key['KeyMetadata']['CreationDate'].should.equal("1420070400")
@mock_kms_deprecated

@@ -980,5 +981,3 @@ def test_put_key_policy_key_not_found():
        PolicyName='default',
        Policy='new policy'
    )

View File

@@ -17,6 +17,8 @@ def test_log_group_create():
    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
+   # AWS defaults to Never Expire for log group retention
+   assert response['logGroups'][0].get('retentionInDays') == None

    response = conn.delete_log_group(logGroupName=log_group_name)
@@ -126,3 +128,37 @@ def test_filter_logs_interleaved():
        resulting_event['timestamp'].should.equal(original_message['timestamp'])
        resulting_event['message'].should.equal(original_message['message'])

@mock_logs
def test_put_retention_policy():
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    response = conn.create_log_group(logGroupName=log_group_name)
    response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7)
    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
    assert response['logGroups'][0].get('retentionInDays') == 7

    response = conn.delete_log_group(logGroupName=log_group_name)


@mock_logs
def test_delete_retention_policy():
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    response = conn.create_log_group(logGroupName=log_group_name)
    response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7)
    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
    assert response['logGroups'][0].get('retentionInDays') == 7

    response = conn.delete_retention_policy(logGroupName=log_group_name)

    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1
    assert response['logGroups'][0].get('retentionInDays') == None

    response = conn.delete_log_group(logGroupName=log_group_name)
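
For context, and not asserted by these tests: the real PutRetentionPolicy API only accepts a fixed menu of day values, so the 7 used above is valid while e.g. 8 would be rejected by AWS. A hedged reference sketch, with the value set drawn from the AWS documentation at the time of writing; moto does not enforce this list here:

# Assumed set of values accepted by CloudWatch Logs PutRetentionPolicy;
# verify against current AWS documentation before relying on it.
VALID_RETENTION_DAYS = {1, 3, 5, 7, 14, 30, 60, 90, 120, 150,
                        180, 365, 400, 545, 731, 1827, 3653}

def is_valid_retention(days):
    return days in VALID_RETENTION_DAYS

assert is_valid_retention(7)
assert not is_valid_retention(8)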

View File

@@ -5,38 +5,36 @@ import sure  # noqa
import datetime
from moto.organizations import utils

-EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$"
-ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE
-ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE
-OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE)
-ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE
-CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE

def test_make_random_org_id():
    org_id = utils.make_random_org_id()
-   org_id.should.match(ORG_ID_REGEX)
+   org_id.should.match(utils.ORG_ID_REGEX)


def test_make_random_root_id():
    root_id = utils.make_random_root_id()
-   root_id.should.match(ROOT_ID_REGEX)
+   root_id.should.match(utils.ROOT_ID_REGEX)


def test_make_random_ou_id():
    root_id = utils.make_random_root_id()
    ou_id = utils.make_random_ou_id(root_id)
-   ou_id.should.match(OU_ID_REGEX)
+   ou_id.should.match(utils.OU_ID_REGEX)


def test_make_random_account_id():
    account_id = utils.make_random_account_id()
-   account_id.should.match(ACCOUNT_ID_REGEX)
+   account_id.should.match(utils.ACCOUNT_ID_REGEX)


def test_make_random_create_account_status_id():
    create_account_status_id = utils.make_random_create_account_status_id()
-   create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX)
+   create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)


+def test_make_random_service_control_policy_id():
+    service_control_policy_id = utils.make_random_service_control_policy_id()
+    service_control_policy_id.should.match(utils.SCP_ID_REGEX)
def validate_organization(response):
@@ -50,7 +48,7 @@ def validate_organization(response):
        'MasterAccountEmail',
        'MasterAccountId',
    ])
-   org['Id'].should.match(ORG_ID_REGEX)
+   org['Id'].should.match(utils.ORG_ID_REGEX)
    org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID)
    org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format(
        org['MasterAccountId'],
@@ -72,7 +70,7 @@ def validate_roots(org, response):
    response.should.have.key('Roots').should.be.a(list)
    response['Roots'].should_not.be.empty
    root = response['Roots'][0]
-   root.should.have.key('Id').should.match(ROOT_ID_REGEX)
+   root.should.have.key('Id').should.match(utils.ROOT_ID_REGEX)
    root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
@@ -87,7 +85,7 @@ def validate_roots(org, response):
def validate_organizational_unit(org, response):
    response.should.have.key('OrganizationalUnit').should.be.a(dict)
    ou = response['OrganizationalUnit']
-   ou.should.have.key('Id').should.match(OU_ID_REGEX)
+   ou.should.have.key('Id').should.match(utils.OU_ID_REGEX)
    ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
@@ -106,13 +104,13 @@ def validate_account(org, account):
        'Name',
        'Status',
    ])
-   account['Id'].should.match(ACCOUNT_ID_REGEX)
+   account['Id'].should.match(utils.ACCOUNT_ID_REGEX)
    account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
        account['Id'],
    ))
-   account['Email'].should.match(EMAIL_REGEX)
+   account['Email'].should.match(utils.EMAIL_REGEX)
    account['JoinedMethod'].should.be.within(['INVITED', 'CREATED'])
    account['Status'].should.be.within(['ACTIVE', 'SUSPENDED'])
    account['Name'].should.be.a(six.string_types)
@@ -128,9 +126,27 @@ def validate_create_account_status(create_status):
        'RequestedTimestamp',
        'State',
    ])
-   create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX)
-   create_status['AccountId'].should.match(ACCOUNT_ID_REGEX)
+   create_status['Id'].should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)
+   create_status['AccountId'].should.match(utils.ACCOUNT_ID_REGEX)
    create_status['AccountName'].should.be.a(six.string_types)
    create_status['State'].should.equal('SUCCEEDED')
    create_status['RequestedTimestamp'].should.be.a(datetime.datetime)
    create_status['CompletedTimestamp'].should.be.a(datetime.datetime)

def validate_policy_summary(org, summary):
    summary.should.be.a(dict)
    summary.should.have.key('Id').should.match(utils.SCP_ID_REGEX)
    summary.should.have.key('Arn').should.equal(utils.SCP_ARN_FORMAT.format(
        org['MasterAccountId'],
        org['Id'],
        summary['Id'],
    ))
    summary.should.have.key('Name').should.be.a(six.string_types)
    summary.should.have.key('Description').should.be.a(six.string_types)
    summary.should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY')
    summary.should.have.key('AwsManaged').should.be.a(bool)


def validate_service_control_policy(org, response):
    response.should.have.key('PolicySummary').should.be.a(dict)
    response.should.have.key('Content').should.be.a(six.string_types)
    validate_policy_summary(org, response['PolicySummary'])

View File

@@ -1,6 +1,8 @@
from __future__ import unicode_literals

import boto3
+import json
+import six
import sure  # noqa
from botocore.exceptions import ClientError
from nose.tools import assert_raises
@@ -13,6 +15,8 @@ from .organizations_test_utils import (
    validate_organizational_unit,
    validate_account,
    validate_create_account_status,
+   validate_service_control_policy,
+   validate_policy_summary,
)
@@ -320,3 +324,271 @@ def test_list_children_exception():
    ex.operation_name.should.equal('ListChildren')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('InvalidInputException')

# Service Control Policies
policy_doc01 = dict(
    Version='2012-10-17',
    Statement=[dict(
        Sid='MockPolicyStatement',
        Effect='Allow',
        Action='s3:*',
        Resource='*',
    )]
)

@mock_organizations
def test_create_policy():
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    policy = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY'
    )['Policy']
    validate_service_control_policy(org, policy)
    policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy')
    policy['PolicySummary']['Description'].should.equal('A dummy service control policy')
    policy['Content'].should.equal(json.dumps(policy_doc01))

@mock_organizations
def test_describe_policy():
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY'
    )['Policy']['PolicySummary']['Id']
    policy = client.describe_policy(PolicyId=policy_id)['Policy']
    validate_service_control_policy(org, policy)
    policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy')
    policy['PolicySummary']['Description'].should.equal('A dummy service control policy')
    policy['Content'].should.equal(json.dumps(policy_doc01))

@mock_organizations
def test_describe_policy_exception():
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')['Organization']
    policy_id = 'p-47fhe9s3'
    with assert_raises(ClientError) as e:
        response = client.describe_policy(PolicyId=policy_id)
    ex = e.exception
    ex.operation_name.should.equal('DescribePolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('PolicyNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.describe_policy(PolicyId='meaninglessstring')
    ex = e.exception
    ex.operation_name.should.equal('DescribePolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('InvalidInputException')

@mock_organizations
def test_attach_policy():
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id,
        Name='ou01',
    )['OrganizationalUnit']['Id']
    account_id = client.create_account(
        AccountName=mockname,
        Email=mockemail,
    )['CreateAccountStatus']['AccountId']
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY'
    )['Policy']['PolicySummary']['Id']
    response = client.attach_policy(PolicyId=policy_id, TargetId=root_id)
    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
    response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id)
    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
    response = client.attach_policy(PolicyId=policy_id, TargetId=account_id)
    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)

@mock_organizations
def test_attach_policy_exception():
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')['Organization']
    root_id = 'r-dj873'
    ou_id = 'ou-gi99-i7r8eh2i2'
    account_id = '126644886543'
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY'
    )['Policy']['PolicySummary']['Id']
    with assert_raises(ClientError) as e:
        response = client.attach_policy(PolicyId=policy_id, TargetId=root_id)
    ex = e.exception
    ex.operation_name.should.equal('AttachPolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id)
    ex = e.exception
    ex.operation_name.should.equal('AttachPolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.attach_policy(PolicyId=policy_id, TargetId=account_id)
    ex = e.exception
    ex.operation_name.should.equal('AttachPolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('AccountNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.attach_policy(PolicyId=policy_id, TargetId='meaninglessstring')
    ex = e.exception
    ex.operation_name.should.equal('AttachPolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('InvalidInputException')

@mock_organizations
def test_list_polices():
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    for i in range(0, 4):
        client.create_policy(
            Content=json.dumps(policy_doc01),
            Description='A dummy service control policy',
            Name='MockServiceControlPolicy' + str(i),
            Type='SERVICE_CONTROL_POLICY'
        )
    response = client.list_policies(Filter='SERVICE_CONTROL_POLICY')
    for policy in response['Policies']:
        validate_policy_summary(org, policy)

@mock_organizations
def test_list_policies_for_target():
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id,
        Name='ou01',
    )['OrganizationalUnit']['Id']
    account_id = client.create_account(
        AccountName=mockname,
        Email=mockemail,
    )['CreateAccountStatus']['AccountId']
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY'
    )['Policy']['PolicySummary']['Id']
    client.attach_policy(PolicyId=policy_id, TargetId=ou_id)
    response = client.list_policies_for_target(
        TargetId=ou_id,
        Filter='SERVICE_CONTROL_POLICY',
    )
    for policy in response['Policies']:
        validate_policy_summary(org, policy)
    client.attach_policy(PolicyId=policy_id, TargetId=account_id)
    response = client.list_policies_for_target(
        TargetId=account_id,
        Filter='SERVICE_CONTROL_POLICY',
    )
    for policy in response['Policies']:
        validate_policy_summary(org, policy)

@mock_organizations
def test_list_policies_for_target_exception():
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')['Organization']
    ou_id = 'ou-gi99-i7r8eh2i2'
    account_id = '126644886543'
    with assert_raises(ClientError) as e:
        response = client.list_policies_for_target(
            TargetId=ou_id,
            Filter='SERVICE_CONTROL_POLICY',
        )
    ex = e.exception
    ex.operation_name.should.equal('ListPoliciesForTarget')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.list_policies_for_target(
            TargetId=account_id,
            Filter='SERVICE_CONTROL_POLICY',
        )
    ex = e.exception
    ex.operation_name.should.equal('ListPoliciesForTarget')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('AccountNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.list_policies_for_target(
            TargetId='meaninglessstring',
            Filter='SERVICE_CONTROL_POLICY',
        )
    ex = e.exception
    ex.operation_name.should.equal('ListPoliciesForTarget')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('InvalidInputException')

@mock_organizations
def test_list_targets_for_policy():
    client = boto3.client('organizations', region_name='us-east-1')
    org = client.create_organization(FeatureSet='ALL')['Organization']
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id,
        Name='ou01',
    )['OrganizationalUnit']['Id']
    account_id = client.create_account(
        AccountName=mockname,
        Email=mockemail,
    )['CreateAccountStatus']['AccountId']
    policy_id = client.create_policy(
        Content=json.dumps(policy_doc01),
        Description='A dummy service control policy',
        Name='MockServiceControlPolicy',
        Type='SERVICE_CONTROL_POLICY'
    )['Policy']['PolicySummary']['Id']
    client.attach_policy(PolicyId=policy_id, TargetId=root_id)
    client.attach_policy(PolicyId=policy_id, TargetId=ou_id)
    client.attach_policy(PolicyId=policy_id, TargetId=account_id)
    response = client.list_targets_for_policy(PolicyId=policy_id)
    for target in response['Targets']:
        target.should.be.a(dict)
        target.should.have.key('Name').should.be.a(six.string_types)
        target.should.have.key('Arn').should.be.a(six.string_types)
        target.should.have.key('TargetId').should.be.a(six.string_types)
        target.should.have.key('Type').should.be.within(
            ['ROOT', 'ORGANIZATIONAL_UNIT', 'ACCOUNT']
        )

@mock_organizations
def test_list_targets_for_policy_exception():
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')['Organization']
    policy_id = 'p-47fhe9s3'
    with assert_raises(ClientError) as e:
        response = client.list_targets_for_policy(PolicyId=policy_id)
    ex = e.exception
    ex.operation_name.should.equal('ListTargetsForPolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('PolicyNotFoundException')
    with assert_raises(ClientError) as e:
        response = client.list_targets_for_policy(PolicyId='meaninglessstring')
    ex = e.exception
    ex.operation_name.should.equal('ListTargetsForPolicy')
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain('InvalidInputException')
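
The Type values asserted in test_list_targets_for_policy follow directly from the shape of the TargetId. A small sketch of that prefix convention, using a hypothetical classifier not present in moto; the prefix rules come from the AWS Organizations documentation:

# Hypothetical classifier mirroring the ROOT / ORGANIZATIONAL_UNIT / ACCOUNT
# mapping asserted above.
def target_type(target_id):
    if target_id.startswith('r-'):
        return 'ROOT'
    if target_id.startswith('ou-'):
        return 'ORGANIZATIONAL_UNIT'
    if target_id.isdigit():
        return 'ACCOUNT'
    raise ValueError('unrecognized target id: %s' % target_id)

assert target_type('r-dj873') == 'ROOT'
assert target_type('ou-gi99-i7r8eh2i2') == 'ORGANIZATIONAL_UNIT'
assert target_type('126644886543') == 'ACCOUNT'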

View File

@@ -174,8 +174,8 @@ def test_add_security_group_to_database():
def test_add_database_subnet_group():
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
-   subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
-   subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24")
+   subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")
+   subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.2.0/24")
    subnet_ids = [subnet1.id, subnet2.id]

    conn = boto.rds.connect_to_region("us-west-2")
@@ -191,7 +191,7 @@ def test_add_database_subnet_group():
def test_describe_database_subnet_group():
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
-   subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
+   subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")

    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
@@ -209,7 +209,7 @@ def test_describe_database_subnet_group():
def test_delete_database_subnet_group():
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
-   subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
+   subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")

    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
@@ -227,7 +227,7 @@ def test_delete_database_subnet_group():
def test_create_database_in_subnet_group():
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
-   subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
+   subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")

    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])

Some files were not shown because too many files have changed in this diff.